Diffstat (limited to 'lib')
-rw-r--r--  lib/asn1/test/asn1_SUITE.erl | 11
-rw-r--r--  lib/common_test/doc/src/cover_chapter.xml | 27
-rw-r--r--  lib/common_test/doc/src/ct_run.xml | 2
-rw-r--r--  lib/common_test/doc/src/run_test_chapter.xml | 5
-rw-r--r--  lib/common_test/src/Makefile | 3
-rw-r--r--  lib/common_test/src/ct.erl | 5
-rw-r--r--  lib/common_test/src/ct_config.erl | 5
-rw-r--r--  lib/common_test/src/ct_conn_log_h.erl | 6
-rw-r--r--  lib/common_test/src/ct_framework.erl | 368
-rw-r--r--  lib/common_test/src/ct_groups.erl | 599
-rw-r--r--  lib/common_test/src/ct_master.erl | 2
-rw-r--r--  lib/common_test/src/ct_master_logs.erl | 2
-rw-r--r--  lib/common_test/src/ct_netconfc.erl | 33
-rw-r--r--  lib/common_test/src/ct_run.erl | 196
-rw-r--r--  lib/common_test/src/ct_slave.erl | 16
-rw-r--r--  lib/common_test/src/ct_testspec.erl | 28
-rw-r--r--  lib/common_test/src/ct_util.hrl | 1
-rw-r--r--  lib/common_test/src/cth_log_redirect.erl | 2
-rw-r--r--  lib/common_test/test/Makefile | 7
-rw-r--r--  lib/common_test/test/common_test.cover | 10
-rw-r--r--  lib/common_test/test/ct_config_SUITE.erl | 6
-rw-r--r--  lib/common_test/test/ct_config_SUITE_data/config/test/config_static_SUITE.erl | 10
-rw-r--r--  lib/common_test/test/ct_config_info_SUITE.erl | 14
-rw-r--r--  lib/common_test/test/ct_cover_SUITE.erl | 271
-rw-r--r--  lib/common_test/test/ct_cover_SUITE_data/cover_SUITE.erl | 156
-rw-r--r--  lib/common_test/test/ct_cover_SUITE_data/cover_SUITE_data/.gitignore | 0
-rw-r--r--  lib/common_test/test/ct_cover_SUITE_data/cover_test_mod.erl | 4
-rw-r--r--  lib/common_test/test/ct_error_SUITE.erl | 446
-rw-r--r--  lib/common_test/test/ct_error_SUITE_data/error/test/misc_error_1_SUITE.erl | 12
-rw-r--r--  lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_8_SUITE.erl | 258
-rw-r--r--  lib/common_test/test/ct_group_leader_SUITE.erl | 181
-rw-r--r--  lib/common_test/test/ct_group_leader_SUITE_data/group_leader_SUITE.erl | 252
-rw-r--r--  lib/common_test/test/ct_groups_search_SUITE.erl | 1245
-rw-r--r--  lib/common_test/test/ct_groups_search_SUITE_data/groups_search_dummy_1_SUITE.erl | 83
-rw-r--r--  lib/common_test/test/ct_groups_search_SUITE_data/groups_search_dummy_2_SUITE.erl | 102
-rw-r--r--  lib/common_test/test/ct_netconfc_SUITE.erl | 9
-rw-r--r--  lib/common_test/test/ct_test_support.erl | 69
-rw-r--r--  lib/compiler/src/v3_codegen.erl | 1
-rw-r--r--  lib/compiler/test/Makefile | 4
-rw-r--r--  lib/compiler/test/andor_SUITE.erl | 7
-rw-r--r--  lib/compiler/test/beam_except_SUITE.erl (renamed from lib/compiler/test/beam_expect_SUITE.erl) | 2
-rw-r--r--  lib/compiler/test/beam_validator_SUITE.erl | 19
-rw-r--r--  lib/compiler/test/bs_bit_binaries_SUITE.erl | 12
-rw-r--r--  lib/compiler/test/bs_construct_SUITE.erl | 10
-rw-r--r--  lib/compiler/test/bs_match_SUITE.erl | 41
-rw-r--r--  lib/compiler/test/compilation_SUITE.erl | 39
-rw-r--r--  lib/compiler/test/core_SUITE.erl | 8
-rw-r--r--  lib/compiler/test/core_fold_SUITE.erl | 8
-rw-r--r--  lib/compiler/test/error_SUITE.erl | 19
-rw-r--r--  lib/compiler/test/guard_SUITE.erl | 19
-rw-r--r--  lib/compiler/test/inline_SUITE.erl | 31
-rw-r--r--  lib/compiler/test/match_SUITE.erl | 8
-rw-r--r--  lib/compiler/test/misc_SUITE.erl | 7
-rw-r--r--  lib/compiler/test/receive_SUITE.erl | 6
-rw-r--r--  lib/compiler/test/record_SUITE.erl | 10
-rw-r--r--  lib/compiler/test/test_lib.erl | 15
-rw-r--r--  lib/compiler/test/trycatch_SUITE.erl | 12
-rw-r--r--  lib/compiler/test/warnings_SUITE.erl | 14
-rw-r--r--  lib/diameter/.gitignore | 57
-rw-r--r--  lib/diameter/Makefile | 32
-rw-r--r--  lib/diameter/Makefile.in | 88
-rw-r--r--  lib/diameter/aclocal.m4 | 65
-rwxr-xr-x  lib/diameter/autoconf/config.guess | 1519
-rwxr-xr-x  lib/diameter/autoconf/config.sub | 1630
-rwxr-xr-x  lib/diameter/autoconf/install-sh | 519
-rw-r--r--  lib/diameter/configure.in | 137
-rw-r--r--  lib/diameter/doc/.gitignore | 4
-rw-r--r--  lib/diameter/doc/src/.gitignore | 1
-rw-r--r--  lib/diameter/doc/src/Makefile | 19
-rw-r--r--  lib/diameter/doc/src/diameter.xml | 847
-rw-r--r--  lib/diameter/doc/src/diameter_app.xml | 410
-rw-r--r--  lib/diameter/doc/src/diameter_compile.xml | 9
-rw-r--r--  lib/diameter/doc/src/diameter_dict.xml | 111
-rw-r--r--  lib/diameter/doc/src/diameter_sctp.xml | 34
-rw-r--r--  lib/diameter/doc/src/diameter_tcp.xml | 27
-rw-r--r--  lib/diameter/doc/src/diameter_transport.xml | 66
-rw-r--r--  lib/diameter/doc/src/files.mk | 2
-rw-r--r--  lib/diameter/doc/src/notes.xml | 41
-rw-r--r--  lib/diameter/doc/src/seealso.ent | 112
-rw-r--r--  lib/diameter/doc/src/seehere.sed | 35
-rw-r--r--  lib/diameter/make/release_targets.mk | 92
-rw-r--r--  lib/diameter/make/rules.mk.in | 193
-rw-r--r--  lib/diameter/make/subdir.mk | 53
-rw-r--r--  lib/diameter/make/target.mk | 33
-rw-r--r--  lib/diameter/src/Makefile | 9
-rw-r--r--  lib/diameter/src/base/diameter.appup.src | 54
-rw-r--r--  lib/diameter/src/base/diameter.erl | 18
-rw-r--r--  lib/diameter/src/base/diameter_capx.erl | 6
-rw-r--r--  lib/diameter/src/base/diameter_codec.erl | 3
-rw-r--r--  lib/diameter/src/base/diameter_config.erl | 83
-rw-r--r--  lib/diameter/src/base/diameter_peer.erl | 2
-rw-r--r--  lib/diameter/src/base/diameter_peer_fsm.erl | 363
-rw-r--r--  lib/diameter/src/base/diameter_reg.erl | 255
-rw-r--r--  lib/diameter/src/base/diameter_service.erl | 825
-rw-r--r--  lib/diameter/src/base/diameter_session.erl | 14
-rw-r--r--  lib/diameter/src/base/diameter_watchdog.erl | 170
-rw-r--r--  lib/diameter/test/Makefile | 9
-rw-r--r--  lib/diameter/test/diameter_compiler_SUITE.erl | 12
-rw-r--r--  lib/diameter/test/diameter_dpr_SUITE.erl | 196
-rw-r--r--  lib/diameter/test/diameter_traffic_SUITE.erl | 136
-rw-r--r--  lib/diameter/test/diameter_util.erl | 18
-rw-r--r--  lib/diameter/test/diameter_watchdog_SUITE.erl | 711
-rw-r--r--  lib/diameter/test/modules.mk | 5
-rw-r--r--  lib/diameter/vsn.mk | 2
-rw-r--r--  lib/erl_docgen/priv/xsl/Makefile | 3
-rw-r--r--  lib/erl_docgen/priv/xsl/db_html.xsl | 36
-rw-r--r--  lib/erl_docgen/priv/xsl/db_pdf.xsl | 2
-rw-r--r--  lib/erl_docgen/priv/xsl/db_pdf_params.xsl | 8
-rw-r--r--  lib/erl_docgen/vsn.mk | 2
-rw-r--r--  lib/inets/doc/src/httpd.xml | 8
-rw-r--r--  lib/inets/src/http_server/httpd_conf.erl | 15
-rw-r--r--  lib/inets/src/http_server/httpd_request_handler.erl | 60
-rw-r--r--  lib/inets/src/inets_app/inets.appup.src | 14
-rw-r--r--  lib/inets/test/httpd_basic_SUITE.erl | 16
-rw-r--r--  lib/inets/test/inets_app_test.erl | 16
-rw-r--r--  lib/inets/vsn.mk | 2
-rw-r--r--  lib/jinterface/java_src/com/ericsson/otp/erlang/OtpEpmd.java | 2
-rw-r--r--  lib/kernel/doc/src/heart.xml | 33
-rw-r--r--  lib/kernel/doc/src/inet.xml | 63
-rw-r--r--  lib/kernel/src/gen_sctp.erl | 2
-rw-r--r--  lib/kernel/src/gen_tcp.erl | 6
-rw-r--r--  lib/kernel/src/gen_udp.erl | 6
-rw-r--r--  lib/kernel/src/heart.erl | 3
-rw-r--r--  lib/kernel/src/inet.erl | 8
-rw-r--r--  lib/kernel/src/inet_int.hrl | 3
-rw-r--r--  lib/kernel/test/code_SUITE.erl | 4
-rw-r--r--  lib/kernel/test/global_SUITE.erl | 119
-rw-r--r--  lib/kernel/test/heart_SUITE.erl | 401
-rw-r--r--  lib/kernel/test/inet_sockopt_SUITE.erl | 155
-rw-r--r--  lib/kernel/test/interactive_shell_SUITE.erl | 215
-rw-r--r--  lib/kernel/test/kernel.cover | 2
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE.erl | 2
-rw-r--r--  lib/public_key/asn1/AuthenticationFramework.asn1 | 367
-rw-r--r--  lib/public_key/asn1/InformationFramework.asn1 | 682
-rw-r--r--  lib/public_key/asn1/Makefile | 12
-rw-r--r--  lib/public_key/asn1/OTP-PUB-KEY.set.asn | 2
-rw-r--r--  lib/public_key/asn1/PKCS-10.asn1 | 70
-rw-r--r--  lib/public_key/asn1/PKCS-7.asn1 | 387
-rw-r--r--  lib/public_key/asn1/PKIX1Explicit88.asn1 | 5
-rw-r--r--  lib/public_key/asn1/SelectedAttributeTypes.asn1 | 1575
-rw-r--r--  lib/public_key/asn1/UsefulDefinitions.asn1 | 234
-rw-r--r--  lib/public_key/doc/src/cert_records.xml | 43
-rw-r--r--  lib/public_key/doc/src/introduction.xml | 14
-rw-r--r--  lib/public_key/doc/src/public_key.xml | 4
-rw-r--r--  lib/public_key/include/public_key.hrl | 2
-rw-r--r--  lib/public_key/src/pubkey_pem.erl | 19
-rw-r--r--  lib/public_key/test/public_key_SUITE.erl | 39
-rw-r--r--  lib/public_key/test/public_key_SUITE_data/pkcs7_cert.pem | 23
-rw-r--r--  lib/public_key/vsn.mk | 2
-rw-r--r--  lib/runtime_tools/src/dyntrace.erl | 4
-rw-r--r--  lib/runtime_tools/test/dbg_SUITE.erl | 4
-rw-r--r--  lib/ssh/doc/src/ssh.xml | 14
-rw-r--r--  lib/ssh/src/ssh.appup.src | 6
-rw-r--r--  lib/ssh/src/ssh.erl | 89
-rw-r--r--  lib/ssh/src/ssh_auth.erl | 67
-rw-r--r--  lib/ssh/src/ssh_connection_handler.erl | 28
-rw-r--r--  lib/ssh/src/ssh_connection_manager.erl | 109
-rw-r--r--  lib/ssh/src/ssh_io.erl | 61
-rw-r--r--  lib/ssh/src/ssh_transport.erl | 2
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE.erl | 25
-rw-r--r--  lib/ssh/vsn.mk | 2
-rw-r--r--  lib/ssl/src/ssl.appup.src | 15
-rw-r--r--  lib/ssl/src/ssl.erl | 8
-rw-r--r--  lib/ssl/src/ssl_connection.erl | 237
-rw-r--r--  lib/ssl/vsn.mk | 2
-rw-r--r--  lib/stdlib/src/proc_lib.erl | 38
-rw-r--r--  lib/stdlib/test/base64_SUITE.erl | 5
-rw-r--r--  lib/stdlib/test/dict_SUITE.erl | 20
-rw-r--r--  lib/stdlib/test/dict_test_lib.erl | 55
-rw-r--r--  lib/stdlib/test/epp_SUITE.erl | 9
-rw-r--r--  lib/stdlib/test/gen_server_SUITE.erl | 3
-rw-r--r--  lib/stdlib/test/id_transform_SUITE.erl | 4
-rw-r--r--  lib/stdlib/test/io_proto_SUITE.erl | 163
-rw-r--r--  lib/stdlib/test/proc_lib_SUITE.erl | 31
-rw-r--r--  lib/stdlib/test/sets_SUITE.erl | 342
-rw-r--r--  lib/stdlib/test/sets_test_lib.erl | 82
-rw-r--r--  lib/stdlib/test/stdlib.cover | 15
-rw-r--r--  lib/test_server/doc/src/test_server.xml | 14
-rw-r--r--  lib/test_server/src/Makefile | 2
-rw-r--r--  lib/test_server/src/erl2html2.erl | 247
-rw-r--r--  lib/test_server/src/test_server.app.src | 1
-rw-r--r--  lib/test_server/src/test_server.erl | 1238
-rw-r--r--  lib/test_server/src/test_server_ctrl.erl | 1120
-rw-r--r--  lib/test_server/src/test_server_gl.erl | 293
-rw-r--r--  lib/test_server/src/test_server_h.erl | 5
-rw-r--r--  lib/test_server/src/test_server_internal.hrl | 4
-rw-r--r--  lib/test_server/src/test_server_io.erl | 317
-rw-r--r--  lib/test_server/src/test_server_node.erl | 256
-rw-r--r--  lib/test_server/src/test_server_sup.erl | 39
-rw-r--r--  lib/test_server/src/ts.erl | 73
-rw-r--r--  lib/test_server/src/ts_run.erl | 2
-rw-r--r--  lib/test_server/test/Makefile | 3
-rw-r--r--  lib/test_server/test/erl2html2_SUITE.erl | 254
-rw-r--r--  lib/test_server/test/erl2html2_SUITE_data/Makefile.src | 2
-rw-r--r--  lib/test_server/test/erl2html2_SUITE_data/header1.hrl | 4
-rw-r--r--  lib/test_server/test/erl2html2_SUITE_data/include/header2.hrl | 0
-rw-r--r--  lib/test_server/test/erl2html2_SUITE_data/m1.erl | 46
-rw-r--r--  lib/test_server/test/test_server.cover | 20
-rw-r--r--  lib/test_server/test/test_server_SUITE.erl | 21
-rw-r--r--  lib/test_server/test/test_server_SUITE_data/Makefile.src | 3
-rw-r--r--  lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl | 14
-rw-r--r--  lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl | 148
-rw-r--r--  lib/test_server/test/test_server_test_lib.erl | 23
-rw-r--r--  lib/tools/doc/src/cover.xml | 20
-rw-r--r--  lib/tools/src/cover.erl | 188
-rw-r--r--  lib/tools/test/cover_SUITE.erl | 133
-rw-r--r--  lib/tools/test/cover_SUITE_data/f.erl | 11
207 files changed, 14521 insertions, 9871 deletions
diff --git a/lib/asn1/test/asn1_SUITE.erl b/lib/asn1/test/asn1_SUITE.erl
index b0c37d79e7..79c7bf1476 100644
--- a/lib/asn1/test/asn1_SUITE.erl
+++ b/lib/asn1/test/asn1_SUITE.erl
@@ -1,3 +1,4 @@
+%%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 2001-2012. All Rights Reserved.
@@ -242,14 +243,13 @@ init_per_testcase(Func, Config) ->
true = code:add_patha(CaseDir),
Dog = case Func of
- testX420 -> test_server:timetrap({minutes, 90});
- _ -> test_server:timetrap({minutes, 60})
+ testX420 -> ct:timetrap({minutes, 90});
+ _ -> ct:timetrap({minutes, 60})
end,
[{case_dir, CaseDir}, {watchdog, Dog}|Config].
end_per_testcase(_Func, Config) ->
- code:del_path(?config(case_dir, Config)),
- test_server:timetrap_cancel(?config(watchdog, Config)).
+ code:del_path(?config(case_dir, Config)).
%%------------------------------------------------------------------------------
%% Test runners
@@ -1064,6 +1064,9 @@ test_modified_x420(Config) ->
asn1_test_lib:compile_all(Files, Config, [der]),
test_modified_x420:test_io(Config).
+
+testX420() ->
+ [{timetrap,{minutes,90}}].
testX420(Config) ->
test(Config, fun testX420/3, [ber, ber_bin, ber_bin_v2]).
testX420(Config, Rule, Opts) ->
diff --git a/lib/common_test/doc/src/cover_chapter.xml b/lib/common_test/doc/src/cover_chapter.xml
index 803a71de07..b2e64bfff0 100644
--- a/lib/common_test/doc/src/cover_chapter.xml
+++ b/lib/common_test/doc/src/cover_chapter.xml
@@ -109,6 +109,33 @@
</section>
<section>
+ <marker id="cover_stop"></marker>
+ <title>Stopping the cover tool when tests are completed</title>
+ <p>By default, the Cover tool is automatically stopped when the
+ tests are completed. This causes the original (non-cover-compiled)
+ modules to be loaded back into the test node. If a process at
+ this point is still running old code of any of the modules that
+ are cover compiled, meaning that it has not made a fully
+ qualified function call since the cover compilation, that
+ process is killed. To avoid this, it is possible to set the
+ <c>cover_stop</c> option to
+ <c>false</c>. The modules will then remain cover compiled, so
+ this is only recommended if the Erlang node(s) under test are
+ terminated after the test is completed, or if cover can be
+ stopped manually.</p>
+
+ <p>The option can be set by using the <c>-cover_stop</c> flag with
+ <c>ct_run</c>, by adding <c>{cover_stop,true|false}</c> to the
+ Opts argument to <c><seealso
+ marker="ct#run_test-1">ct:run_test/1</seealso></c>, or by adding
+ a <c>cover_stop</c> term in your test specification (see chapter
+ about <seealso
+ marker="run_test_chapter#test_specifications">test
+ specifications</seealso>).</p>
+
+ </section>
+
+ <section>
<title>The cover specification file</title>
<p>These are the terms allowed in a cover specification file:</p>
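As a quick illustration of the cover_stop option documented above, here is a minimal sketch of running tests from Erlang with the modules kept cover compiled afterwards. The directory and file names are placeholders and are not part of this change:

-module(cover_stop_example).
-export([run/0]).

%% Sketch only: "./test" and "cover.spec" are hypothetical paths.
%% {cover_stop,false} keeps the test node's modules cover compiled
%% after ct:run_test/1 returns, as described in cover_chapter.xml above.
run() ->
    ct:run_test([{dir,"./test"},
                 {cover,"cover.spec"},
                 {cover_stop,false}]).

The same effect is available from the command line with: ct_run -dir ./test -cover cover.spec -cover_stop false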
diff --git a/lib/common_test/doc/src/ct_run.xml b/lib/common_test/doc/src/ct_run.xml
index 9cc5495af7..da18640df7 100644
--- a/lib/common_test/doc/src/ct_run.xml
+++ b/lib/common_test/doc/src/ct_run.xml
@@ -104,6 +104,7 @@
[-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]
[-stylesheet CSSFile]
[-cover CoverCfgFile]
+ [-cover_stop Bool]
[-event_handler EvHandler1 EvHandler2 .. EvHandlerN] |
[-event_handler_init EvHandler1 InitArg1 and
EvHandler2 InitArg2 and .. EvHandlerN InitArgN]
@@ -138,6 +139,7 @@
[-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]
[-stylesheet CSSFile]
[-cover CoverCfgFile]
+ [-cover_stop Bool]
[-event_handler EvHandler1 EvHandler2 .. EvHandlerN] |
[-event_handler_init EvHandler1 InitArg1 and
EvHandler2 InitArg2 and .. EvHandlerN InitArgN]
diff --git a/lib/common_test/doc/src/run_test_chapter.xml b/lib/common_test/doc/src/run_test_chapter.xml
index ea62df27cc..b5b914d506 100644
--- a/lib/common_test/doc/src/run_test_chapter.xml
+++ b/lib/common_test/doc/src/run_test_chapter.xml
@@ -153,6 +153,8 @@
<item><c><![CDATA[-stylesheet <css_file>]]></c>, points out a user HTML style sheet (see below).</item>
<item><c><![CDATA[-cover <cover_cfg_file>]]></c>, to perform code coverage test (see
<seealso marker="cover_chapter#cover">Code Coverage Analysis</seealso>).</item>
+ <item><c><![CDATA[-cover_stop <bool>]]></c>, to specify if the cover tool shall be stopped after the test is completed (see
+ <seealso marker="cover_chapter#cover_stop">Code Coverage Analysis</seealso>).</item>
<item><c><![CDATA[-event_handler <event_handlers>]]></c>, to install
<seealso marker="event_handler_chapter#event_handling">event handlers</seealso>.</item>
<item><c><![CDATA[-event_handler_init <event_handlers>]]></c>, to install
@@ -495,6 +497,9 @@
{cover, CoverSpecFile}.
{cover, NodeRefs, CoverSpecFile}.
+ {cover_stop, Bool}.
+ {cover_stop, NodeRefs, Bool}.
+
{include, IncludeDirs}.
{include, NodeRefs, IncludeDirs}.
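To show how the two new test specification terms combine with the existing cover term, here is a minimal hypothetical spec file; the node alias, node name, directory and file names are invented for illustration:

%% my_tests.spec -- hypothetical test specification
{node, n1, ct_node@host1}.
{cover, n1, "cover.spec"}.        %% existing term: cover spec file
{cover_stop, n1, false}.          %% new term: keep modules cover compiled
{suites, n1, "./test", all}.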
diff --git a/lib/common_test/src/Makefile b/lib/common_test/src/Makefile
index f7dce195d7..dd2923ece9 100644
--- a/lib/common_test/src/Makefile
+++ b/lib/common_test/src/Makefile
@@ -73,7 +73,8 @@ MODULES= \
cth_surefire \
ct_netconfc \
ct_conn_log_h \
- cth_conn_log
+ cth_conn_log \
+ ct_groups
TARGET_MODULES= $(MODULES:%=$(EBIN)/%)
BEAM_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR))
diff --git a/lib/common_test/src/ct.erl b/lib/common_test/src/ct.erl
index 5014309c0f..6ecd0a022c 100644
--- a/lib/common_test/src/ct.erl
+++ b/lib/common_test/src/ct.erl
@@ -148,7 +148,7 @@ run(TestDirs) ->
%%% {config,CfgFiles} | {userconfig, UserConfig} |
%%% {allow_user_terms,Bool} | {logdir,LogDir} |
%%% {silent_connections,Conns} | {stylesheet,CSSFile} |
-%%% {cover,CoverSpecFile} | {step,StepOpts} |
+%%% {cover,CoverSpecFile} | {cover_stop,Bool} | {step,StepOpts} |
%%% {event_handler,EventHandlers} | {include,InclDirs} |
%%% {auto_compile,Bool} | {create_priv_dir,CreatePrivDir} |
%%% {multiply_timetraps,M} | {scale_timetraps,Bool} |
@@ -987,8 +987,9 @@ get_testdata(Key) ->
end.
%%%-----------------------------------------------------------------
-%%% @spec abort_current_testcase(Reason) -> ok | {error,no_testcase_running}
+%%% @spec abort_current_testcase(Reason) -> ok | {error,ErrorReason}
%%% Reason = term()
+%%% ErrorReason = no_testcase_running | parallel_group
%%%
%%% @doc <p>When calling this function, the currently executing test case will be aborted.
%%% It is the user's responsibility to know for sure which test case is currently
diff --git a/lib/common_test/src/ct_config.erl b/lib/common_test/src/ct_config.erl
index 30bf5925c0..b1d709bc75 100644
--- a/lib/common_test/src/ct_config.erl
+++ b/lib/common_test/src/ct_config.erl
@@ -171,7 +171,7 @@ process_default_configs(Opts) ->
lists:flatmap(fun({config,[_|_] = FileOrFiles}) ->
case {io_lib:printable_list(FileOrFiles),
io_lib:printable_list(hd(FileOrFiles))} of
- {true,true} ->
+ {false,true} ->
FileOrFiles;
{true,false} ->
[FileOrFiles];
@@ -532,7 +532,8 @@ do_require(Name,Key) ->
case get_key_from_name(Name) of
{error,_} ->
allocate(Name,Key);
- {ok,Key} ->
+ {ok,NameKey} when NameKey == Key;
+ is_tuple(Key) andalso element(1,Key) == NameKey ->
%% already allocated - check that it has all required subkeys
R = make_ref(),
case get_config(Key,R,[]) of
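The do_require/2 clause changed above lets a repeated require for the same name match the "already allocated" branch when the new key is a tuple whose first element equals the stored key, after which the required subkeys are verified via get_config/3. Roughly, a sequence like the following is intended to take that branch; the unix config entry and its subkeys are hypothetical:

%% Hypothetical config file entry:
%%   {unix, [{telnet, "198.51.100.1"}, {username, "tester"}]}.
init_per_suite(Config) ->
    ok = ct:require(a_host, unix),             %% allocates 'a_host' -> unix
    ok = ct:require(a_host, {unix,[telnet]}),  %% tuple key with same head
    Config.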
diff --git a/lib/common_test/src/ct_conn_log_h.erl b/lib/common_test/src/ct_conn_log_h.erl
index bf27238121..d7bd18606b 100644
--- a/lib/common_test/src/ct_conn_log_h.erl
+++ b/lib/common_test/src/ct_conn_log_h.erl
@@ -64,10 +64,16 @@ do_open_files([],Acc) ->
handle_event({_Type, GL, _Msg}, State) when node(GL) /= node() ->
{ok, State};
handle_event({_Type,_GL,{Pid,{ct_connection,Action,ConnName},Report}},State) ->
+ %% NOTE: if the format of this event is changed
+ %% ({ct_connection,Action,ConnName}) then remember to change
+ %% test_server_h:report_receiver as well!!!
Info = conn_info(Pid,#conn_log{name=ConnName,action=Action}),
write_report(now(),Info,Report,State),
{ok, State};
handle_event({_Type,_GL,{Pid,Info=#conn_log{},Report}},State) ->
+ %% NOTE: if the format of this event is changed
+ %% (Info=#conn_log{}) then remember to change
+ %% test_server_h:report_receiver as well!!!
write_report(now(),conn_info(Pid,Info),Report,State),
{ok, State};
handle_event({error_report,_,{Pid,_,[{ct_connection,ConnName}|R]}},State) ->
diff --git a/lib/common_test/src/ct_framework.erl b/lib/common_test/src/ct_framework.erl
index bec3368869..c1abf27e9f 100644
--- a/lib/common_test/src/ct_framework.erl
+++ b/lib/common_test/src/ct_framework.erl
@@ -32,8 +32,6 @@
-export([error_in_suite/1, init_per_suite/1, end_per_suite/1,
init_per_group/2, end_per_group/2]).
--export([make_all_conf/3, make_conf/5]).
-
-include("ct_event.hrl").
-include("ct_util.hrl").
@@ -876,13 +874,13 @@ get_suite(Mod, all) ->
{'EXIT',_} ->
get_all(Mod, []);
GroupDefs when is_list(GroupDefs) ->
- case catch find_groups(Mod, all, all, GroupDefs) of
+ case catch ct_groups:find_groups(Mod, all, all, GroupDefs) of
{error,_} = Error ->
%% this makes test_server call error_in_suite as first
%% (and only) test case so we can report Error properly
[{?MODULE,error_in_suite,[[Error]]}];
ConfTests ->
- get_all(Mod, ConfTests)
+ get_all(Mod, ConfTests)
end;
_ ->
E = "Bad return value from "++atom_to_list(Mod)++":groups/0",
@@ -901,7 +899,7 @@ get_suite(Mod, Group={conf,Props,_Init,TCs,_End}) ->
{'EXIT',_} ->
[Group];
GroupDefs when is_list(GroupDefs) ->
- case catch find_groups(Mod, Name, TCs, GroupDefs) of
+ case catch ct_groups:find_groups(Mod, Name, TCs, GroupDefs) of
{error,_} = Error ->
%% this makes test_server call error_in_suite as first
%% (and only) test case so we can report Error properly
@@ -916,12 +914,13 @@ get_suite(Mod, Group={conf,Props,_Init,TCs,_End}) ->
%% init/end functions for top groups will be executed
case catch ?val(name, element(2, hd(ConfTests))) of
Name -> % top group
- delete_subs(ConfTests, ConfTests);
+ ct_groups:delete_subs(ConfTests, ConfTests);
_ ->
[]
end;
false ->
- ConfTests1 = delete_subs(ConfTests, ConfTests),
+ ConfTests1 = ct_groups:delete_subs(ConfTests,
+ ConfTests),
case ?val(override, Props) of
undefined ->
ConfTests1;
@@ -930,9 +929,9 @@ get_suite(Mod, Group={conf,Props,_Init,TCs,_End}) ->
ORSpec ->
ORSpec1 = if is_tuple(ORSpec) -> [ORSpec];
true -> ORSpec end,
- search_and_override(ConfTests1,
- ORSpec1, Mod)
- end
+ ct_groups:search_and_override(ConfTests1,
+ ORSpec1, Mod)
+ end
end
end;
_ ->
@@ -976,234 +975,6 @@ get_all_cases1(_, []) ->
%%%-----------------------------------------------------------------
-find_groups(Mod, Name, TCs, GroupDefs) ->
- Found = find(Mod, Name, TCs, GroupDefs, [], GroupDefs, false),
- trim(Found).
-
-find(Mod, all, _TCs, [{Name,Props,Tests} | Gs], Known, Defs, _)
- when is_atom(Name), is_list(Props), is_list(Tests) ->
- cyclic_test(Mod, Name, Known),
- [make_conf(Mod, Name, Props,
- find(Mod, all, all, Tests, [Name | Known], Defs, true)) |
- find(Mod, all, all, Gs, [], Defs, true)];
-
-find(Mod, Name, TCs, [{Name,Props,Tests} | _Gs], Known, Defs, false)
- when is_atom(Name), is_list(Props), is_list(Tests) ->
- cyclic_test(Mod, Name, Known),
- case TCs of
- all ->
- [make_conf(Mod, Name, Props,
- find(Mod, Name, TCs, Tests, [Name | Known], Defs, true))];
- _ ->
- Tests1 = [TC || TC <- TCs,
- lists:member(TC, Tests) == true],
- [make_conf(Mod, Name, Props, Tests1)]
- end;
-
-find(Mod, Name, TCs, [{Name1,Props,Tests} | Gs], Known, Defs, false)
- when is_atom(Name1), is_list(Props), is_list(Tests) ->
- cyclic_test(Mod, Name1, Known),
- [make_conf(Mod,Name1,Props,
- find(Mod, Name, TCs, Tests, [Name1 | Known], Defs, false)) |
- find(Mod, Name, TCs, Gs, [], Defs, false)];
-
-find(Mod, Name, _TCs, [{Name,_Props,_Tests} | _Gs], _Known, _Defs, true)
- when is_atom(Name) ->
- E = "Duplicate groups named "++atom_to_list(Name)++" in "++
- atom_to_list(Mod)++":groups/0",
- throw({error,list_to_atom(E)});
-
-find(Mod, Name, all, [{Name1,Props,Tests} | Gs], Known, Defs, true)
- when is_atom(Name1), is_list(Props), is_list(Tests) ->
- cyclic_test(Mod, Name1, Known),
- [make_conf(Mod, Name1, Props,
- find(Mod, Name, all, Tests, [Name1 | Known], Defs, true)) |
- find(Mod, Name, all, Gs, [], Defs, true)];
-
-find(Mod, Name, TCs, [{group,Name1} | Gs], Known, Defs, Found)
- when is_atom(Name1) ->
- find(Mod, Name, TCs, [expand(Mod, Name1, Defs) | Gs], Known, Defs, Found);
-
-%% Undocumented remote group feature, use with caution
-find(Mod, Name, TCs, [{group, ExtMod, ExtGrp} | Gs], Known, Defs, true)
- when is_atom(ExtMod), is_atom(ExtGrp) ->
- ExternalDefs = ExtMod:groups(),
- ExternalTCs = find(ExtMod, ExtGrp, TCs, [{group, ExtGrp}],
- [], ExternalDefs, false),
- ExternalTCs ++ find(Mod, Name, TCs, Gs, Known, Defs, true);
-
-find(Mod, Name, TCs, [{Name1,Tests} | Gs], Known, Defs, Found)
- when is_atom(Name1), is_list(Tests) ->
- find(Mod, Name, TCs, [{Name1,[],Tests} | Gs], Known, Defs, Found);
-
-find(Mod, Name, TCs, [_TC | Gs], Known, Defs, false) ->
- find(Mod, Name, TCs, Gs, Known, Defs, false);
-
-find(Mod, Name, TCs, [TC | Gs], Known, Defs, true) when is_atom(TC) ->
- [{Mod, TC} | find(Mod, Name, TCs, Gs, Known, Defs, true)];
-
-find(Mod, Name, TCs, [{ExternalTC, Case} = TC | Gs], Known, Defs, true)
- when is_atom(ExternalTC),
- is_atom(Case) ->
- [TC | find(Mod, Name, TCs, Gs, Known, Defs, true)];
-
-find(Mod, _Name, _TCs, [BadTerm | _Gs], Known, _Defs, _Found) ->
- Where = if length(Known) == 0 ->
- atom_to_list(Mod)++":groups/0";
- true ->
- "group "++atom_to_list(lists:last(Known))++
- " in "++atom_to_list(Mod)++":groups/0"
- end,
- Term = io_lib:format("~p", [BadTerm]),
- E = "Bad term "++lists:flatten(Term)++" in "++Where,
- throw({error,list_to_atom(E)});
-
-find(_Mod, _Name, _TCs, [], _Known, _Defs, false) ->
- ['$NOMATCH'];
-
-find(_Mod, _Name, _TCs, [], _Known, _Defs, _Found) ->
- [].
-
-delete_subs([{conf, _,_,_,_} = Conf | Confs], All) ->
- All1 = delete_conf(Conf, All),
- case is_sub(Conf, All1) of
- true ->
- delete_subs(Confs, All1);
- false ->
- delete_subs(Confs, All)
- end;
-delete_subs([_Else | Confs], All) ->
- delete_subs(Confs, All);
-delete_subs([], All) ->
- All.
-
-delete_conf({conf,Props,_,_,_}, Confs) ->
- Name = ?val(name, Props),
- [Conf || Conf = {conf,Props0,_,_,_} <- Confs,
- Name =/= ?val(name, Props0)].
-
-is_sub({conf,Props,_,_,_}=Conf, [{conf,_,_,Tests,_} | Confs]) ->
- Name = ?val(name, Props),
- case lists:any(fun({conf,Props0,_,_,_}) ->
- case ?val(name, Props0) of
- N when N == Name ->
- true;
- _ ->
- false
- end;
- (_) ->
- false
- end, Tests) of
- true ->
- true;
- false ->
- is_sub(Conf, Tests) or is_sub(Conf, Confs)
- end;
-
-is_sub(Conf, [_TC | Tests]) ->
- is_sub(Conf, Tests);
-
-is_sub(_Conf, []) ->
- false.
-
-trim(['$NOMATCH' | Tests]) ->
- trim(Tests);
-
-trim([{conf,Props,Init,Tests,End} | Confs]) ->
- case trim(Tests) of
- [] ->
- trim(Confs);
- Trimmed ->
- [{conf,Props,Init,Trimmed,End} | trim(Confs)]
- end;
-
-trim([TC | Tests]) ->
- [TC | trim(Tests)];
-
-trim([]) ->
- [].
-
-cyclic_test(Mod, Name, Names) ->
- case lists:member(Name, Names) of
- true ->
- E = "Cyclic reference to group "++atom_to_list(Name)++
- " in "++atom_to_list(Mod)++":groups/0",
- throw({error,list_to_atom(E)});
- false ->
- ok
- end.
-
-expand(Mod, Name, Defs) ->
- case lists:keysearch(Name, 1, Defs) of
- {value,Def} ->
- Def;
- false ->
- E = "Invalid group "++atom_to_list(Name)++
- " in "++atom_to_list(Mod)++":groups/0",
- throw({error,list_to_atom(E)})
- end.
-
-make_all_conf(Dir, Mod, _Props) ->
- case code:is_loaded(Mod) of
- false ->
- code:load_abs(filename:join(Dir,atom_to_list(Mod)));
- _ ->
- ok
- end,
- make_all_conf(Mod).
-
-make_all_conf(Mod) ->
- case catch apply(Mod, groups, []) of
- {'EXIT',_} ->
- {error,{invalid_group_definition,Mod}};
- GroupDefs when is_list(GroupDefs) ->
- case catch find_groups(Mod, all, all, GroupDefs) of
- {error,_} = Error ->
- %% this makes test_server call error_in_suite as first
- %% (and only) test case so we can report Error properly
- [{?MODULE,error_in_suite,[[Error]]}];
- [] ->
- {error,{invalid_group_spec,Mod}};
- ConfTests ->
- [{conf,Props,Init,all,End} ||
- {conf,Props,Init,_,End}
- <- delete_subs(ConfTests, ConfTests)]
- end
- end.
-
-make_conf(Dir, Mod, Name, Props, TestSpec) ->
- case code:is_loaded(Mod) of
- false ->
- code:load_abs(filename:join(Dir,atom_to_list(Mod)));
- _ ->
- ok
- end,
- make_conf(Mod, Name, Props, TestSpec).
-
-make_conf(Mod, Name, Props, TestSpec) ->
- case code:is_loaded(Mod) of
- false ->
- code:load_file(Mod);
- _ ->
- ok
- end,
- {InitConf,EndConf,ExtraProps} =
- case erlang:function_exported(Mod,init_per_group,2) of
- true ->
- {{Mod,init_per_group},{Mod,end_per_group},[]};
- false ->
- ct_logs:log("TEST INFO", "init_per_group/2 and "
- "end_per_group/2 missing for group "
- "~p in ~p, using default.",
- [Name,Mod]),
- {{?MODULE,init_per_group},
- {?MODULE,end_per_group},
- [{suite,Mod}]}
- end,
- {conf,[{name,Name}|Props++ExtraProps],InitConf,TestSpec,EndConf}.
-
-%%%-----------------------------------------------------------------
-
get_all(Mod, ConfTests) ->
case catch apply(Mod, all, []) of
{'EXIT',_} ->
@@ -1218,133 +989,24 @@ get_all(Mod, ConfTests) ->
[{?MODULE,error_in_suite,[[{error,What}]]}];
SeqsAndTCs ->
%% expand group references in all() using ConfTests
- case catch expand_groups(SeqsAndTCs, ConfTests, Mod) of
+ case catch ct_groups:expand_groups(SeqsAndTCs,
+ ConfTests,
+ Mod) of
{error,_} = Error ->
[{?MODULE,error_in_suite,[[Error]]}];
Tests ->
- delete_subs(Tests, Tests)
+ ct_groups:delete_subs(Tests, Tests)
end
end;
Skip = {skip,_Reason} ->
Skip;
_ ->
Reason =
- list_to_atom("Bad return value from "++atom_to_list(Mod)++":all/0"),
+ list_to_atom("Bad return value from "++
+ atom_to_list(Mod)++":all/0"),
[{?MODULE,error_in_suite,[[{error,Reason}]]}]
end.
-expand_groups([H | T], ConfTests, Mod) ->
- [expand_groups(H, ConfTests, Mod) | expand_groups(T, ConfTests, Mod)];
-expand_groups([], _ConfTests, _Mod) ->
- [];
-expand_groups({group,Name}, ConfTests, Mod) ->
- expand_groups({group,Name,default,[]}, ConfTests, Mod);
-expand_groups({group,Name,default}, ConfTests, Mod) ->
- expand_groups({group,Name,default,[]}, ConfTests, Mod);
-expand_groups({group,Name,ORProps}, ConfTests, Mod) when is_list(ORProps) ->
- expand_groups({group,Name,ORProps,[]}, ConfTests, Mod);
-expand_groups({group,Name,ORProps,SubORSpec}, ConfTests, Mod) ->
- FindConf =
- fun(Conf = {conf,Props,Init,Ts,End}) ->
- case ?val(name, Props) of
- Name when ORProps == default ->
- [Conf];
- Name ->
- [{conf,[{name,Name}|ORProps],Init,Ts,End}];
- _ ->
- []
- end
- end,
- case lists:flatmap(FindConf, ConfTests) of
- [] ->
- throw({error,invalid_ref_msg(Name, Mod)});
- Matching when SubORSpec == [] ->
- Matching;
- Matching ->
- override_props(Matching, SubORSpec, Name,Mod)
- end;
-expand_groups(SeqOrTC, _ConfTests, _Mod) ->
- SeqOrTC.
-
-%% search deep for the matching conf test and modify it and any
-%% sub tests according to the override specification
-search_and_override([Conf = {conf,Props,Init,Tests,End}], ORSpec, Mod) ->
- Name = ?val(name, Props),
- case lists:keysearch(Name, 1, ORSpec) of
- {value,{Name,default}} ->
- [Conf];
- {value,{Name,ORProps}} ->
- [{conf,[{name,Name}|ORProps],Init,Tests,End}];
- {value,{Name,default,[]}} ->
- [Conf];
- {value,{Name,default,SubORSpec}} ->
- override_props([Conf], SubORSpec, Name,Mod);
- {value,{Name,ORProps,SubORSpec}} ->
- override_props([{conf,[{name,Name}|ORProps],
- Init,Tests,End}], SubORSpec, Name,Mod);
- _ ->
- [{conf,Props,Init,search_and_override(Tests,ORSpec,Mod),End}]
- end.
-
-%% Modify the Tests element according to the override specification
-override_props([{conf,Props,Init,Tests,End} | Confs], SubORSpec, Name,Mod) ->
- {Subs,SubORSpec1} = override_sub_props(Tests, [], SubORSpec, Mod),
- [{conf,Props,Init,Subs,End} | override_props(Confs, SubORSpec1, Name,Mod)];
-override_props([], [], _,_) ->
- [];
-override_props([], SubORSpec, Name,Mod) ->
- Es = [invalid_ref_msg(Name, element(1,Spec), Mod) || Spec <- SubORSpec],
- throw({error,Es}).
-
-override_sub_props([], New, ORSpec, _) ->
- {?rev(New),ORSpec};
-override_sub_props([T = {conf,Props,Init,Tests,End} | Ts],
- New, ORSpec, Mod) ->
- Name = ?val(name, Props),
- case lists:keysearch(Name, 1, ORSpec) of
- {value,Spec} -> % group found in spec
- Props1 =
- case element(2, Spec) of
- default -> Props;
- ORProps -> [{name,Name} | ORProps]
- end,
- case catch element(3, Spec) of
- Undef when Undef == [] ; 'EXIT' == element(1, Undef) ->
- override_sub_props(Ts, [{conf,Props1,Init,Tests,End} | New],
- lists:keydelete(Name, 1, ORSpec), Mod);
- SubORSpec when is_list(SubORSpec) ->
- case override_sub_props(Tests, [], SubORSpec, Mod) of
- {Subs,[]} ->
- override_sub_props(Ts, [{conf,Props1,Init,
- Subs,End} | New],
- lists:keydelete(Name, 1, ORSpec),
- Mod);
- {_,NonEmptySpec} ->
- Es = [invalid_ref_msg(Name, element(1, GrRef),
- Mod) || GrRef <- NonEmptySpec],
- throw({error,Es})
- end;
- BadGrSpec ->
- throw({error,{invalid_form,BadGrSpec}})
- end;
- _ -> % not a group in spec
- override_sub_props(Ts, [T | New], ORSpec, Mod)
- end;
-override_sub_props([TC | Ts], New, ORSpec, Mod) ->
- override_sub_props(Ts, [TC | New], ORSpec, Mod).
-
-invalid_ref_msg(Name, Mod) ->
- E = "Invalid reference to group "++
- atom_to_list(Name)++" in "++
- atom_to_list(Mod)++":all/0",
- list_to_atom(E).
-
-invalid_ref_msg(Name0, Name1, Mod) ->
- E = "Invalid reference to group "++
- atom_to_list(Name1)++" from "++atom_to_list(Name0)++
- " in "++atom_to_list(Mod)++":all/0",
- list_to_atom(E).
-
%%!============================================================
%%! The support for sequences by means of using sequences/0
%%! will be removed in OTP R15. The code below is only kept
diff --git a/lib/common_test/src/ct_groups.erl b/lib/common_test/src/ct_groups.erl
new file mode 100644
index 0000000000..24ca3826a8
--- /dev/null
+++ b/lib/common_test/src/ct_groups.erl
@@ -0,0 +1,599 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2004-2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%% @doc Common Test Framework callback module.
+%%%
+%%% <p>This module contains CT-internal helper functions for searching
+%%% through group specification trees and producing the resulting
+%%% tests.</p>
+
+-module(ct_groups).
+
+-export([find_groups/4]).
+-export([make_all_conf/3, make_all_conf/4, make_conf/5]).
+-export([delete_subs/2]).
+-export([expand_groups/3, search_and_override/3]).
+
+-define(val(Key, List), proplists:get_value(Key, List)).
+-define(val(Key, List, Def), proplists:get_value(Key, List, Def)).
+-define(rev(L), lists:reverse(L)).
+
+find_groups(Mod, GrNames, TCs, GroupDefs) when is_atom(GrNames) ;
+ (length(GrNames) == 1) ->
+ find_groups1(Mod, GrNames, TCs, GroupDefs);
+
+find_groups(Mod, Groups, TCs, GroupDefs) when Groups /= [] ->
+ lists:append([find_groups1(Mod, [GrNames], TCs, GroupDefs) ||
+ GrNames <- Groups]);
+
+find_groups(_Mod, [], _TCs, _GroupDefs) ->
+ [].
+
+%% GrNames == atom(): Single group name, perform full search
+%% GrNames == list(): List of groups, find all matching paths
+%% GrNames == [list()]: Search path terminated by last group in GrNames
+find_groups1(Mod, GrNames, TCs, GroupDefs) ->
+ {GrNames1,FindAll} =
+ case GrNames of
+ Name when is_atom(Name), Name /= all ->
+ {[Name],true};
+ [Path] when is_list(Path) ->
+ {Path,false};
+ Path ->
+ {Path,true}
+ end,
+ TCs1 = if (is_atom(TCs) and (TCs /= all)) or is_tuple(TCs) ->
+ [TCs];
+ true ->
+ TCs
+ end,
+ Found = find(Mod, GrNames1, TCs1, GroupDefs, [],
+ GroupDefs, FindAll),
+ [Conf || Conf <- Found, Conf /= 'NOMATCH'].
+
+%% Locate all groups
+find(Mod, all, all, [{Name,Props,Tests} | Gs], Known, Defs, _)
+ when is_atom(Name), is_list(Props), is_list(Tests) ->
+ cyclic_test(Mod, Name, Known),
+ trim(make_conf(Mod, Name, Props,
+ find(Mod, all, all, Tests, [Name | Known],
+ Defs, true))) ++
+ find(Mod, all, all, Gs, Known, Defs, true);
+
+%% Locate particular TCs in all groups
+find(Mod, all, TCs, [{Name,Props,Tests} | Gs], Known, Defs, _)
+ when is_atom(Name), is_list(Props), is_list(Tests) ->
+ cyclic_test(Mod, Name, Known),
+ Tests1 = rm_unwanted_tcs(Tests, TCs, []),
+ trim(make_conf(Mod, Name, Props,
+ find(Mod, all, TCs, Tests1, [Name | Known],
+ Defs, true))) ++
+ find(Mod, all, TCs, Gs, Known, Defs, true);
+
+%% Found next group is in search path
+find(Mod, [Name|GrNames]=SPath, TCs, [{Name,Props,Tests} | Gs], Known,
+ Defs, FindAll) when is_atom(Name), is_list(Props), is_list(Tests) ->
+ cyclic_test(Mod, Name, Known),
+ Tests1 = rm_unwanted_tcs(Tests, TCs, GrNames),
+ trim(make_conf(Mod, Name, Props,
+ find(Mod, GrNames, TCs, Tests1, [Name|Known],
+ Defs, FindAll))) ++
+ find(Mod, SPath, TCs, Gs, Known, Defs, FindAll);
+
+%% Group path terminated, stop the search
+find(Mod, [], TCs, Tests, _Known, _Defs, false) ->
+ Cases = lists:flatmap(fun(TC) when is_atom(TC), TCs == all ->
+ [{Mod,TC}];
+ ({group,_}) ->
+ [];
+ ({_,_}=TC) when TCs == all ->
+ [TC];
+ (TC) ->
+ if is_atom(TC) ->
+ Tuple = {Mod,TC},
+ case lists:member(Tuple, TCs) of
+ true ->
+ [Tuple];
+ false ->
+ case lists:member(TC, TCs) of
+ true -> [{Mod,TC}];
+ false -> []
+ end
+ end;
+ true ->
+ []
+ end
+ end, Tests),
+ if Cases == [] -> ['NOMATCH'];
+ true -> Cases
+ end;
+
+%% No more groups
+find(_Mod, [_|_], _TCs, [], _Known, _Defs, _) ->
+ ['NOMATCH'];
+
+%% Found group not next in search path
+find(Mod, GrNames, TCs, [{Name,Props,Tests} | Gs], Known,
+ Defs, FindAll) when is_atom(Name), is_list(Props), is_list(Tests) ->
+ cyclic_test(Mod, Name, Known),
+ Tests1 = rm_unwanted_tcs(Tests, TCs, GrNames),
+ trim(make_conf(Mod, Name, Props,
+ find(Mod, GrNames, TCs, Tests1, [Name|Known],
+ Defs, FindAll))) ++
+ find(Mod, GrNames, TCs, Gs, Known, Defs, FindAll);
+
+%% A nested group reference found on the top level
+find(Mod, GrNames, TCs, [{group,Name1} | Gs], Known, Defs, FindAll)
+ when is_atom(Name1) ->
+ find(Mod, GrNames, TCs, [expand(Mod, Name1, Defs) | Gs], Known,
+ Defs, FindAll);
+
+%% Undocumented remote group feature, use with caution
+find(Mod, GrNames, TCs, [{group, ExtMod, ExtGrp} | Gs], Known,
+ Defs, FindAll) when is_atom(ExtMod), is_atom(ExtGrp) ->
+ ExternalDefs = ExtMod:groups(),
+ ExternalTCs = find(ExtMod, ExtGrp, TCs, [{group, ExtGrp}],
+ [], ExternalDefs, FindAll),
+ ExternalTCs ++ find(Mod, GrNames, TCs, Gs, Known, Defs, FindAll);
+
+%% Group definition without properties, add an empty property list
+find(Mod, GrNames, TCs, [{Name1,Tests} | Gs], Known, Defs, FindAll)
+ when is_atom(Name1), is_list(Tests) ->
+ find(Mod, GrNames, TCs, [{Name1,[],Tests} | Gs], Known, Defs, FindAll);
+
+%% Save, and keep searching
+find(Mod, GrNames, TCs, [{ExternalTC, Case} = TC | Gs], Known,
+ Defs, FindAll) when is_atom(ExternalTC),
+ is_atom(Case) ->
+ [TC | find(Mod, GrNames, TCs, Gs, Known, Defs, FindAll)];
+
+%% Save test case
+find(Mod, GrNames, all, [TC | Gs], Known,
+ Defs, FindAll) when is_atom(TC) ->
+ [{Mod,TC} | find(Mod, GrNames, all, Gs, Known, Defs, FindAll)];
+
+%% Save test case
+find(Mod, GrNames, all, [{M,TC} | Gs], Known,
+ Defs, FindAll) when is_atom(M), M /= group, is_atom(TC) ->
+ [{M,TC} | find(Mod, GrNames, all, Gs, Known, Defs, FindAll)];
+
+%% Check if test case should be saved
+find(Mod, GrNames, TCs, [TC | Gs], Known,
+ Defs, FindAll) when is_atom(TC) orelse
+ ((size(TC) == 2) and (hd(TC) /= group)) ->
+ Case =
+ if is_atom(TC) ->
+ Tuple = {Mod,TC},
+ case lists:member(Tuple, TCs) of
+ true ->
+ Tuple;
+ false ->
+ case lists:member(TC, TCs) of
+ true -> {Mod,TC};
+ false -> []
+ end
+ end;
+ true ->
+ case lists:member(TC, TCs) of
+ true -> {Mod,TC};
+ false -> []
+ end
+ end,
+ if Case == [] ->
+ find(Mod, GrNames, TCs, Gs, Known, Defs, FindAll);
+ true ->
+ [Case | find(Mod, GrNames, TCs, Gs, Known, Defs, FindAll)]
+ end;
+
+%% Unexpected term in group list
+find(Mod, _GrNames, _TCs, [BadTerm | _Gs], Known, _Defs, _FindAll) ->
+ Where = if length(Known) == 0 ->
+ atom_to_list(Mod)++":groups/0";
+ true ->
+ "group "++atom_to_list(lists:last(Known))++
+ " in "++atom_to_list(Mod)++":groups/0"
+ end,
+ Term = io_lib:format("~p", [BadTerm]),
+ E = "Bad term "++lists:flatten(Term)++" in "++Where,
+ throw({error,list_to_atom(E)});
+
+%% No more groups
+find(_Mod, _GrNames, _TCs, [], _Known, _Defs, _) ->
+ [].
+
+%%%-----------------------------------------------------------------
+
+%% We have to always search bottom up to only remove a branch
+%% if there's 'NOMATCH' in the leaf (otherwise, the branch should
+%% be kept)
+
+trim({conf,Props,Init,Tests,End}) ->
+ try trim(Tests) of
+ [] -> [];
+ Tests1 -> [{conf,Props,Init,Tests1,End}]
+ catch
+ throw:_ -> []
+ end;
+
+trim(Tests) when is_list(Tests) ->
+ %% we need to compare the result of trimming each test on this
+ %% level, and only let a 'NOMATCH' fail the level if no
+ %% successful sub group can be found
+ Tests1 =
+ lists:flatmap(fun(Test) ->
+ IsConf = case Test of
+ {conf,_,_,_,_} ->
+ true;
+ _ ->
+ false
+ end,
+ try trim_test(Test) of
+ [] -> [];
+ Test1 when IsConf -> [{conf,Test1}];
+ Test1 -> [Test1]
+ catch
+ throw:_ -> ['NOMATCH']
+ end
+ end, Tests),
+ case lists:keymember(conf, 1, Tests1) of
+ true -> % at least one successful group
+ lists:flatmap(fun({conf,Test}) -> [Test];
+ ('NOMATCH') -> []; % ignore any 'NOMATCH'
+ (Test) -> [Test]
+ end, Tests1);
+ false ->
+ case lists:member('NOMATCH', Tests1) of
+ true ->
+ throw('NOMATCH');
+ false ->
+ Tests1
+ end
+ end.
+
+trim_test({conf,Props,Init,Tests,End}) ->
+ case trim(Tests) of
+ [] ->
+ [];
+ Tests1 ->
+ {conf,Props,Init,Tests1,End}
+ end;
+
+trim_test('NOMATCH') ->
+ throw('NOMATCH');
+
+trim_test(Test) ->
+ Test.
+
+%% GrNames is [] if the terminating group has been found. From
+%% that point, all specified tests should be included (as well as
+%% sub groups for deeper search).
+rm_unwanted_tcs(Tests, all, []) ->
+ Tests;
+
+rm_unwanted_tcs(Tests, TCs, []) ->
+ sort_tests(lists:flatmap(fun(Test) when is_tuple(Test),
+ (size(Test) > 2) ->
+ [Test];
+ (Test={group,_}) ->
+ [Test];
+ (Test={_M,TC}) ->
+ case lists:member(TC, TCs) of
+ true -> [Test];
+ false -> []
+ end;
+ (Test) when is_atom(Test) ->
+ case lists:keysearch(Test, 2, TCs) of
+ {value,_} ->
+ [Test];
+ _ ->
+ case lists:member(Test, TCs) of
+ true -> [Test];
+ false -> []
+ end
+ end;
+ (Test) -> [Test]
+ end, Tests), TCs);
+
+rm_unwanted_tcs(Tests, _TCs, _) ->
+ [Test || Test <- Tests, not is_atom(Test)].
+
+%% make sure the order of tests is according to the order in TCs
+sort_tests(Tests, TCs) when is_list(TCs)->
+ lists:sort(fun(T1, T2) ->
+ case {is_tc(T1),is_tc(T2)} of
+ {true,true} ->
+ (position(T1, TCs) =<
+ position(T2, TCs));
+ {false,true} ->
+ (position(T2, TCs) == (length(TCs)+1));
+ _ -> true
+
+ end
+ end, Tests);
+sort_tests(Tests, _) ->
+ Tests.
+
+is_tc(T) when is_atom(T) -> true;
+is_tc({group,_}) -> false;
+is_tc({_M,T}) when is_atom(T) -> true;
+is_tc(_) -> false.
+
+position(T, TCs) ->
+ position(T, TCs, 1).
+
+position(T, [T|_TCs], Pos) ->
+ Pos;
+position(T, [{_,T}|_TCs], Pos) ->
+ Pos;
+position({M,T}, [T|_TCs], Pos) when M /= group ->
+ Pos;
+position(T, [_|TCs], Pos) ->
+ position(T, TCs, Pos+1);
+position(_, [], Pos) ->
+ Pos.
+
+%%%-----------------------------------------------------------------
+
+delete_subs([{conf, _,_,_,_} = Conf | Confs], All) ->
+ All1 = delete_conf(Conf, All),
+ case is_sub(Conf, All1) of
+ true ->
+ delete_subs(Confs, All1);
+ false ->
+ delete_subs(Confs, All)
+ end;
+delete_subs([_Else | Confs], All) ->
+ delete_subs(Confs, All);
+delete_subs([], All) ->
+ All.
+
+delete_conf({conf,Props,_,_,_}, Confs) ->
+ Name = ?val(name, Props),
+ [Conf || Conf = {conf,Props0,_,_,_} <- Confs,
+ Name =/= ?val(name, Props0)].
+
+is_sub({conf,Props,_,_,_}=Conf, [{conf,_,_,Tests,_} | Confs]) ->
+ Name = ?val(name, Props),
+ case lists:any(fun({conf,Props0,_,_,_}) ->
+ case ?val(name, Props0) of
+ N when N == Name ->
+ true;
+ _ ->
+ false
+ end;
+ (_) ->
+ false
+ end, Tests) of
+ true ->
+ true;
+ false ->
+ is_sub(Conf, Tests) orelse is_sub(Conf, Confs)
+ end;
+
+is_sub(Conf, [_TC | Tests]) ->
+ is_sub(Conf, Tests);
+
+is_sub(_Conf, []) ->
+ false.
+
+
+cyclic_test(Mod, Name, Names) ->
+ case lists:member(Name, Names) of
+ true ->
+ E = "Cyclic reference to group "++atom_to_list(Name)++
+ " in "++atom_to_list(Mod)++":groups/0",
+ throw({error,list_to_atom(E)});
+ false ->
+ ok
+ end.
+
+expand(Mod, Name, Defs) ->
+ case lists:keysearch(Name, 1, Defs) of
+ {value,Def} ->
+ Def;
+ false ->
+ E = "Invalid group "++atom_to_list(Name)++
+ " in "++atom_to_list(Mod)++":groups/0",
+ throw({error,list_to_atom(E)})
+ end.
+
+make_all_conf(Dir, Mod, Props, TestSpec) ->
+ case code:is_loaded(Mod) of
+ false ->
+ code:load_abs(filename:join(Dir,atom_to_list(Mod)));
+ _ ->
+ ok
+ end,
+ make_all_conf(Mod, Props, TestSpec).
+
+make_all_conf(Mod, Props, TestSpec) ->
+ case catch apply(Mod, groups, []) of
+ {'EXIT',_} ->
+ exit({invalid_group_definition,Mod});
+ GroupDefs when is_list(GroupDefs) ->
+ case catch find_groups(Mod, all, TestSpec, GroupDefs) of
+ {error,_} = Error ->
+ %% this makes test_server call error_in_suite as first
+ %% (and only) test case so we can report Error properly
+ [{ct_framework,error_in_suite,[[Error]]}];
+ [] ->
+ exit({invalid_group_spec,Mod});
+ _ConfTests ->
+ make_conf(Mod, all, Props, TestSpec)
+ end
+ end.
+
+make_conf(Dir, Mod, Name, Props, TestSpec) ->
+ case code:is_loaded(Mod) of
+ false ->
+ code:load_abs(filename:join(Dir,atom_to_list(Mod)));
+ _ ->
+ ok
+ end,
+ make_conf(Mod, Name, Props, TestSpec).
+
+make_conf(Mod, Name, Props, TestSpec) ->
+ case code:is_loaded(Mod) of
+ false ->
+ code:load_file(Mod);
+ _ ->
+ ok
+ end,
+ {InitConf,EndConf,ExtraProps} =
+ case erlang:function_exported(Mod,init_per_group,2) of
+ true ->
+ {{Mod,init_per_group},{Mod,end_per_group},[]};
+ false ->
+ ct_logs:log("TEST INFO", "init_per_group/2 and "
+ "end_per_group/2 missing for group "
+ "~p in ~p, using default.",
+ [Name,Mod]),
+ {{ct_framework,init_per_group},
+ {ct_framework,end_per_group},
+ [{suite,Mod}]}
+ end,
+ {conf,[{name,Name}|Props++ExtraProps],InitConf,TestSpec,EndConf}.
+
+%%%-----------------------------------------------------------------
+
+expand_groups([H | T], ConfTests, Mod) ->
+ [expand_groups(H, ConfTests, Mod) | expand_groups(T, ConfTests, Mod)];
+expand_groups([], _ConfTests, _Mod) ->
+ [];
+expand_groups({group,Name}, ConfTests, Mod) ->
+ expand_groups({group,Name,default,[]}, ConfTests, Mod);
+expand_groups({group,Name,default}, ConfTests, Mod) ->
+ expand_groups({group,Name,default,[]}, ConfTests, Mod);
+expand_groups({group,Name,ORProps}, ConfTests, Mod) when is_list(ORProps) ->
+ expand_groups({group,Name,ORProps,[]}, ConfTests, Mod);
+expand_groups({group,Name,ORProps,SubORSpec}, ConfTests, Mod) ->
+ FindConf =
+ fun(Conf = {conf,Props,Init,Ts,End}) ->
+ case ?val(name, Props) of
+ Name when ORProps == default ->
+ [Conf];
+ Name ->
+ Props1 = case ?val(suite, Props) of
+ undefined ->
+ ORProps;
+ SuiteName ->
+ [{suite,SuiteName}|ORProps]
+ end,
+ [{conf,[{name,Name}|Props1],Init,Ts,End}];
+ _ ->
+ []
+ end
+ end,
+ case lists:flatmap(FindConf, ConfTests) of
+ [] ->
+ throw({error,invalid_ref_msg(Name, Mod)});
+ Matching when SubORSpec == [] ->
+ Matching;
+ Matching ->
+ override_props(Matching, SubORSpec, Name,Mod)
+ end;
+expand_groups(SeqOrTC, _ConfTests, _Mod) ->
+ SeqOrTC.
+
+%% search deep for the matching conf test and modify it and any
+%% sub tests according to the override specification
+search_and_override([Conf = {conf,Props,Init,Tests,End}], ORSpec, Mod) ->
+ InsProps = fun(GrName, undefined, Ps) ->
+ [{name,GrName} | Ps];
+ (GrName, Suite, Ps) ->
+ [{name,GrName}, {suite,Suite} | Ps]
+ end,
+ Name = ?val(name, Props),
+ Suite = ?val(suite, Props),
+ case lists:keysearch(Name, 1, ORSpec) of
+ {value,{Name,default}} ->
+ [Conf];
+ {value,{Name,ORProps}} ->
+ [{conf,InsProps(Name,Suite,ORProps),Init,Tests,End}];
+ {value,{Name,default,[]}} ->
+ [Conf];
+ {value,{Name,default,SubORSpec}} ->
+ override_props([Conf], SubORSpec, Name,Mod);
+ {value,{Name,ORProps,SubORSpec}} ->
+ override_props([{conf,InsProps(Name,Suite,ORProps),
+ Init,Tests,End}], SubORSpec, Name,Mod);
+ _ ->
+ [{conf,Props,Init,search_and_override(Tests,ORSpec,Mod),End}]
+ end.
+
+%% Modify the Tests element according to the override specification
+override_props([{conf,Props,Init,Tests,End} | Confs], SubORSpec, Name,Mod) ->
+ {Subs,SubORSpec1} = override_sub_props(Tests, [], SubORSpec, Mod),
+ [{conf,Props,Init,Subs,End} | override_props(Confs, SubORSpec1, Name,Mod)];
+override_props([], [], _,_) ->
+ [];
+override_props([], SubORSpec, Name,Mod) ->
+ Es = [invalid_ref_msg(Name, element(1,Spec), Mod) || Spec <- SubORSpec],
+ throw({error,Es}).
+
+override_sub_props([], New, ORSpec, _) ->
+ {?rev(New),ORSpec};
+override_sub_props([T = {conf,Props,Init,Tests,End} | Ts],
+ New, ORSpec, Mod) ->
+ Name = ?val(name, Props),
+ Suite = ?val(suite, Props),
+ case lists:keysearch(Name, 1, ORSpec) of
+ {value,Spec} -> % group found in spec
+ Props1 =
+ case element(2, Spec) of
+ default -> Props;
+ ORProps when Suite == undefined -> [{name,Name} | ORProps];
+ ORProps -> [{name,Name}, {suite,Suite} | ORProps]
+ end,
+ case catch element(3, Spec) of
+ Undef when Undef == [] ; 'EXIT' == element(1, Undef) ->
+ override_sub_props(Ts, [{conf,Props1,Init,Tests,End} | New],
+ lists:keydelete(Name, 1, ORSpec), Mod);
+ SubORSpec when is_list(SubORSpec) ->
+ case override_sub_props(Tests, [], SubORSpec, Mod) of
+ {Subs,[]} ->
+ override_sub_props(Ts, [{conf,Props1,Init,
+ Subs,End} | New],
+ lists:keydelete(Name, 1, ORSpec),
+ Mod);
+ {_,NonEmptySpec} ->
+ Es = [invalid_ref_msg(Name, element(1, GrRef),
+ Mod) || GrRef <- NonEmptySpec],
+ throw({error,Es})
+ end;
+ BadGrSpec ->
+ throw({error,{invalid_form,BadGrSpec}})
+ end;
+ _ -> % not a group in spec
+ override_sub_props(Ts, [T | New], ORSpec, Mod)
+ end;
+override_sub_props([TC | Ts], New, ORSpec, Mod) ->
+ override_sub_props(Ts, [TC | New], ORSpec, Mod).
+
+invalid_ref_msg(Name, Mod) ->
+ E = "Invalid reference to group "++
+ atom_to_list(Name)++" in "++
+ atom_to_list(Mod)++":all/0",
+ list_to_atom(E).
+
+invalid_ref_msg(Name0, Name1, Mod) ->
+ E = "Invalid reference to group "++
+ atom_to_list(Name1)++" from "++atom_to_list(Name0)++
+ " in "++atom_to_list(Mod)++":all/0",
+ list_to_atom(E).
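To make the GrNames forms described in the find_groups1/4 comments above more concrete, here is a hypothetical group tree together with the three accepted forms. The module, group and test case names are invented, and the calls are shown as comments since ct_groups is a CT-internal module:

-module(groups_search_example).
-export([groups/0]).

%% Hypothetical group tree, for illustration only.
groups() ->
    [{top, [], [tc1, {group,sub}]},
     {sub, [parallel], [tc2, tc3]}].

%% With Defs = groups_search_example:groups():
%%
%%   ct_groups:find_groups(groups_search_example, top, all, Defs)
%%     single group name (atom): full search for 'top'
%%
%%   ct_groups:find_groups(groups_search_example, [top,sub], all, Defs)
%%     list of group names: all matching paths, one search per name
%%
%%   ct_groups:find_groups(groups_search_example, [[top,sub]], [tc3], Defs)
%%     [list()]: a search path terminated by the last group ('sub'),
%%     here restricted to test case tc3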
diff --git a/lib/common_test/src/ct_master.erl b/lib/common_test/src/ct_master.erl
index 99bec3ea09..f29eba605c 100644
--- a/lib/common_test/src/ct_master.erl
+++ b/lib/common_test/src/ct_master.erl
@@ -51,7 +51,7 @@
%%% {testcase,Cases} | {spec,TestSpecs} | {allow_user_terms,Bool} |
%%% {logdir,LogDir} | {event_handler,EventHandlers} |
%%% {silent_connections,Conns} | {cover,CoverSpecFile} |
-%%% {userconfig, UserCfgFiles}
+%%% {cover_stop,Bool} | {userconfig, UserCfgFiles}
%%% CfgFiles = string() | [string()]
%%% TestDirs = string() | [string()]
%%% Suites = atom() | [atom()]
diff --git a/lib/common_test/src/ct_master_logs.erl b/lib/common_test/src/ct_master_logs.erl
index d76288feef..84f175c0a9 100644
--- a/lib/common_test/src/ct_master_logs.erl
+++ b/lib/common_test/src/ct_master_logs.erl
@@ -204,7 +204,7 @@ open_ct_master_log(Dir) ->
{ok,Fd} = file:open(FullName,[write]),
io:put_chars(Fd,header("Common Test Master Log", {[],[1,2],[]})),
%% maybe add config info here later
- io:put_chars(config_table([])),
+ io:put_chars(Fd,config_table([])),
io:put_chars(Fd,
"<style>\n"
"div.ct_internal { background:lightgrey; color:black }\n"
diff --git a/lib/common_test/src/ct_netconfc.erl b/lib/common_test/src/ct_netconfc.erl
index 52fe9599ce..294b82bff6 100644
--- a/lib/common_test/src/ct_netconfc.erl
+++ b/lib/common_test/src/ct_netconfc.erl
@@ -968,7 +968,7 @@ close_session(Client) ->
%% @end
%%----------------------------------------------------------------------
close_session(Client, Timeout) ->
- call(Client,{send_rpc_op, close_session, [], Timeout}).
+ call(Client,{send_rpc_op, close_session, [], Timeout}, true).
%%----------------------------------------------------------------------
@@ -1121,17 +1121,38 @@ close(Client) ->
%% Internal functions
%%----------------------------------------------------------------------
call(Client, Msg) ->
- call(Client, Msg, infinity).
-call(Client, Msg, Timeout) ->
+ call(Client, Msg, infinity, false).
+call(Client, Msg, Timeout) when is_integer(Timeout); Timeout==infinity ->
+ call(Client, Msg, Timeout, false);
+call(Client, Msg, WaitStop) when is_boolean(WaitStop) ->
+ call(Client, Msg, infinity, WaitStop).
+call(Client, Msg, Timeout, WaitStop) ->
case get_handle(Client) of
{ok,Pid} ->
case ct_gen_conn:call(Pid,Msg,Timeout) of
- {error,{process_down,Client,noproc}} ->
+ {error,{process_down,Pid,noproc}} ->
{error,no_such_client};
- {error,{process_down,Client,normal}} ->
+ {error,{process_down,Pid,normal}} when WaitStop ->
+ %% This will happen when the server closes the
+ %% connection before the client has received the
+ %% rpc-reply on close-session.
+ ok;
+ {error,{process_down,Pid,normal}} ->
{error,closed};
- {error,{process_down,Client,Reason}} ->
+ {error,{process_down,Pid,Reason}} ->
{error,{closed,Reason}};
+ Other when WaitStop ->
+ MRef = erlang:monitor(process,Pid),
+ receive
+ {'DOWN',MRef,process,Pid,Normal} when Normal==normal;
+ Normal==noproc ->
+ Other;
+ {'DOWN',MRef,process,Pid,Reason} ->
+ {error,{{closed,Reason},Other}}
+ after Timeout ->
+ erlang:demonitor(MRef, [flush]),
+ {error,{timeout,Other}}
+ end;
Other ->
Other
end;
diff --git a/lib/common_test/src/ct_run.erl b/lib/common_test/src/ct_run.erl
index 4a6a3cdcac..eb05c90ba8 100644
--- a/lib/common_test/src/ct_run.erl
+++ b/lib/common_test/src/ct_run.erl
@@ -58,6 +58,7 @@
vts,
shell,
cover,
+ cover_stop,
coverspec,
step,
logdir,
@@ -245,6 +246,7 @@ script_start1(Parent, Args) ->
Vts = get_start_opt(vts, true, Args),
Shell = get_start_opt(shell, true, Args),
Cover = get_start_opt(cover, fun([CoverFile]) -> ?abs(CoverFile) end, Args),
+ CoverStop = get_start_opt(cover_stop, fun([CS]) -> list_to_atom(CS) end, Args),
LogDir = get_start_opt(logdir, fun([LogD]) -> LogD end, Args),
LogOpts = get_start_opt(logopts, fun(Os) -> [list_to_atom(O) || O <- Os] end,
[], Args),
@@ -329,7 +331,8 @@ script_start1(Parent, Args) ->
end,
StartOpts = #opts{label = Label, profile = Profile,
- vts = Vts, shell = Shell, cover = Cover,
+ vts = Vts, shell = Shell,
+ cover = Cover, cover_stop = CoverStop,
logdir = LogDir, logopts = LogOpts,
basic_html = BasicHtml,
verbosity = Verbosity,
@@ -416,6 +419,9 @@ script_start2(StartOpts = #opts{vts = undefined,
Cover =
choose_val(StartOpts#opts.cover,
SpecStartOpts#opts.cover),
+ CoverStop =
+ choose_val(StartOpts#opts.cover_stop,
+ SpecStartOpts#opts.cover_stop),
MultTT =
choose_val(StartOpts#opts.multiply_timetraps,
SpecStartOpts#opts.multiply_timetraps),
@@ -475,6 +481,7 @@ script_start2(StartOpts = #opts{vts = undefined,
profile = Profile,
testspecs = Specs,
cover = Cover,
+ cover_stop = CoverStop,
logdir = LogDir,
logopts = AllLogOpts,
basic_html = BasicHtml,
@@ -723,6 +730,7 @@ script_usage() ->
"\n\t[-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]"
"\n\t[-stylesheet CSSFile]"
"\n\t[-cover CoverCfgFile]"
+ "\n\t[-cover_stop Bool]"
"\n\t[-event_handler EvHandler1 EvHandler2 .. EvHandlerN]"
"\n\t[-ct_hooks CTHook1 CTHook2 .. CTHookN]"
"\n\t[-include InclDir1 InclDir2 .. InclDirN]"
@@ -745,6 +753,7 @@ script_usage() ->
"\n\t[-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]"
"\n\t[-stylesheet CSSFile]"
"\n\t[-cover CoverCfgFile]"
+ "\n\t[-cover_stop Bool]"
"\n\t[-event_handler EvHandler1 EvHandler2 .. EvHandlerN]"
"\n\t[-ct_hooks CTHook1 CTHook2 .. CTHookN]"
"\n\t[-include InclDir1 InclDir2 .. InclDirN]"
@@ -938,6 +947,7 @@ run_test2(StartOpts) ->
%% code coverage
Cover = get_start_opt(cover,
fun(CoverFile) -> ?abs(CoverFile) end, StartOpts),
+ CoverStop = get_start_opt(cover_stop, value, StartOpts),
%% timetrap manipulation
MultiplyTT = get_start_opt(multiply_timetraps, value, 1, StartOpts),
@@ -1000,7 +1010,8 @@ run_test2(StartOpts) ->
Step = get_start_opt(step, value, StartOpts),
Opts = #opts{label = Label, profile = Profile,
- cover = Cover, step = Step, logdir = LogDir,
+ cover = Cover, cover_stop = CoverStop,
+ step = Step, logdir = LogDir,
logopts = LogOpts, basic_html = BasicHtml,
config = CfgFiles,
verbosity = Verbosity,
@@ -1063,6 +1074,8 @@ run_spec_file(Relaxed,
AllConfig = merge_vals([CfgFiles, SpecOpts#opts.config]),
Cover = choose_val(Opts#opts.cover,
SpecOpts#opts.cover),
+ CoverStop = choose_val(Opts#opts.cover_stop,
+ SpecOpts#opts.cover_stop),
MultTT = choose_val(Opts#opts.multiply_timetraps,
SpecOpts#opts.multiply_timetraps),
ScaleTT = choose_val(Opts#opts.scale_timetraps,
@@ -1103,6 +1116,7 @@ run_spec_file(Relaxed,
Opts1 = Opts#opts{label = Label,
profile = Profile,
cover = Cover,
+ cover_stop = CoverStop,
logdir = which(logdir, LogDir),
logopts = AllLogOpts,
stylesheet = Stylesheet,
@@ -1272,7 +1286,8 @@ run_dir(Opts = #opts{logdir = LogDir,
reformat_result(catch do_run(tests(Dir2, Mod),
[], Opts1, StartOpts));
_ ->
- reformat_result(catch do_run(tests(Dir2, Mod, GsAndCs),
+ reformat_result(catch do_run(tests(Dir2, Mod,
+ GsAndCs),
[], Opts1, StartOpts))
end;
@@ -1281,7 +1296,8 @@ run_dir(Opts = #opts{logdir = LogDir,
[_,_|_] when GsAndCs /= [] ->
exit({error,multiple_suites_and_cases});
[{Dir2,Mod}] when GsAndCs /= [] ->
- reformat_result(catch do_run(tests(Dir2, Mod, GsAndCs),
+ reformat_result(catch do_run(tests(Dir2, Mod,
+ GsAndCs),
[], Opts1, StartOpts));
DirMods ->
reformat_result(catch do_run(tests(DirMods),
@@ -1374,6 +1390,7 @@ get_data_for_node(#testspec{label = Labels,
verbosity = VLvls,
silent_connections = SilentConnsList,
cover = CoverFs,
+ cover_stop = CoverStops,
config = Cfgs,
userconfig = UsrCfgs,
event_handler = EvHs,
@@ -1405,6 +1422,7 @@ get_data_for_node(#testspec{label = Labels,
SCs -> SCs
end,
Cover = proplists:get_value(Node, CoverFs),
+ CoverStop = proplists:get_value(Node, CoverStops),
MT = proplists:get_value(Node, MTs),
ST = proplists:get_value(Node, STs),
CreatePrivDir = proplists:get_value(Node, PDs),
@@ -1423,6 +1441,7 @@ get_data_for_node(#testspec{label = Labels,
verbosity = Verbosity,
silent_connections = SilentConns,
cover = Cover,
+ cover_stop = CoverStop,
config = ConfigFiles,
event_handlers = EvHandlers,
ct_hooks = FiltCTHooks,
@@ -1536,17 +1555,36 @@ groups_and_cases(Gs, Cs) when ((Gs == undefined) or (Gs == [])) and
((Cs == undefined) or (Cs == [])) ->
[];
groups_and_cases(Gs, Cs) when Gs == undefined ; Gs == [] ->
- [ensure_atom(C) || C <- listify(Cs)];
-groups_and_cases(Gs, Cs) when Cs == undefined ; Cs == [] ->
- [{ensure_atom(G),all} || G <- listify(Gs)];
-groups_and_cases(G, Cs) when is_atom(G) ->
- [{G,[ensure_atom(C) || C <- listify(Cs)]}];
-groups_and_cases([G], Cs) ->
- [{ensure_atom(G),[ensure_atom(C) || C <- listify(Cs)]}];
-groups_and_cases([_,_|_] , Cs) when Cs =/= [] ->
- {error,multiple_groups_and_cases};
-groups_and_cases(_Gs, _Cs) ->
- {error,incorrect_group_or_case_option}.
+ if (Cs == all) or (Cs == [all]) or (Cs == ["all"]) -> all;
+ true -> [ensure_atom(C) || C <- listify(Cs)]
+ end;
+groups_and_cases(GOrGs, Cs) when (is_atom(GOrGs) orelse
+ (is_list(GOrGs) andalso
+ (is_atom(hd(GOrGs)) orelse
+ (is_list(hd(GOrGs)) andalso
+ is_atom(hd(hd(GOrGs))))))) ->
+ if (Cs == undefined) or (Cs == []) or
+ (Cs == all) or (Cs == [all]) or (Cs == ["all"]) ->
+ [{GOrGs,all}];
+ true ->
+ [{GOrGs,[ensure_atom(C) || C <- listify(Cs)]}]
+ end;
+groups_and_cases(Gs, Cs) when is_integer(hd(hd(Gs))) ->
+ %% if this is a list of strings, it comes from 'ct_run -group G1 G2 ...'
+ %% and the strings need to be parsed into terms
+ Gs1 =
+ if (Gs == [all]) or (Gs == ["all"]) ->
+ all;
+ true ->
+ lists:map(fun(G) ->
+ {ok,Ts,_} = erl_scan:string(G++"."),
+ {ok,Term} = erl_parse:parse_term(Ts),
+ Term
+ end, Gs)
+ end,
+ groups_and_cases(Gs1, Cs);
+groups_and_cases(Gs, Cs) ->
+ {error,{incorrect_group_or_case_option,Gs,Cs}}.
tests(TestDir, Suites, []) when is_list(TestDir), is_integer(hd(TestDir)) ->
[{?testdir(TestDir,Suites),ensure_atom(Suites),all}];
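A few illustrative input/output pairs for the rewritten groups_and_cases/2 above (group and case names are hypothetical); the string forms are what the 'ct_run -group ...' command line delivers:

    %% groups_and_cases(undefined, all)      -> all
    %% groups_and_cases(undefined, [tc1])    -> [tc1]
    %% groups_and_cases(g1, undefined)       -> [{g1,all}]
    %% groups_and_cases([g1,g2], [tc1])      -> [{[g1,g2],[tc1]}]
    %% groups_and_cases(["g1"], ["all"])     -> [{[g1],all}]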
@@ -1576,14 +1614,7 @@ do_run(Tests, Misc, LogDir, LogOpts) when is_list(Misc),
StepOpts ->
#opts{step = StepOpts}
end,
- Opts1 =
- case proplists:get_value(cover, Misc) of
- undefined ->
- Opts;
- CoverFile ->
- Opts#opts{cover = CoverFile}
- end,
- do_run(Tests, [], Opts1#opts{logdir = LogDir}, []);
+ do_run(Tests, [], Opts#opts{logdir = LogDir}, []);
do_run(Tests, Skip, Opts, Args) when is_record(Opts, opts) ->
#opts{label = Label, profile = Profile, cover = Cover,
@@ -1617,7 +1648,13 @@ do_run(Tests, Skip, Opts, Args) when is_record(Opts, opts) ->
{error,Reason} ->
exit({error,Reason});
CoverSpec ->
- Opts#opts{coverspec = CoverSpec}
+ CoverStop =
+ case Opts#opts.cover_stop of
+ undefined -> true;
+ Stop -> Stop
+ end,
+ Opts#opts{coverspec = CoverSpec,
+ cover_stop = CoverStop}
end
end,
%% This env variable is used by test_server to determine
@@ -1687,11 +1724,15 @@ compile_and_run(Tests, Skip, Opts, Args) ->
SavedErrors = save_make_errors(SuiteMakeErrors),
ct_repeat:log_loop_info(Args),
- {Tests1,Skip1} = final_tests(Tests,Skip,SavedErrors),
-
- ReleaseSh = proplists:get_value(release_shell, Args),
- ct_util:set_testdata({release_shell,ReleaseSh}),
- possibly_spawn(ReleaseSh == true, Tests1, Skip1, Opts);
+ try final_tests(Tests,Skip,SavedErrors) of
+ {Tests1,Skip1} ->
+ ReleaseSh = proplists:get_value(release_shell, Args),
+ ct_util:set_testdata({release_shell,ReleaseSh}),
+ possibly_spawn(ReleaseSh == true, Tests1, Skip1, Opts)
+ catch
+ _:BadFormat ->
+ {error,BadFormat}
+ end;
false ->
io:nl(),
ct_util:stop(clean),
@@ -1961,22 +2002,21 @@ final_tests1([{TestDir,Suite,GrsOrCs}|Tests], Final, Skip, Bad) when
%% for now, only flat group defs are allowed as
%% start options and test spec terms
fun({all,all}) ->
- ct_framework:make_all_conf(TestDir,
- Suite, []);
+ [ct_groups:make_conf(TestDir, Suite, all, [], all)];
({skipped,Group,TCs}) ->
- [ct_framework:make_conf(TestDir, Suite,
- Group, [skipped], TCs)];
- ({GrSpec = {Group,_},TCs}) ->
+ [ct_groups:make_conf(TestDir, Suite,
+ Group, [skipped], TCs)];
+ ({GrSpec = {GroupName,_},TCs}) ->
Props = [{override,GrSpec}],
- [ct_framework:make_conf(TestDir, Suite,
- Group, Props, TCs)];
- ({GrSpec = {Group,_,_},TCs}) ->
+ [ct_groups:make_conf(TestDir, Suite,
+ GroupName, Props, TCs)];
+ ({GrSpec = {GroupName,_,_},TCs}) ->
Props = [{override,GrSpec}],
- [ct_framework:make_conf(TestDir, Suite,
- Group, Props, TCs)];
- ({Group,TCs}) ->
- [ct_framework:make_conf(TestDir, Suite,
- Group, [], TCs)];
+ [ct_groups:make_conf(TestDir, Suite,
+ GroupName, Props, TCs)];
+ ({GroupOrGroups,TCs}) ->
+ [ct_groups:make_conf(TestDir, Suite,
+ GroupOrGroups, [], TCs)];
(TC) ->
[TC]
end, GrsOrCs),
@@ -1988,12 +2028,12 @@ final_tests1([], Final, Skip, _Bad) ->
{lists:reverse(Final),Skip}.
final_skip([{TestDir,Suite,{all,all},Reason}|Skips], Final) ->
- SkipConf = ct_framework:make_conf(TestDir, Suite, all, [], all),
+ SkipConf = ct_groups:make_conf(TestDir, Suite, all, [], all),
Skip = {TestDir,Suite,SkipConf,Reason},
final_skip(Skips, [Skip|Final]);
final_skip([{TestDir,Suite,{Group,TCs},Reason}|Skips], Final) ->
- Conf = ct_framework:make_conf(TestDir, Suite, Group, [], TCs),
+ Conf = ct_groups:make_conf(TestDir, Suite, Group, [], TCs),
Skip = {TestDir,Suite,Conf,Reason},
final_skip(Skips, [Skip|Final]);
@@ -2120,7 +2160,8 @@ do_run_test(Tests, Skip, Opts) ->
%% tell test_server which modules should be cover compiled
%% note that actual compilation is done when tests start
test_server_ctrl:cover(CovApp, CovFile, CovExcl, CovIncl,
- CovCross, CovExport, CovLevel),
+ CovCross, CovExport, CovLevel,
+ Opts#opts.cover_stop),
%% save cover data (used e.g. to add nodes dynamically)
ct_util:set_testdata({cover,CovData}),
%% start cover on specified nodes
@@ -2265,9 +2306,11 @@ add_jobs([{TestDir,all,_}|Tests], Skip, Opts, CleanUp) ->
wait_for_idle(),
add_jobs(Tests, Skip, Opts, CleanUp)
end;
-add_jobs([{TestDir,[Suite],all}|Tests], Skip, Opts, CleanUp) when is_atom(Suite) ->
+add_jobs([{TestDir,[Suite],all}|Tests], Skip,
+ Opts, CleanUp) when is_atom(Suite) ->
add_jobs([{TestDir,Suite,all}|Tests], Skip, Opts, CleanUp);
-add_jobs([{TestDir,Suites,all}|Tests], Skip, Opts, CleanUp) when is_list(Suites) ->
+add_jobs([{TestDir,Suites,all}|Tests], Skip,
+ Opts, CleanUp) when is_list(Suites) ->
Name = get_name(TestDir) ++ ".suites",
case catch test_server_ctrl:add_module_with_skip(Name, Suites,
skiplist(TestDir,Skip)) of
@@ -2282,7 +2325,8 @@ add_jobs([{TestDir,Suite,all}|Tests], Skip, Opts, CleanUp) ->
ok ->
Name = get_name(TestDir) ++ "." ++ atom_to_list(Suite),
case catch test_server_ctrl:add_module_with_skip(Name, [Suite],
- skiplist(TestDir,Skip)) of
+ skiplist(TestDir,
+ Skip)) of
{'EXIT',_} ->
CleanUp;
_ ->
@@ -2305,15 +2349,24 @@ add_jobs([{TestDir,Suite,Confs}|Tests], Skip, Opts, CleanUp) when
GrTestName =
case Confs of
[Conf] ->
- "." ++ atom_to_list(Group(Conf)) ++ TCTestName(TestCases(Conf));
+ case Group(Conf) of
+ GrName when is_atom(GrName) ->
+ "." ++ atom_to_list(GrName) ++
+ TCTestName(TestCases(Conf));
+ _ ->
+ ".groups" ++ TCTestName(TestCases(Conf))
+ end;
_ ->
".groups"
end,
TestName = get_name(TestDir) ++ "." ++ atom_to_list(Suite) ++ GrTestName,
case maybe_interpret(Suite, init_per_group, Opts) of
ok ->
- case catch test_server_ctrl:add_conf_with_skip(TestName, Suite, Confs,
- skiplist(TestDir,Skip)) of
+ case catch test_server_ctrl:add_conf_with_skip(TestName,
+ Suite,
+ Confs,
+ skiplist(TestDir,
+ Skip)) of
{'EXIT',_} ->
CleanUp;
_ ->
@@ -2325,18 +2378,21 @@ add_jobs([{TestDir,Suite,Confs}|Tests], Skip, Opts, CleanUp) when
end;
%% test case
-add_jobs([{TestDir,Suite,[Case]}|Tests], Skip, Opts, CleanUp) when is_atom(Case) ->
+add_jobs([{TestDir,Suite,[Case]}|Tests],
+ Skip, Opts, CleanUp) when is_atom(Case) ->
add_jobs([{TestDir,Suite,Case}|Tests], Skip, Opts, CleanUp);
-add_jobs([{TestDir,Suite,Cases}|Tests], Skip, Opts, CleanUp) when is_list(Cases) ->
+add_jobs([{TestDir,Suite,Cases}|Tests],
+ Skip, Opts, CleanUp) when is_list(Cases) ->
Cases1 = lists:map(fun({GroupName,_}) when is_atom(GroupName) -> GroupName;
(Case) -> Case
end, Cases),
case maybe_interpret(Suite, Cases1, Opts) of
ok ->
- Name = get_name(TestDir) ++ "." ++ atom_to_list(Suite) ++ ".cases",
+ Name = get_name(TestDir) ++ "." ++ atom_to_list(Suite) ++ ".cases",
case catch test_server_ctrl:add_cases_with_skip(Name, Suite, Cases1,
- skiplist(TestDir,Skip)) of
+ skiplist(TestDir,
+ Skip)) of
{'EXIT',_} ->
CleanUp;
_ ->
@@ -2352,7 +2408,8 @@ add_jobs([{TestDir,Suite,Case}|Tests], Skip, Opts, CleanUp) when is_atom(Case) -
Name = get_name(TestDir) ++ "." ++ atom_to_list(Suite) ++ "." ++
atom_to_list(Case),
case catch test_server_ctrl:add_case_with_skip(Name, Suite, Case,
- skiplist(TestDir,Skip)) of
+ skiplist(TestDir,
+ Skip)) of
{'EXIT',_} ->
CleanUp;
_ ->
@@ -2387,7 +2444,8 @@ skiplist(Dir, [{Dir,all,Cmt}|Skip]) ->
%% we need to turn 'all' into list of modules since
%% test_server doesn't do skips on Dir level
Ss = filelib:wildcard(filename:join(Dir, "*_SUITE.beam")),
- [{list_to_atom(filename:basename(S,".beam")),Cmt} || S <- Ss] ++ skiplist(Dir,Skip);
+ [{list_to_atom(filename:basename(S,".beam")),Cmt} || S <- Ss] ++
+ skiplist(Dir,Skip);
skiplist(Dir, [{Dir,S,Cmt}|Skip]) ->
[{S,Cmt} | skiplist(Dir, Skip)];
skiplist(Dir, [{Dir,S,C,Cmt}|Skip]) ->
@@ -2447,8 +2505,10 @@ run_make(Targets, TestDir0, Mod, UserInclude) ->
FileTest = fun(F, suites) -> is_suite(F);
(F, helpmods) -> not is_suite(F)
end,
- Files = lists:flatmap(fun({F,out_of_date}) ->
- case FileTest(F, Targets) of
+ Files =
+ lists:flatmap(fun({F,out_of_date}) ->
+ case FileTest(F,
+ Targets) of
true -> [F];
false -> []
end;
@@ -2584,6 +2644,9 @@ merge_arguments([LogDir={logdir,_}|Args], Merged) ->
merge_arguments([CoverFile={cover,_}|Args], Merged) ->
merge_arguments(Args, handle_arg(replace, CoverFile, Merged));
+merge_arguments([CoverStop={cover_stop,_}|Args], Merged) ->
+ merge_arguments(Args, handle_arg(replace, CoverStop, Merged));
+
merge_arguments([{'case',TC}|Args], Merged) ->
merge_arguments(Args, handle_arg(merge, {testcase,TC}, Merged));
@@ -2792,11 +2855,14 @@ opts2args(EnvStartOpts) ->
lists:flatmap(fun({exit_status,ExitStatusOpt}) when is_atom(ExitStatusOpt) ->
[{exit_status,[atom_to_list(ExitStatusOpt)]}];
({halt_with,{HaltM,HaltF}}) ->
- [{halt_with,[atom_to_list(HaltM),atom_to_list(HaltF)]}];
+ [{halt_with,[atom_to_list(HaltM),
+ atom_to_list(HaltF)]}];
({interactive_mode,true}) ->
[{shell,[]}];
- ({config,CfgFiles}) ->
- [{ct_config,[CfgFiles]}];
+ ({config,CfgFile}) when is_integer(hd(CfgFile)) ->
+ [{ct_config,[CfgFile]}];
+ ({config,CfgFiles}) when is_list(hd(CfgFiles)) ->
+ [{ct_config,CfgFiles}];
({userconfig,{CBM,CfgStr=[X|_]}}) when is_integer(X) ->
[{userconfig,[atom_to_list(CBM),CfgStr]}];
({userconfig,{CBM,CfgStrs}}) when is_list(CfgStrs) ->
@@ -2814,6 +2880,12 @@ opts2args(EnvStartOpts) ->
end, UserCfg),
[_LastAnd|StrsR] = lists:reverse(lists:flatten(Strs)),
[{userconfig,lists:reverse(StrsR)}];
+ ({group,G}) when is_atom(G) ->
+ [{group,[atom_to_list(G)]}];
+ ({group,Gs}) when is_list(Gs) ->
+ LOfGStrs = [lists:flatten(io_lib:format("~w",[G])) ||
+ G <- Gs],
+ [{group,LOfGStrs}];
({testcase,Case}) when is_atom(Case) ->
[{'case',[atom_to_list(Case)]}];
({testcase,Cases}) ->
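The new group clauses in opts2args/1 translate the group start option back into the string form expected by the ct_run command line; an illustrative sketch of that mapping (group names hypothetical):

    %% {group,g1}        -> [{group,["g1"]}]
    %% {group,[g1,g2]}   -> [{group,["g1","g2"]}]
    %% {group,[[g1,g2]]} -> [{group,["[g1,g2]"]}]   (a group path, kept as one term)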
diff --git a/lib/common_test/src/ct_slave.erl b/lib/common_test/src/ct_slave.erl
index aa3413fa89..cb05423497 100644
--- a/lib/common_test/src/ct_slave.erl
+++ b/lib/common_test/src/ct_slave.erl
@@ -1,7 +1,7 @@
%%--------------------------------------------------------------------
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -311,6 +311,13 @@ do_start(Host, Node, Options) ->
StartupTimeout = Options#options.startup_timeout,
Result = case wait_for_node_alive(ENode, BootTimeout) of
pong->
+ case test_server:is_cover() of
+ true ->
+ MainCoverNode = cover:get_main_node(),
+ rpc:call(MainCoverNode,cover,start,[ENode]);
+ false ->
+ ok
+ end,
call_functions(ENode, Functions2),
receive
{node_started, ENode}->
@@ -423,6 +430,13 @@ wait_for_node_alive(Node, N) ->
% call init:stop on a remote node
do_stop(ENode) ->
+ case test_server:is_cover() of
+ true ->
+ MainCoverNode = cover:get_main_node(),
+ rpc:call(MainCoverNode,cover,flush,[ENode]);
+ false ->
+ ok
+ end,
spawn(ENode, init, stop, []),
wait_for_node_dead(ENode, 5).
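The two additions above make ct_slave cooperate with an ongoing cover run: the new node is started under the main cover node, and its data is flushed back before init:stop/0 is called. The same primitives can be used manually; a minimal sketch assuming an already reachable node 'n1@myhost' (hypothetical name):

    cover:start(),                        %% make sure this node is the main cover node
    {ok,_} = cover:start('n1@myhost'),    %% include the remote node in the cover run
    %% ... exercise the cover-compiled code on both nodes ...
    ok = cover:flush('n1@myhost'),        %% collect its data without stopping cover there
    ok = cover:stop('n1@myhost').         %% or stop cover on that node entirely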
diff --git a/lib/common_test/src/ct_testspec.erl b/lib/common_test/src/ct_testspec.erl
index a8b67d0329..202d8f9373 100644
--- a/lib/common_test/src/ct_testspec.erl
+++ b/lib/common_test/src/ct_testspec.erl
@@ -903,6 +903,8 @@ handle_data(logdir,Node,Dir,Spec) ->
[{Node,ref2dir(Dir,Spec)}];
handle_data(cover,Node,File,Spec) ->
[{Node,get_absfile(File,Spec)}];
+handle_data(cover_stop,Node,Stop,_Spec) ->
+ [{Node,Stop}];
handle_data(include,Node,Dirs=[D|_],Spec) when is_list(D) ->
[{Node,ref2dir(Dir,Spec)} || Dir <- Dirs];
handle_data(include,Node,Dir=[Ch|_],Spec) when is_integer(Ch) ->
@@ -1026,20 +1028,24 @@ insert_groups(Node,Dir,Suite,Group,Cases,Tests,MergeTests)
insert_groups(Node,Dir,Suite,[Group],Cases,Tests,MergeTests);
insert_groups(Node,Dir,Suite,Groups,Cases,Tests,false) when
((Cases == all) or is_list(Cases)) and is_list(Groups) ->
- Groups1 = [{Gr,Cases} || Gr <- Groups],
+ Groups1 = [if is_list(Gr) -> % preserve group path
+ {[Gr],Cases};
+ true ->
+ {Gr,Cases} end || Gr <- Groups],
append({{Node,Dir},[{Suite,Groups1}]},Tests);
insert_groups(Node,Dir,Suite,Groups,Cases,Tests,true) when
((Cases == all) or is_list(Cases)) and is_list(Groups) ->
+ Groups1 = [if is_list(Gr) -> % preserve group path
+ {[Gr],Cases};
+ true ->
+ {Gr,Cases} end || Gr <- Groups],
case lists:keysearch({Node,Dir},1,Tests) of
{value,{{Node,Dir},[{all,_}]}} ->
Tests;
{value,{{Node,Dir},Suites0}} ->
- Suites1 = insert_groups1(Suite,
- [{Gr,Cases} || Gr <- Groups],
- Suites0),
+ Suites1 = insert_groups1(Suite,Groups1,Suites0),
insert_in_order({{Node,Dir},Suites1},Tests);
false ->
- Groups1 = [{Gr,Cases} || Gr <- Groups],
insert_in_order({{Node,Dir},[{Suite,Groups1}]},Tests)
end;
insert_groups(Node,Dir,Suite,Groups,Case,Tests, MergeTests)
@@ -1062,13 +1068,13 @@ insert_groups1(Suite,Groups,Suites0) ->
insert_groups2(_Groups,all) ->
all;
-insert_groups2([Group={GrName,Cases}|Groups],GrAndCases) ->
- case lists:keysearch(GrName,1,GrAndCases) of
- {value,{GrName,all}} ->
+insert_groups2([Group={Gr,Cases}|Groups],GrAndCases) ->
+ case lists:keysearch(Gr,1,GrAndCases) of
+ {value,{Gr,all}} ->
GrAndCases;
- {value,{GrName,Cases0}} ->
+ {value,{Gr,Cases0}} ->
Cases1 = insert_in_order(Cases,Cases0),
- insert_groups2(Groups,insert_in_order({GrName,Cases1},GrAndCases));
+ insert_groups2(Groups,insert_in_order({Gr,Cases1},GrAndCases));
false ->
insert_groups2(Groups,insert_in_order(Group,GrAndCases))
end;
@@ -1258,6 +1264,8 @@ valid_terms() ->
{node,3},
{cover,2},
{cover,3},
+ {cover_stop,2},
+ {cover_stop,3},
{config,2},
{config,3},
{config,4},
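With {cover_stop,2} and {cover_stop,3} registered in valid_terms/0 above, a test specification can now control whether cover is stopped after the run; an illustrative spec fragment (file and node names hypothetical):

    {cover, "../my_app.coverspec"}.
    {cover_stop, false}.                      %% keep cover running on the test node
    {cover_stop, 'other_node@myhost', false}. %% or set it per node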
diff --git a/lib/common_test/src/ct_util.hrl b/lib/common_test/src/ct_util.hrl
index 196b5e46d0..c9c6514fa4 100644
--- a/lib/common_test/src/ct_util.hrl
+++ b/lib/common_test/src/ct_util.hrl
@@ -38,6 +38,7 @@
verbosity=[],
silent_connections=[],
cover=[],
+ cover_stop=[],
config=[],
userconfig=[],
event_handler=[],
diff --git a/lib/common_test/src/cth_log_redirect.erl b/lib/common_test/src/cth_log_redirect.erl
index 77f57c6195..78ae70f37e 100644
--- a/lib/common_test/src/cth_log_redirect.erl
+++ b/lib/common_test/src/cth_log_redirect.erl
@@ -54,7 +54,7 @@ post_init_per_group(_Group, _Config, Result, State) ->
post_end_per_testcase(_TC, _Config, Result, State) ->
%% Make sure that the event queue is flushed
%% before ending this test case.
- gen_event:call(error_logger, ?MODULE, flush),
+ gen_event:call(error_logger, ?MODULE, flush, 300000),
{Result, State}.
pre_end_per_group(Group, Config, {ct_log, Group}) ->
diff --git a/lib/common_test/test/Makefile b/lib/common_test/test/Makefile
index 7691920993..df816f9a61 100644
--- a/lib/common_test/test/Makefile
+++ b/lib/common_test/test/Makefile
@@ -53,7 +53,10 @@ MODULES= \
ct_verbosity_SUITE \
ct_shell_SUITE \
ct_system_error_SUITE \
- ct_snmp_SUITE
+ ct_snmp_SUITE \
+ ct_group_leader_SUITE \
+ ct_cover_SUITE \
+ ct_groups_search_SUITE
ERL_FILES= $(MODULES:%=%.erl)
@@ -107,7 +110,7 @@ release_spec: opt
release_tests_spec:
$(INSTALL_DIR) "$(RELSYSDIR)"
$(INSTALL_DATA) $(ERL_FILES) $(COVERFILE) "$(RELSYSDIR)"
- $(INSTALL_DATA) common_test.spec "$(RELSYSDIR)"
+ $(INSTALL_DATA) common_test.spec common_test.cover "$(RELSYSDIR)"
chmod -R u+w "$(RELSYSDIR)"
@tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -)
diff --git a/lib/common_test/test/common_test.cover b/lib/common_test/test/common_test.cover
new file mode 100644
index 0000000000..66697854ea
--- /dev/null
+++ b/lib/common_test/test/common_test.cover
@@ -0,0 +1,10 @@
+%% -*- erlang -*-
+{incl_app,common_test,details}.
+{cross_apps,common_test,[erl2html2,
+ test_server,
+ test_server_ctrl,
+ test_server_gl,
+ test_server_h,
+ test_server_io,
+ test_server_node,
+ test_server_sup]}.
diff --git a/lib/common_test/test/ct_config_SUITE.erl b/lib/common_test/test/ct_config_SUITE.erl
index 0b1abae757..d92be9ec6e 100644
--- a/lib/common_test/test/ct_config_SUITE.erl
+++ b/lib/common_test/test/ct_config_SUITE.erl
@@ -88,8 +88,8 @@ require(Config) when is_list(Config) ->
DataDir = ?config(data_dir, Config),
run_test(config_static_SUITE,
Config,
- [{config, filename:join(DataDir, "config/shadow.txt")},
- {config, filename:join(DataDir, "config/config.txt")}],
+ [{config, [filename:join(DataDir, "config/shadow.txt"),
+ filename:join(DataDir, "config/config.txt")]}],
["config_static_SUITE"]).
install_config(Config) when is_list(Config) ->
@@ -174,6 +174,7 @@ run_test(Name, Config, CTConfig, SuiteNames)->
Joiner = fun(Suite) -> filename:join(DataDir, "config/test/"++Suite) end,
Suites = lists:map(Joiner, SuiteNames),
{Opts,ERPid} = setup_env({suite,Suites}, Config, CTConfig),
+
ok = ct_test_support:run(Opts, Config),
TestEvents = ct_test_support:get_events(ERPid, Config),
ct_test_support:log_events(Name,
@@ -251,6 +252,7 @@ expected_events(config_static_SUITE)->
?sok(test_alias_tclocal_nested,{14,0,{2,1}}),
?sok(test_alias_tclocal_nested_backward_compat,{15,0,{2,1}}),
?sok(test_alias_tclocal_nested_backward_compat_subvals,{16,0,{2,1}}),
+ ?sok(test_config_same_name_already_in_use,{17,0,{2,1}}),
{?eh,tc_start,{config_static_SUITE,end_per_suite}},
{?eh,tc_done,{config_static_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
diff --git a/lib/common_test/test/ct_config_SUITE_data/config/test/config_static_SUITE.erl b/lib/common_test/test/ct_config_SUITE_data/config/test/config_static_SUITE.erl
index 2e1ad651e8..19f1dab4af 100644
--- a/lib/common_test/test/ct_config_SUITE_data/config/test/config_static_SUITE.erl
+++ b/lib/common_test/test/ct_config_SUITE_data/config/test/config_static_SUITE.erl
@@ -64,7 +64,8 @@ all() -> [test_get_config_simple, test_get_config_nested,
test_shadow_all,test_element,test_shadow_all_element,
test_internal_deep, test_alias_tclocal_nested,
test_alias_tclocal_nested_backward_compat,
- test_alias_tclocal_nested_backward_compat_subvals
+ test_alias_tclocal_nested_backward_compat_subvals,
+ test_config_same_name_already_in_use
].
init_per_testcase(_,Config) ->
@@ -124,6 +125,13 @@ test_config_name_already_in_use2(_) ->
ct:fail("Test should've been skipped, you shouldn't see this!"),
ok.
+
+test_config_same_name_already_in_use() ->
+ [].
+test_config_same_name_already_in_use(_) ->
+ ok = ct:require(x2,{gen_cfg,c}),
+ ok = ct:require(x2,{gen_cfg,c}).
+
%% test aliases
test_alias_tclocal() ->
[{require,newalias,gen_cfg}].
diff --git a/lib/common_test/test/ct_config_info_SUITE.erl b/lib/common_test/test/ct_config_info_SUITE.erl
index 40da377ee5..10fe8286dd 100644
--- a/lib/common_test/test/ct_config_info_SUITE.erl
+++ b/lib/common_test/test/ct_config_info_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -123,8 +123,7 @@ test_events(config_info) ->
{?eh,tc_done,{config_info_1_SUITE,init_per_suite,ok}},
[{?eh,tc_start,{config_info_1_SUITE,{init_per_group,g1,[]}}},
- {?eh,tc_done,{config_info_1_SUITE,
- {init_per_group,unknown,[]},
+ {?eh,tc_done,{config_info_1_SUITE,{init_per_group,g1,[]},
{failed,{timetrap_timeout,350}}}},
{?eh,tc_auto_skip,{config_info_1_SUITE,t11,
{failed,{config_info_1_SUITE,init_per_group,{timetrap_timeout,350}}}}},
@@ -136,14 +135,12 @@ test_events(config_info) ->
{?eh,tc_done,{config_info_1_SUITE,{init_per_group,g2,[]},ok}},
{?eh,tc_done,{config_info_1_SUITE,t21,ok}},
{?eh,tc_start,{config_info_1_SUITE,{end_per_group,g2,[]}}},
- {?eh,tc_done,{config_info_1_SUITE,
- {end_per_group,unknown,[]},
+ {?eh,tc_done,{config_info_1_SUITE,{end_per_group,g2,[]},
{failed,{timetrap_timeout,450}}}}],
[{?eh,tc_start,{config_info_1_SUITE,{init_per_group,g3,[]}}},
{?eh,tc_done,{config_info_1_SUITE,{init_per_group,g3,[]},ok}},
[{?eh,tc_start,{config_info_1_SUITE,{init_per_group,g4,[]}}},
- {?eh,tc_done,{config_info_1_SUITE,
- {init_per_group,unknown,[]},
+ {?eh,tc_done,{config_info_1_SUITE,{init_per_group,g4,[]},
{failed,{timetrap_timeout,400}}}},
{?eh,tc_auto_skip,{config_info_1_SUITE,t41,
{failed,{config_info_1_SUITE,init_per_group,
@@ -164,8 +161,7 @@ test_events(config_info) ->
{?eh,tc_done,{config_info_1_SUITE,{init_per_group,g5,[]},ok}},
{?eh,tc_done,{config_info_1_SUITE,t51,ok}},
{?eh,tc_start,{config_info_1_SUITE,{end_per_group,g5,[]}}},
- {?eh,tc_done,{config_info_1_SUITE,
- {end_per_group,unknown,[]},
+ {?eh,tc_done,{config_info_1_SUITE,{end_per_group,g5,[]},
{failed,{timetrap_timeout,400}}}}],
{?eh,tc_start,{config_info_1_SUITE,{end_per_group,g3,[]}}},
{?eh,tc_done,{config_info_1_SUITE,{end_per_group,g3,[]},ok}}],
diff --git a/lib/common_test/test/ct_cover_SUITE.erl b/lib/common_test/test/ct_cover_SUITE.erl
new file mode 100644
index 0000000000..bebfce70d0
--- /dev/null
+++ b/lib/common_test/test/ct_cover_SUITE.erl
@@ -0,0 +1,271 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%-------------------------------------------------------------------
+%%% File: ct_cover_SUITE
+%%%
+%%% Description:
+%%% Test code cover analysis support
+%%%
+%%%-------------------------------------------------------------------
+-module(ct_cover_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+
+-define(eh, ct_test_support_eh).
+-define(suite, cover_SUITE).
+-define(mod, cover_test_mod).
+
+%%--------------------------------------------------------------------
+%% TEST SERVER CALLBACK FUNCTIONS
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Description: Since Common Test starts another Test Server
+%% instance, the tests need to be performed on a separate node (or
+%% there will be clashes with logging processes etc).
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ case test_server:is_cover() of
+ true ->
+ {skip,"Test server is running cover already - skipping"};
+ false ->
+ ct_test_support:init_per_suite(Config)
+ end.
+
+end_per_suite(Config) ->
+ ct_test_support:end_per_suite(Config).
+
+init_per_testcase(TestCase, Config) ->
+ ct_test_support:init_per_testcase(TestCase, Config).
+
+end_per_testcase(TestCase, Config) ->
+ Node = fullname(existing_node),
+ case lists:member(Node,nodes()) of
+ true -> rpc:call(Node,erlang,halt,[]);
+ false -> ok
+ end,
+ ct_test_support:end_per_testcase(TestCase, Config).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [
+ default,
+ cover_stop_true,
+ cover_stop_false,
+ slave,
+ slave_start_slave,
+ cover_node_option,
+ ct_cover_add_remove_nodes,
+ otp_9956
+ ].
+
+%%--------------------------------------------------------------------
+%% TEST CASES
+%%--------------------------------------------------------------------
+
+%% Check that cover data is collected from the test node.
+%% Also check that cover is, by default, stopped after the test is completed.
+default(Config) ->
+ {ok,Events} = run_test(default,Config),
+ false = check_cover(Config),
+ check_calls(Events,1),
+ ok.
+
+%% Check that cover is stopped when the cover_stop option is set to true
+cover_stop_true(Config) ->
+ {ok,_Events} = run_test(cover_stop_true,[{cover_stop,true}],Config),
+ false = check_cover(Config).
+
+%% Check that cover is not stopped when the cover_stop option is set to false
+cover_stop_false(Config) ->
+ {ok,_Events} = run_test(cover_stop_false,[{cover_stop,false}],Config),
+ {true,[],[?mod]} = check_cover(Config),
+ CTNode = proplists:get_value(ct_node, Config),
+ ok = rpc:call(CTNode,cover,stop,[]),
+ false = check_cover(Config),
+ ok.
+
+%% Let the test node start a slave node - check that cover is collected
+%% from both nodes
+slave(Config) ->
+ {ok,Events} = run_test(slave,slave,[],Config),
+ check_calls(Events,2),
+ ok.
+
+%% Let the test node start a slave node which in turn starts another slave
+%% node - check that cover is collected from all three nodes
+slave_start_slave(Config) ->
+ {ok,Events} = run_test(slave_start_slave,slave_start_slave,[],Config),
+ check_calls(Events,3),
+ ok.
+
+%% Start a slave node before the test starts - the node is listed in the
+%% cover spec file.
+%% Check that cover is collected from both the test node and the slave node.
+cover_node_option(Config) ->
+ {ok, HostStr}=inet:gethostname(),
+ Host = list_to_atom(HostStr),
+ DataDir = ?config(data_dir,Config),
+ {ok,Node} = ct_slave:start(Host,existing_node,
+ [{erl_flags,"-pa " ++ DataDir}]),
+ false = check_cover(Node),
+ CoverSpec = default_cover_file_content() ++ [{nodes,[Node]}],
+ CoverFile = create_cover_file(cover_node_option,CoverSpec,Config),
+ {ok,Events} = run_test(cover_node_option,cover_node_option,
+ [{cover,CoverFile}],Config),
+ check_calls(Events,2),
+ {ok,Node} = ct_slave:stop(existing_node),
+ ok.
+
+%% Test ct_cover:add_nodes/1 and ct_cover:remove_nodes/1
+%% Check that cover is collected from the added node
+ct_cover_add_remove_nodes(Config) ->
+ {ok, HostStr}=inet:gethostname(),
+ Host = list_to_atom(HostStr),
+ DataDir = ?config(data_dir,Config),
+ {ok,Node} = ct_slave:start(Host,existing_node,
+ [{erl_flags,"-pa " ++ DataDir}]),
+ false = check_cover(Node),
+ {ok,Events} = run_test(ct_cover_add_remove_nodes,ct_cover_add_remove_nodes,
+ [],Config),
+ check_calls(Events,2),
+ {ok,Node} = ct_slave:stop(existing_node),
+ ok.
+
+%% Test that the test suite itself can be cover compiled and that
+%% data_dir is set correctly (OTP-9956)
+otp_9956(Config) ->
+ CoverFile = create_cover_file(otp_9956,[{incl_mods,[?suite]}],Config),
+ {ok,Events} = run_test(otp_9956,otp_9956,[{cover,CoverFile}],Config),
+ check_calls(Events,{?suite,otp_9956,1},1),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% HELP FUNCTIONS
+%%%-----------------------------------------------------------------
+run_test(Label,Config) ->
+ run_test(Label,[],Config).
+run_test(Label,ExtraOpts,Config) ->
+ run_test(Label,default,ExtraOpts,Config).
+run_test(Label,Testcase,ExtraOpts,Config) ->
+ DataDir = ?config(data_dir, Config),
+ Suite = filename:join(DataDir, ?suite),
+ CoverFile =
+ case proplists:get_value(cover,ExtraOpts) of
+ undefined ->
+ create_default_cover_file(Label,Config);
+ CF ->
+ CF
+ end,
+ RestOpts = lists:keydelete(cover,1,ExtraOpts),
+ {Opts,ERPid} = setup([{suite,Suite},{testcase,Testcase},
+ {cover,CoverFile},{label,Label}] ++ RestOpts, Config),
+ execute(Label, Testcase, Opts, ERPid, Config).
+
+setup(Test, Config) ->
+ Opts0 = ct_test_support:get_opts(Config),
+ Level = ?config(trace_level, Config),
+ EvHArgs = [{cbm,ct_test_support},{trace_level,Level}],
+ Opts = Opts0 ++ [{event_handler,{?eh,EvHArgs}}|Test],
+ ERPid = ct_test_support:start_event_receiver(Config),
+ {Opts,ERPid}.
+
+execute(Name, Testcase, Opts, ERPid, Config) ->
+ ok = ct_test_support:run(Opts, Config),
+ Events = ct_test_support:get_events(ERPid, Config),
+
+ ct_test_support:log_events(Name,
+ reformat(Events, ?eh),
+ ?config(priv_dir, Config),
+ Opts),
+ TestEvents = events_to_check(Testcase),
+ R = ct_test_support:verify_events(TestEvents, Events, Config),
+ {R,Events}.
+
+reformat(Events, EH) ->
+ ct_test_support:reformat(Events, EH).
+
+events_to_check(Testcase) ->
+ OneTest =
+ [{?eh,start_logging,{'DEF','RUNDIR'}}] ++
+ [{?eh,tc_done,{?suite,Testcase,ok}}] ++
+ [{?eh,stop_logging,[]}],
+
+ %% by default, two test runs are made (ct:run_test + script_start)
+ OneTest ++ OneTest.
+
+check_cover(Config) when is_list(Config) ->
+ CTNode = proplists:get_value(ct_node, Config),
+ check_cover(CTNode);
+check_cover(Node) when is_atom(Node) ->
+ case rpc:call(Node,test_server,is_cover,[]) of
+ true ->
+ {true,
+ rpc:call(Node,cover,which_nodes,[]),
+ rpc:call(Node,cover,modules,[])};
+ false ->
+ false
+ end.
+
+%% Check that each coverlog includes N calls to ?mod:foo/0
+check_calls(Events,N) ->
+ check_calls(Events,{?mod,foo,0},N).
+check_calls(Events,MFA,N) ->
+ CoverLogs =
+ [filename:join(filename:dirname(TCLog),"all.coverdata") ||
+ {ct_test_support_eh,
+ {event,tc_logfile,ct@falco,
+ {{?suite,init_per_suite},TCLog}}} <- Events],
+ do_check_logs(CoverLogs,MFA,N).
+
+do_check_logs([CoverLog|CoverLogs],{Mod,_,_} = MFA,N) ->
+ {ok,_} = cover:start(),
+ ok = cover:import(CoverLog),
+ {ok,Calls} = cover:analyse(Mod,calls,function),
+ ok = cover:stop(),
+ {MFA,N} = lists:keyfind(MFA,1,Calls),
+ do_check_logs(CoverLogs,MFA,N);
+do_check_logs([],_,_) ->
+ ok.
+
+fullname(Name) ->
+ {ok,Host} = inet:gethostname(),
+ list_to_atom(atom_to_list(Name) ++ "@" ++ Host).
+
+default_cover_file_content() ->
+ [{incl_mods,[?mod]}].
+
+create_default_cover_file(Filename,Config) ->
+ create_cover_file(Filename,default_cover_file_content(),Config).
+
+create_cover_file(Filename,Terms,Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ File = filename:join(PrivDir,Filename) ++ ".cover",
+ {ok,Fd} = file:open(File,[write]),
+ lists:foreach(fun(Term) ->
+ file:write(Fd,io_lib:format("~p.~n",[Term]))
+ end,Terms),
+ ok = file:close(Fd),
+ File.
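For reference, the cover specification file that create_cover_file/3 writes for e.g. the cover_node_option case contains plain Erlang terms along these lines (host name hypothetical):

    {incl_mods,[cover_test_mod]}.
    {nodes,[existing_node@myhost]}.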
diff --git a/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE.erl b/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE.erl
new file mode 100644
index 0000000000..fdc3323f0a
--- /dev/null
+++ b/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE.erl
@@ -0,0 +1,156 @@
+%%--------------------------------------------------------------------
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%----------------------------------------------------------------------
+%% File: cover_SUITE.erl
+%%
+%% Description:
+%% This file contains the test cases for the code coverage support
+%%
+%% @author Support
+%% @doc Test of code coverage support in common_test
+%% @end
+%%----------------------------------------------------------------------
+%%----------------------------------------------------------------------
+-module(cover_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+%% Default timetrap timeout (set in init_per_testcase).
+-define(default_timeout, test_server:minutes(1)).
+
+suite() ->
+ [].
+
+all() ->
+ [].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_testcase(_Case, Config) ->
+ Dog = test_server:timetrap(?default_timeout),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(Case, Config) ->
+ %% try apply(?MODULE,Case,[cleanup,Config])
+ %% catch error:undef -> ok
+ %% end,
+
+ kill_slaves(Case,nodes()),
+ Dog=?config(watchdog, Config),
+ test_server:timetrap_cancel(Dog),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+break(_Config) ->
+ test_server:break(""),
+ ok.
+
+default(Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ cover_test_mod:foo(),
+ ok.
+
+slave(Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ cover_test_mod:foo(),
+ N1 = nodename(slave,1),
+ {ok,Node} = ct_slave:start(N1),
+ cover_compiled = rpc:call(Node,code,which,[cover_test_mod]),
+ rpc:call(Node,cover_test_mod,foo,[]),
+ {ok,Node} = ct_slave:stop(N1),
+ ok.
+
+slave_start_slave(Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ cover_test_mod:foo(),
+ N1 = nodename(slave_start_slave,1),
+ N2 = nodename(slave_start_slave,2),
+ {ok,Node} = ct_slave:start(N1),
+ cover_compiled = rpc:call(Node,code,which,[cover_test_mod]),
+ rpc:call(Node,cover_test_mod,foo,[]),
+ {ok,Node2} = rpc:call(Node,ct_slave,start,[N2]),
+ rpc:call(Node2,cover_test_mod,foo,[]),
+ {ok,Node2} = rpc:call(Node,ct_slave,stop,[N2]),
+ {ok,Node} = ct_slave:stop(N1),
+ ok.
+
+cover_node_option(Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ cover_test_mod:foo(),
+ Node = fullname(existing_node),
+ cover_compiled = rpc:call(Node,code,which,[cover_test_mod]),
+ rpc:call(Node,cover_test_mod,foo,[]),
+ ok.
+
+ct_cover_add_remove_nodes(Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ cover_test_mod:foo(),
+ Node = fullname(existing_node),
+ Beam = rpc:call(Node,code,which,[cover_test_mod]),
+ false = (Beam == cover_compiled),
+
+ rpc:call(Node,cover_test_mod,foo,[]), % should not be collected
+ {ok,[Node]} = ct_cover:add_nodes([Node]),
+ cover_compiled = rpc:call(Node,code,which,[cover_test_mod]),
+ rpc:call(Node,cover_test_mod,foo,[]), % should be collected
+ ok = ct_cover:remove_nodes([Node]),
+ rpc:call(Node,cover_test_mod,foo,[]), % should not be collected
+
+ Beam = rpc:call(Node,code,which,[cover_test_mod]),
+
+ ok.
+
+otp_9956(Config) ->
+ cover_compiled = code:which(?MODULE),
+ DataDir = ?config(data_dir,Config),
+ absolute = filename:pathtype(DataDir),
+ true = filelib:is_dir(DataDir),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Internal
+nodename(Case,N) ->
+ list_to_atom(nodeprefix(Case) ++ integer_to_list(N)).
+
+nodeprefix(Case) ->
+ atom_to_list(?MODULE) ++ "_" ++ atom_to_list(Case) ++ "_node".
+
+
+fullname(Name) ->
+ {ok,Host} = inet:gethostname(),
+ list_to_atom(atom_to_list(Name) ++ "@" ++ Host).
+
+kill_slaves(Case, [Node|Nodes]) ->
+ Prefix = nodeprefix(Case),
+ case lists:prefix(Prefix,atom_to_list(Node)) of
+ true ->
+ rpc:call(Node,erlang,halt,[]);
+ _ ->
+ ok
+ end,
+ kill_slaves(Case,Nodes);
+kill_slaves(_,[]) ->
+ ok.
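A small note on the helper functions above: the slave node names are derived from the suite and case names, which is what kill_slaves/2 keys on when cleaning up; for example (host name hypothetical):

    %% nodename(slave, 1)      -> 'cover_SUITE_slave_node1'
    %% nodeprefix(slave)       -> "cover_SUITE_slave_node"
    %% fullname(existing_node) -> 'existing_node@myhost'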
diff --git a/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE_data/.gitignore b/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE_data/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE_data/.gitignore
diff --git a/lib/common_test/test/ct_cover_SUITE_data/cover_test_mod.erl b/lib/common_test/test/ct_cover_SUITE_data/cover_test_mod.erl
new file mode 100644
index 0000000000..d4f69452c3
--- /dev/null
+++ b/lib/common_test/test/ct_cover_SUITE_data/cover_test_mod.erl
@@ -0,0 +1,4 @@
+-module(cover_test_mod).
+-compile(export_all).
+foo() ->
+ ok.
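do_check_logs/3 in ct_cover_SUITE above relies only on the standard cover API; a minimal sketch of the same per-function call-count check, assuming an exported cover data file "all.coverdata" in the current directory:

    cover:start(),
    ok = cover:import("all.coverdata"),
    {ok,Calls} = cover:analyse(cover_test_mod, calls, function),
    %% Calls is a list of {{Module,Function,Arity},NumberOfCalls}, e.g.
    %% [{{cover_test_mod,foo,0},2}]
    {_,N} = lists:keyfind({cover_test_mod,foo,0}, 1, Calls),
    io:format("cover_test_mod:foo/0 was called ~p time(s)~n", [N]),
    cover:stop().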
diff --git a/lib/common_test/test/ct_error_SUITE.erl b/lib/common_test/test/ct_error_SUITE.erl
index 338e76264e..6d90b29f41 100644
--- a/lib/common_test/test/ct_error_SUITE.erl
+++ b/lib/common_test/test/ct_error_SUITE.erl
@@ -61,7 +61,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[cfg_error, lib_error, no_compile, timetrap_end_conf,
timetrap_normal, timetrap_extended, timetrap_parallel,
- timetrap_fun, misc_errors].
+ timetrap_fun, timetrap_fun_group, misc_errors].
groups() ->
[].
@@ -251,6 +251,24 @@ timetrap_fun(Config) when is_list(Config) ->
%%%-----------------------------------------------------------------
%%%
+timetrap_fun_group(Config) when is_list(Config) ->
+ DataDir = ?config(data_dir, Config),
+ Join = fun(D, S) -> filename:join(D, "error/test/"++S) end,
+ Suites = [Join(DataDir, "timetrap_8_SUITE")],
+ {Opts,ERPid} = setup([{suite,Suites}], Config),
+ ok = ct_test_support:run(Opts, Config),
+ Events = ct_test_support:get_events(ERPid, Config),
+
+ ct_test_support:log_events(timetrap_fun_group,
+ reformat(Events, ?eh),
+ ?config(priv_dir, Config),
+ Opts),
+
+ TestEvents = events_to_check(timetrap_fun_group),
+ ok = ct_test_support:verify_events(TestEvents, Events, Config).
+
+%%%-----------------------------------------------------------------
+%%%
misc_errors(Config) when is_list(Config) ->
DataDir = ?config(data_dir, Config),
Join = fun(D, S) -> filename:join(D, "error/test/"++S) end,
@@ -429,8 +447,7 @@ test_events(cfg_error) ->
{'EXIT',{init_per_group_fails,g1}}}}}}],
[{?eh,tc_start,{cfg_error_8_SUITE,{init_per_group,g2,[]}}},
- {?eh,tc_done,{cfg_error_8_SUITE,
- {init_per_group,unknown,[]},
+ {?eh,tc_done,{cfg_error_8_SUITE,{init_per_group,g2,[]},
{failed,{timetrap_timeout,2000}}}},
{?eh,tc_auto_skip,{cfg_error_8_SUITE,tc1,
{failed,{cfg_error_8_SUITE,init_per_group,
@@ -500,7 +517,7 @@ test_events(cfg_error) ->
{?eh,tc_done,{cfg_error_8_SUITE,tc1,ok}},
{?eh,test_stats,{9,0,{0,14}}},
{?eh,tc_start,{cfg_error_8_SUITE,{end_per_group,g12,[]}}},
- {?eh,tc_done,{cfg_error_8_SUITE,{end_per_group,unknown,[]},
+ {?eh,tc_done,{cfg_error_8_SUITE,{end_per_group,g12,[]},
{failed,{timetrap_timeout,2000}}}}],
{?eh,tc_start,{cfg_error_8_SUITE,end_per_suite}},
@@ -971,11 +988,423 @@ test_events(timetrap_fun) ->
{?eh,stop_logging,[]}
];
+test_events(timetrap_fun_group) ->
+ [
+ {?eh,start_logging,{'DEF','RUNDIR'}},
+ {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
+ {?eh,start_info,{1,1,58}},
+ {?eh,tc_start,{timetrap_8_SUITE,init_per_suite}},
+ {?eh,tc_done,{timetrap_8_SUITE,init_per_suite,ok}},
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g0,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g0,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{0,1,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{0,2,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g0,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g0,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g1,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g1,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{0,3,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{0,4,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g1,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g1,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g2,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g2,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc1}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{0,5,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{0,6,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g2,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g2,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g3,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g3,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc4}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc4,
+ {failed,{timetrap_timeout,{'$approx',2000}}}}},
+ {?eh,test_stats,{0,7,{0,0}}},
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g1,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g1,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{0,8,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{0,9,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g1,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g1,[]},ok}}],
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g2,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g2,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc1}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{0,10,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{0,11,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g2,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g2,[]},ok}}],
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g3,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g3,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g4,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g4,[]},
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{0,11,{0,1}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{0,11,{0,2}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g5,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g5,[]},
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{0,11,{0,3}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{0,11,{0,4}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g6,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g6,[]},
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}},
+ {?eh,test_stats,{0,11,{0,5}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}},
+ {?eh,test_stats,{0,11,{0,6}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g7,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g7,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{1,11,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g7,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g7,[]},
+ {user_timetrap_error,{kaboom,'_'}}}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g8,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g8,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{2,11,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g8,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g8,[]},
+ {failed,{timetrap_timeout,{'$approx',500}}}}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g9,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g9,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{3,11,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,test_stats,{3,12,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g9,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g9,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g10,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g10,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,test_stats,{3,13,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{4,13,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g10,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g10,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g11,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g11,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc3}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc3,
+ {failed,{timetrap_timeout,{'$approx',4000}}}}},
+ {?eh,test_stats,{4,14,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,15,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g11,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g11,[]},ok}}],
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg0,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg0,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{4,16,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,17,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg0,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg0,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg1,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg1,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{4,18,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,19,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg1,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg1,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg2,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg2,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc1}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{4,20,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,21,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg2,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg2,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg3,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg3,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc4}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc4,
+ {failed,{timetrap_timeout,{'$approx',2000}}}}},
+ {?eh,test_stats,{4,22,{0,6}}},
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg1,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg1,[parallel]},
+ ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{4,23,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,24,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg1,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg1,[parallel]},
+ ok}}]},
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg2,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg2,[parallel]},
+ ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc1}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{4,25,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,26,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg2,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg2,[parallel]},
+ ok}}]},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg3,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg3,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg4,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg4,[parallel]},
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{4,26,{0,7}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{4,26,{0,8}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg5,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg5,[parallel]},
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{4,26,{0,9}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{4,26,{0,10}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg6,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg6,[parallel]},
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}},
+ {?eh,test_stats,{4,26,{0,11}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}},
+ {?eh,test_stats,{4,26,{0,12}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg7,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg7,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{5,26,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg7,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg7,[parallel]},
+ {user_timetrap_error,{kaboom,'_'}}}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg8,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg8,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{6,26,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg8,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg8,[parallel]},
+ {failed,{timetrap_timeout,{'$approx',500}}}}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg9,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg9,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {user_timetrap_error,{kaboom,'_'}}}},
+ %% Due to parallelism, only the final test_stats value in the group is checked
+ {?eh,test_stats,{7,27,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg9,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg9,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg10,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg10,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ %% Due to parallelism, only the final test_stats value in the group is checked
+ {?eh,test_stats,{8,28,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg10,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg10,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg11,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg11,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc3}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc3,
+ {failed,{timetrap_timeout,{'$approx',4000}}}}},
+ {?eh,test_stats,{8,29,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{8,30,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg11,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg11,[parallel]},ok}}]},
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,sg1,[sequence]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,sg1,[sequence]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{9,30,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,test_stats,{9,31,{0,12}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_8_SUITE,tc0}}}},
+ {?eh,test_stats,{9,31,{0,13}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,tc0}}}},
+ {?eh,test_stats,{9,31,{0,14}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,sg1,[sequence]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,sg1,[sequence]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,sg2,[sequence]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,sg2,[sequence]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{10,31,{0,14}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{10,32,{0,14}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_8_SUITE,tc0}}}},
+ {?eh,test_stats,{10,32,{0,15}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,tc0}}}},
+ {?eh,test_stats,{10,32,{0,16}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,sg2,[sequence]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,sg2,[sequence]},ok}}],
+
+ {?eh,tc_start,{timetrap_8_SUITE,end_per_suite}},
+ {?eh,tc_done,{timetrap_8_SUITE,end_per_suite,ok}},
+ {?eh,test_done,{'DEF','STOP_TIME'}},
+ {?eh,stop_logging,[]}
+ ];
+
test_events(misc_errors) ->
[
{?eh,start_logging,{'DEF','RUNDIR'}},
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
- {?eh,start_info,{1,1,7}},
+ {?eh,start_info,{1,1,9}},
{?eh,tc_start,{misc_error_1_SUITE,ct_fail_1}},
{?eh,tc_done,{misc_error_1_SUITE,ct_fail_1,
{failed,{error,{test_case_failed,{error,this_is_expected}}}}}},
@@ -1002,7 +1431,12 @@ test_events(misc_errors) ->
{?eh,tc_start,{misc_error_1_SUITE,killed_by_signal_2}},
{?eh,tc_done,{misc_error_1_SUITE,killed_by_signal_2,
{failed,testcase_aborted_or_killed}}},
- {?eh,test_stats,{0,7,{0,0}}},
+ {parallel,
+ [{?eh,tc_start,{misc_error_1_SUITE,p1}},
+ {?eh,tc_done,{misc_error_1_SUITE,p1,ok}},
+ {?eh,tc_start,{misc_error_1_SUITE,p2}},
+ {?eh,tc_done,{misc_error_1_SUITE,p2,ok}}]},
+ {?eh,test_stats,{2,7,{0,0}}},
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
].
diff --git a/lib/common_test/test/ct_error_SUITE_data/error/test/misc_error_1_SUITE.erl b/lib/common_test/test/ct_error_SUITE_data/error/test/misc_error_1_SUITE.erl
index 99c3ed05ec..61f3fa7e59 100644
--- a/lib/common_test/test/ct_error_SUITE_data/error/test/misc_error_1_SUITE.erl
+++ b/lib/common_test/test/ct_error_SUITE_data/error/test/misc_error_1_SUITE.erl
@@ -96,7 +96,7 @@ end_per_testcase(_TestCase, _Config) ->
%% N = integer() | forever
%%--------------------------------------------------------------------
groups() ->
- [].
+ [{p,[parallel],[p1,p2]}].
%%--------------------------------------------------------------------
%% Function: all() -> GroupsAndTestCases | {skip,Reason}
@@ -107,7 +107,8 @@ groups() ->
%%--------------------------------------------------------------------
all() ->
[ct_fail_1, ct_fail_2, ct_fail_3, ts_fail_1, ts_fail_2,
- killed_by_signal_1, killed_by_signal_2].
+ killed_by_signal_1, killed_by_signal_2,
+ {group,p}].
ct_fail_1(_) ->
ct:fail({error,this_is_expected}),
@@ -152,3 +153,10 @@ killed_by_signal_2(_) ->
end),
ct:sleep(1000),
exit(this_should_not_be_seen).
+
+p1(_) ->
+ {error,parallel_group} = ct:abort_current_testcase(aborted),
+ ok.
+
+p2(_) ->
+ receive after 1000 -> ok end.
diff --git a/lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_8_SUITE.erl b/lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_8_SUITE.erl
new file mode 100644
index 0000000000..ff138f38b5
--- /dev/null
+++ b/lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_8_SUITE.erl
@@ -0,0 +1,258 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(timetrap_8_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+-define(TO, 4).
+
+%%--------------------------------------------------------------------
+%% Function: suite() -> Info
+%% Info = [tuple()]
+%%--------------------------------------------------------------------
+suite() ->
+ [{timetrap,{timetrap_utils,timetrap_val,[{seconds,?TO}]}}].
+
+%%--------------------------------------------------------------------
+%% Function: init_per_suite(Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_suite(Config0) -> void() | {save_config,Config1}
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_suite(_Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: init_per_group(GroupName, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_group(G6, Config) when G6==g6; G6==pg6 ->
+ ct:sleep({seconds,1}),
+ Config;
+init_per_group(_GroupName, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_group(GroupName, Config0) ->
+%% void() | {save_config,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_group(G7or8, _Config) when G7or8==g7; G7or8==pg7; G7or8==g8; G7or8==pg8 ->
+ ct:sleep({seconds,5}),
+ ok;
+end_per_group(_GroupName, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: init_per_testcase(TestCase, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_testcase(_, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_testcase(TestCase, Config0) ->
+%% void() | {save_config,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_testcase(_, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: groups() -> [Group]
+%% Group = {GroupName,Properties,GroupsAndTestCases}
+%% GroupName = atom()
+%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
+%% TestCase = atom()
+%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}}
+%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+%% repeat_until_any_ok | repeat_until_any_fail
+%% N = integer() | forever
+%%--------------------------------------------------------------------
+groups() ->
+ [
+ {g0,[],[tc0,tc2]}, % group overrides suite and tc overrides group
+ {g1,[],[tc0,tc2]}, % group overrides suite and tc overrides group
+ {g2,[],[tc1,tc2]}, % tc overrides group
+ {g3,[],[tc4,{group,g1},{group,g2}]}, % subgroup overrides group
+ {g4,[],[tc0,tc2]}, % exit during init_per_group
+ {g5,[],[tc0,tc2]}, % exit during init_per_group
+ {g6,[],[tc0,tc2]}, % timeout during init_per_group
+ {g7,[],[tc5]}, % exit during end_per_group
+ {g8,[],[tc5]}, % timeout during end_per_group
+ {g9,[],[tc5,tc0]}, % exit during testcase
+ {g10,[],[tc0,tc5]}, % exit during testcase
+ {g11,[],[tc3,tc2]}, % suite is valid if nothing else is specified
+ {pg0,[parallel],[tc0,tc2]}, % group overrides suite and tc overrides group
+ {pg1,[parallel],[tc0,tc2]}, % group overrides suite and tc overrides group
+ {pg2,[parallel],[tc1,tc2]}, % tc overrides group
+ {pg3,[parallel],[tc4,{group,pg1},{group,pg2}]}, % subgroup overrides group
+ {pg4,[parallel],[tc0,tc2]}, % exit during init_per_group
+ {pg5,[parallel],[tc0,tc2]}, % exit during init_per_group
+ {pg6,[parallel],[tc0,tc2]}, % timeout during init_per_group
+ {pg7,[parallel],[tc5]}, % exit during end_per_group
+ {pg8,[parallel],[tc5]}, % timeout during end_per_group
+ {pg9,[parallel],[tc5,tc0]}, % exit during testcase
+ {pg10,[parallel],[tc0,tc5]},% exit during testcase
+ {pg11,[parallel],[tc3,tc2]},% suite is valid if nothing else is specified
+ {sg1,[sequence],[tc5,tc0,tc1,tc2]}, % exit during sequential testcase
+ {sg2,[sequence],[tc5,tc0,tc1,tc2]}].% timeout during sequential testcase
+
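+%% group/1 is the group info function. Each clause below assigns the group a
+%% timetrap, given either as an {M,F,A} or as a fun that Common Test executes
+%% (a "user timetrap"): a fun/MFA returning a time sets the timeout, while one
+%% that crashes (e.g. exit(kaboom)) is reported as a user_timetrap_error.
+%% Several of the groups exercise exactly those failure cases on purpose.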
+group(g0) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[{seconds,1}]}}];
+group(g1) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(1000) end}];
+group(g2) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(3000) end}];
+group(g3) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(2000) end}];
+group(g4) ->
+ [{timetrap,{timetrap_utils,timetrap_exit,[kaboom]}}];
+group(g5) ->
+ [{timetrap,fun() -> exit(kaboom) end}];
+group(g6) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[500]}}];
+group(g7) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(g8) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[500]}}];
+group(g9) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(g10) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(g11) ->
+ [];
+group(pg0) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[{seconds,1}]}}];
+group(pg1) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(1000) end}];
+group(pg2) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(3000) end}];
+group(pg3) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(2000) end}];
+group(pg4) ->
+ [{timetrap,{timetrap_utils,timetrap_exit,[kaboom]}}];
+group(pg5) ->
+ [{timetrap,fun() -> exit(kaboom) end}];
+group(pg6) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[500]}}];
+group(pg7) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(pg8) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[500]}}];
+group(pg9) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(pg10) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(pg11) ->
+ [];
+group(sg1) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(sg2) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[{seconds,1}]}}].
+
+
+%%--------------------------------------------------------------------
+%% Function: all() -> GroupsAndTestCases | {skip,Reason}
+%% GroupsAndTestCases = [{group,GroupName} | TestCase]
+%% GroupName = atom()
+%% TestCase = atom()
+%% Reason = term()
+%%--------------------------------------------------------------------
+all() ->
+ [
+ {group,g0},
+ {group,g1},
+ {group,g2},
+ {group,g3},
+ {group,g4},
+ {group,g5},
+ {group,g6},
+ {group,g7},
+ {group,g8},
+ {group,g9},
+ {group,g10},
+ {group,g11},
+ {group,pg0},
+ {group,pg1},
+ {group,pg2},
+ {group,pg3},
+ {group,pg4},
+ {group,pg5},
+ {group,pg6},
+ {group,pg7},
+ {group,pg8},
+ {group,pg9},
+ {group,pg10},
+ {group,pg11},
+ {group,sg1},
+ {group,sg2}].
+
+
+
+tc0(_) ->
+ ct:comment("TO set by group"),
+ ct:sleep({seconds,5}),
+ ok.
+
+tc1() ->
+ [{timetrap,{timetrap_utils,timetrap_val,[1000]}}].
+tc1(_) ->
+ ct:comment("TO after 1 sec"),
+ ct:sleep({seconds,2}),
+ ok.
+
+tc2() ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(500) end}].
+tc2(_) ->
+ ct:comment("TO after 0.5 sec"),
+ ct:sleep({seconds,2}),
+ ok.
+
+tc3(_) ->
+ ct:comment(io_lib:format("TO after ~w sec", [?TO])),
+ ct:sleep({seconds,5}),
+ ok.
+
+tc4(_) ->
+ ct:comment("TO set by group"),
+ ct:sleep({seconds,5}),
+ ok.
+
+tc5(_) ->
+ ct:comment("No TO in this testcase, maybe later"),
+ ok.
diff --git a/lib/common_test/test/ct_group_leader_SUITE.erl b/lib/common_test/test/ct_group_leader_SUITE.erl
new file mode 100644
index 0000000000..cde3061d6a
--- /dev/null
+++ b/lib/common_test/test/ct_group_leader_SUITE.erl
@@ -0,0 +1,181 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%-------------------------------------------------------------------
+%%% File: ct_group_leader_SUITE
+%%%
+%%% Description:
+%%%
+%%% Test the group leader functionality in the test_server application.
+%%%-------------------------------------------------------------------
+-module(ct_group_leader_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+
+-define(eh, ct_test_support_eh).
+
+%%--------------------------------------------------------------------
+%% TEST SERVER CALLBACK FUNCTIONS
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Description: Since Common Test starts another Test Server
+%% instance, the tests need to be performed on a separate node (or
+%% there will be clashes with logging processes etc).
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ Config1 = ct_test_support:init_per_suite(Config),
+ Config1.
+
+end_per_suite(Config) ->
+ ct_test_support:end_per_suite(Config).
+
+init_per_testcase(TestCase, Config) ->
+ ct_test_support:init_per_testcase(TestCase, Config).
+
+end_per_testcase(TestCase, Config) ->
+ ct_test_support:end_per_testcase(TestCase, Config).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [
+ basic
+ ].
+
+%%--------------------------------------------------------------------
+%% TEST CASES
+%%--------------------------------------------------------------------
+
+%%%-----------------------------------------------------------------
+%%%
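+%%% basic/1 runs group_leader_SUITE on the separate test node, reads the
+%%% generated suite.log and extracts every "=case Mod:Func" line into a list
+%%% of {Mod,Func} tuples. That list is then matched against events_to_check/1:
+%%% cases inside a {parallel,...} term may appear in any order, all other
+%%% cases must appear in exactly the listed order (see verify_cases/3).
+%%%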
+basic(Config) ->
+ TC = basic,
+ DataDir = ?config(data_dir, Config),
+ Suite = filename:join(DataDir, "group_leader_SUITE"),
+ {Opts,ERPid} = setup([{suite,Suite},{label,TC}], Config),
+ SuiteLog = execute(TC, Opts, ERPid, Config),
+ {ok,Data} = file:read_file(SuiteLog),
+ Lines = binary:split(Data, <<"\n">>, [global]),
+ {ok,RE} = re:compile("(\\S+):(\\S+)$"),
+ Cases0 = [begin
+ {match,[M,F]} = re:run(Case, RE, [{capture,all_but_first,list}]),
+ {list_to_atom(M),list_to_atom(F)}
+ end || <<"=case ",Case/binary>> <- Lines],
+ Cases = [MF || {_,F}=MF <- Cases0,
+ F =/= init_per_suite,
+ F =/= end_per_suite,
+ F =/= init_per_group,
+ F =/= end_per_group],
+ io:format("~p\n", [Cases]),
+ [] = verify_cases(events_to_check(TC), Cases, false),
+ ok.
+
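+%% verify_cases(Expected, Cases, InParallel) walks the expected-event terms
+%% and consumes the {Mod,Func} list extracted from suite.log. With
+%% InParallel=false the next recorded case must match the head of the list
+%% exactly; with InParallel=true (inside a {parallel,...} term) it only has
+%% to be a member, since parallel cases may be logged in any order. A nested
+%% plain list (a sequential group inside a parallel one) is again checked in
+%% order. The function returns the cases that remain to be matched.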
+verify_cases([{parallel,P}|Ts], Cases0, Par) ->
+ Cases = verify_cases(P, Cases0, true),
+ verify_cases(Ts, Cases, Par);
+verify_cases([{?eh,tc_done,{M,F,_}}|Ts], Cases0, false) ->
+ [{M,F}|Cases] = Cases0,
+ verify_cases(Ts, Cases, false);
+verify_cases([{?eh,tc_done,{M,F,_}}|Ts], Cases0, true) ->
+ case lists:member({M,F}, Cases0) of
+ true ->
+ Cases = Cases0 -- [{M,F}],
+ verify_cases(Ts, Cases, true);
+ false ->
+ io:format("~p not found\n", [{M,F}]),
+ ?t:fail()
+ end;
+verify_cases([{?eh,_,_}|Ts], Cases, Par) ->
+ verify_cases(Ts, Cases, Par);
+verify_cases([], Cases, _) ->
+ Cases;
+verify_cases([List|Ts], Cases0, Par) when is_list(List) ->
+ Cases = verify_cases(List, Cases0, false),
+ verify_cases(Ts, Cases, Par).
+
+%%%-----------------------------------------------------------------
+%%% HELP FUNCTIONS
+%%%-----------------------------------------------------------------
+
+setup(Test, Config) ->
+ Opts0 = ct_test_support:get_opts(Config),
+ Level = ?config(trace_level, Config),
+ EvHArgs = [{cbm,ct_test_support},{trace_level,Level}],
+ Opts = Opts0 ++ [{event_handler,{?eh,EvHArgs}}|Test],
+ ERPid = ct_test_support:start_event_receiver(Config),
+ {Opts,ERPid}.
+
+execute(Name, Opts, ERPid, Config) ->
+ ok = ct_test_support:run(Opts, Config),
+ Events = ct_test_support:get_events(ERPid, Config),
+
+ ct_test_support:log_events(Name,
+ reformat(Events, ?eh),
+ ?config(priv_dir, Config),
+ Opts),
+
+ TestEvents = events_to_check(Name),
+ ok = ct_test_support:verify_events(TestEvents, Events, Config),
+ {event,tc_logfile,_,{_,File}} =
+ lists:keyfind(tc_logfile, 2, [Ev || {?eh,Ev} <- Events]),
+ LogDir = filename:dirname(File),
+ filename:join(LogDir, "suite.log").
+
+reformat(Events, EH) ->
+ ct_test_support:reformat(Events, EH).
+
+%%%-----------------------------------------------------------------
+%%% TEST EVENTS
+%%%-----------------------------------------------------------------
+
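+%% Expected order of the cases as recorded in suite.log. A {parallel,[...]}
+%% term encloses the cases of a parallel group (verify_cases/3 accepts them
+%% in any order), while a nested plain list marks a sequential subgroup whose
+%% cases must appear in the listed order.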
+events_to_check(_Test) ->
+ [{?eh,tc_done,{group_leader_SUITE,tc1,ok}},
+ {parallel,[{?eh,tc_start,{group_leader_SUITE,p1}},
+ {?eh,tc_done,{group_leader_SUITE,p1,ok}},
+ {?eh,tc_start,{group_leader_SUITE,p2}},
+ {?eh,tc_done,{group_leader_SUITE,p2,ok}}]},
+ {?eh,tc_done,{group_leader_SUITE,p_restart_my_io_server,ok}},
+ {?eh,tc_done,{group_leader_SUITE,p3,ok}},
+ {parallel,[
+ {?eh,tc_start,{group_leader_SUITE,p10}},
+ {?eh,tc_start,{group_leader_SUITE,p11}},
+ {?eh,tc_done,{group_leader_SUITE,p10,ok}},
+ {?eh,tc_done,{group_leader_SUITE,p11,ok}},
+ [{?eh,tc_done,{group_leader_SUITE,s1,ok}},
+ {?eh,tc_done,{group_leader_SUITE,s2,ok}},
+ {?eh,tc_done,{group_leader_SUITE,s3,ok}}],
+ {?eh,tc_start,{group_leader_SUITE,p12}},
+ {?eh,tc_done,{group_leader_SUITE,p12,ok}},
+ [{?eh,tc_done,{group_leader_SUITE,s4,ok}},
+ {?eh,tc_done,{group_leader_SUITE,s5,ok}}],
+ {?eh,tc_start,{group_leader_SUITE,p13}},
+ {?eh,tc_done,{group_leader_SUITE,p13,ok}} ]},
+ {?eh,tc_done,{group_leader_SUITE,cap1,ok}},
+ {?eh,tc_done,{group_leader_SUITE,cap2,ok}},
+ {parallel,[{?eh,tc_start,{group_leader_SUITE,cap1}},
+ {?eh,tc_done,{group_leader_SUITE,cap1,ok}},
+ {?eh,tc_start,{group_leader_SUITE,cap2}},
+ {?eh,tc_done,{group_leader_SUITE,cap2,ok}}]},
+ {?eh,test_done,{'DEF','STOP_TIME'}},
+ {?eh,stop_logging,[]}
+ ].
diff --git a/lib/common_test/test/ct_group_leader_SUITE_data/group_leader_SUITE.erl b/lib/common_test/test/ct_group_leader_SUITE_data/group_leader_SUITE.erl
new file mode 100644
index 0000000000..3f1844b4ae
--- /dev/null
+++ b/lib/common_test/test/ct_group_leader_SUITE_data/group_leader_SUITE.erl
@@ -0,0 +1,252 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(group_leader_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+%%--------------------------------------------------------------------
+%% @spec suite() -> Info
+%% Info = [tuple()]
+%% @end
+%%--------------------------------------------------------------------
+suite() ->
+ [{timetrap,{seconds,10}}].
+
+%%--------------------------------------------------------------------
+%% @spec init_per_suite(Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ start_my_io_server(),
+ Config.
+
+%%--------------------------------------------------------------------
+%% @spec end_per_suite(Config0) -> void() | {save_config,Config1}
+%% Config0 = Config1 = [tuple()]
+%% @end
+%%--------------------------------------------------------------------
+end_per_suite(_Config) ->
+ my_io_server ! die,
+ ok.
+
+%%--------------------------------------------------------------------
+%% @spec init_per_group(GroupName, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+init_per_group(_GroupName, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% @spec end_per_group(GroupName, Config0) ->
+%% void() | {save_config,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%% @end
+%%--------------------------------------------------------------------
+end_per_group(_GroupName, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @spec init_per_testcase(TestCase, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% @spec end_per_testcase(TestCase, Config0) ->
+%% void() | {save_config,Config1} | {fail,Reason}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @spec groups() -> [Group]
+%% Group = {GroupName,Properties,GroupsAndTestCases}
+%% GroupName = atom()
+%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
+%% TestCase = atom()
+%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}}
+%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+%% repeat_until_any_ok | repeat_until_any_fail
+%% N = integer() | forever
+%% @end
+%%--------------------------------------------------------------------
+groups() ->
+ [{p,[parallel],[p1,p2]},
+ {p_restart,[parallel],[p_restart_my_io_server]},
+ {seq,[],[s1,s2,s3]},
+ {seq2,[],[s4,s5]},
+ {seq_in_par,[parallel],[p10,p11,{group,seq},p12,{group,seq2},p13]},
+ {capture_io,[parallel],[cap1,cap2]}].
+
+%%--------------------------------------------------------------------
+%% @spec all() -> GroupsAndTestCases | {skip,Reason}
+%% GroupsAndTestCases = [{group,GroupName} | TestCase]
+%% GroupName = atom()
+%% TestCase = atom()
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+all() ->
+ [tc1,{group,p},{group,p_restart},p3,
+ {group,seq_in_par},
+ cap1,cap2,
+ {group,capture_io}].
+
+tc1(_C) ->
+ ok.
+
+p1(_) ->
+ %% OTP-10101:
+ %%
+ %% External apps/processes started by init_per_suite (a common operation)
+ %% will inherit the group leader of the init_per_suite process, i.e. the
+ %% test_server test case control process (executing run_test_case_msgloop/7).
+ %% If a parallel test case later triggers the external app to print with,
+ %% e.g., io:format() (also a common operation), the calling process will hang!
+ %% The reason is that a parallel test case has a dedicated IO server
+ %% process, separate from the central test case control process. The
+ %% latter process is not executing run_test_case_msgloop/7 and will not
+ %% respond to IO messages. It is still group leader for the external app,
+ %% however, which is wrong. It is the IO process for the parallel test
+ %% case that should be group leader - but only for the particular
+ %% invocation, since other parallel test cases could be invoking the
+ %% external app too.
+ print("hej\n").
+
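+%% A minimal illustration of the scenario described in p1/1 (a sketch only,
+%% not executed by this suite): the io server spawned from init_per_suite
+%% keeps whatever group leader it inherited at spawn time, which can be
+%% inspected with, e.g.:
+%%
+%%   {group_leader,GL} = process_info(whereis(my_io_server), group_leader),
+%%   io:format("my_io_server group leader: ~p~n", [GL]).
+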
+p2(_) ->
+ print("hopp\n").
+
+p_restart_my_io_server(_) ->
+ %% Restart the IO server and change its group leader. This used to
+ %% set the group leader to a process that would soon die.
+ Ref = erlang:monitor(process, my_io_server),
+ my_io_server ! die,
+ receive
+ {'DOWN',Ref,_,_,_} ->
+ start_my_io_server()
+ end.
+
+p3(_) ->
+ %% OTP-10125. This would crash since the group leader process
+ %% for my_io_server had died.
+ print("hoppsan\n").
+
+print(String) ->
+ my_io_server ! {print,self(),String},
+ receive
+ {printed,String} ->
+ ok
+ end.
+
+start_my_io_server() ->
+ Parent = self(),
+ Pid = spawn(fun() -> my_io_server(Parent) end),
+ receive
+ {Pid,started} ->
+ io:format("~p\n", [process_info(Pid)]),
+ ok
+ end.
+
+my_io_server(Parent) ->
+ register(my_io_server, self()),
+ Parent ! {self(),started},
+ my_io_server_loop().
+
+my_io_server_loop() ->
+ receive
+ {print,From,String} ->
+ io:put_chars(String),
+ From ! {printed,String},
+ my_io_server_loop();
+ die ->
+ ok
+ end.
+
+p10(_) ->
+ receive after 1 -> ok end.
+
+p11(_) ->
+ ok.
+
+p12(_) ->
+ ok.
+
+p13(_) ->
+ ok.
+
+s1(_) ->
+ ok.
+
+s2(_) ->
+ ok.
+
+s3(_) ->
+ ok.
+
+s4(_) ->
+ ok.
+
+s5(_) ->
+ ok.
+
+cap1(_) ->
+ ct:capture_start(),
+ IO = gen_io(cap1, 10, []),
+ ct:capture_stop(),
+ IO = ct:capture_get(),
+ ok.
+
+cap2(_) ->
+ ct:capture_start(),
+ {Pid,Ref} = spawn_monitor(fun() ->
+ exit(gen_io(cap2, 42, []))
+ end),
+ receive
+ {'DOWN',Ref,process,Pid,IO} ->
+ ct:capture_stop(),
+ IO = ct:capture_get(),
+ ok
+ end.
+
+gen_io(_, 0, Acc) ->
+ lists:reverse(Acc);
+gen_io(Label, N, Acc) ->
+ S = lists:flatten(io_lib:format("~s: ~p\n", [Label,N])),
+ io:put_chars(S),
+ gen_io(Label, N-1, [S|Acc]).
diff --git a/lib/common_test/test/ct_groups_search_SUITE.erl b/lib/common_test/test/ct_groups_search_SUITE.erl
new file mode 100644
index 0000000000..6b1c1f4634
--- /dev/null
+++ b/lib/common_test/test/ct_groups_search_SUITE.erl
@@ -0,0 +1,1245 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%-------------------------------------------------------------------
+%%% File: ct_groups_search_SUITE
+%%%
+%%% Description:
+%%%
+%%%
+%%% The suites used for the test are located in the data directory.
+%%%
+%%% The group(s) and case(s) are specified according to this:
+%%%
+%%% Tests = ct_groups:find_groups(Mod, GroupPaths, TestCases, GroupDef)
+%%%
+%%% GroupPaths = GroupPath | [GroupPath]
+%%% GroupPath = atom() | [atom()]
+%%%
+%%% CT will find all paths that include GroupPath. GroupPath can be a
+%%% single group, or a list of groups along the path to TestCases.
+%%% If GroupPath is the latter, the last group in the list must be
+%%% the "terminating" group in the path, or it will be impossible to
+%%% execute *only* the test cases in the higher level groups, as in this case:
+%%% groups() -> [{g1,[],[tc1,{g2,[],[tc2]}]}].
+%%% Compare: find_groups(x, g1, all, groups()), and
+%%% find_groups(x, [[g1]], all, groups())
+%%%
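+%%% As an illustration (a sketch only; the exact terms are what the test
+%%% cases in this suite assert), the two calls above should return roughly:
+%%%
+%%%   find_groups(x, g1, all, groups()) ->
+%%%     [{conf,[{name,g1}],{x,init_per_group},
+%%%       [{x,tc1},
+%%%        {conf,[{name,g2}],{x,init_per_group},[{x,tc2}],{x,end_per_group}}],
+%%%       {x,end_per_group}}]
+%%%
+%%%   find_groups(x, [[g1]], all, groups()) ->
+%%%     [{conf,[{name,g1}],{x,init_per_group},[{x,tc1}],{x,end_per_group}}]
+%%%
+%%% i.e. the [[g1]] form treats g1 as the terminating group and leaves the
+%%% g2 subgroup (and tc2) out.
+%%%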
+%%% Some examples:
+%%%
+%%% GroupPaths = g1, means find all paths with g1 included
+%%% GroupPaths = [g1], -''-
+%%% GroupPaths = [g1,g2], search twice - once for g1 and once for g2
+%%% GroupPaths = [[g1,g2]], find cases under group g1 and sub group g2
+%%% GroupPaths = [[g1,g2],[g1,g3]], find cases for g1-g2 AND g1-g3
+%%%
+%%% TestCases = all | atom() | [atom()]
+%%%
+%%%-------------------------------------------------------------------
+
+-module(ct_groups_search_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/src/ct_util.hrl").
+
+
+-define(eh, ct_test_support_eh).
+
+-define(M1, groups_search_dummy_1_SUITE).
+-define(M2, groups_search_dummy_2_SUITE).
+
+%%--------------------------------------------------------------------
+%% TEST SERVER CALLBACK FUNCTIONS
+%%--------------------------------------------------------------------
+
+init_per_suite(Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ code:add_patha(DataDir),
+ M1Erl = filename:join(DataDir, atom_to_list(?M1)++".erl"),
+ M2Erl = filename:join(DataDir, atom_to_list(?M2)++".erl"),
+ {ok,?M1} = compile:file(M1Erl, [{outdir,DataDir}]),
+ {ok,?M2} = compile:file(M2Erl, [{outdir,DataDir}]),
+ {module,?M1} = code:load_file(?M1),
+ {module,?M2} = code:load_file(?M2),
+
+ Config1 = ct_test_support:init_per_suite(Config),
+ Config1.
+
+end_per_suite(Config) ->
+ ct_test_support:end_per_suite(Config).
+
+init_per_testcase(TestCase, Config) ->
+ ct_test_support:init_per_testcase(TestCase, Config).
+
+end_per_testcase(TestCase, Config) ->
+ ct_test_support:end_per_testcase(TestCase, Config).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+groups() ->
+ [
+ {find_groups,[],[all_groups,
+ testcases_in_all_groups,
+ all_in_top_group1,
+ all_in_top_group2,
+ all_in_sub_group1,
+ all_in_sub_group2,
+ testcase_in_top_group1,
+ testcase_in_top_group2,
+ testcase_in_sub_group1,
+ testcase_in_sub_group2,
+ testcase_in_top_groups1,
+ testcase_in_top_groups2,
+ testcase_in_top_groups3,
+ testcase_in_top_groups4,
+ testcase_in_top_groups5,
+ testcase_in_top_groups6,
+ testcase_in_top_groups7,
+ testcase_in_sub_groups1,
+ testcase_in_sub_groups2,
+ testcase_in_sub_groups3,
+ testcase_in_sub_groups4,
+ testcase_in_sub_groups5,
+ testcase_in_sub_groups6,
+ testcase_in_sub_groups7,
+ testcase_in_sub_groups8,
+ testcase_in_sub_groups9,
+ testcase_in_sub_groups10,
+ testcase_in_sub_groups11,
+ testcase_in_sub_groups12,
+ testcase_in_sub_groups13,
+ bad_testcase_in_sub_groups1]},
+
+ {run_groups,[sequence],[run_groups_with_options,
+ run_groups_with_testspec]}
+ ].
+
+all() ->
+ [{group,find_groups,[parallel]},
+ {group,run_groups}].
+
+
+
+%%--------------------------------------------------------------------
+%% TEST CASES CHECKING RETURN VALUE ONLY
+%%--------------------------------------------------------------------
+
+all_groups(_) ->
+ GPath = all, TCs = all,
+
+ Found = ct_groups:find_groups(?M1, GPath, TCs, groups1()),
+
+ Top1 = ct_groups:find_groups(?M1, top1, TCs, groups1()),
+ Top2 = ct_groups:find_groups(?M1, top2, TCs, groups1()),
+
+ All = Top1 ++ Top2 ++ [{conf,[{name,sub2}],
+ {?M1,init_per_group},
+ [{?M1,sub2_tc1},{?M1,sub2_tc2}],
+ {?M1,end_per_group}}],
+
+ All = Found,
+
+ {?M1,GPath,TCs,Top1++Top2}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcases_in_all_groups(_) ->
+ GPath = all, TCs = [tc3,sub_tc2],
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [Top1 =
+ {conf,[{name,top1}],{?M2,init_per_group},
+ [{?M2,tc3},
+ {conf,[{name,sub11}],
+ {?M2,init_per_group},[{?M2,tc3},{?M2,sub_tc2}],
+ {?M2,end_per_group}},
+ {conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{?M2,tc3},{?M2,sub_tc2},
+ {conf,[{name,sub121}],
+ {?M2,init_per_group},[{?M2,tc3},{?M2,sub_tc2}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ Top2 =
+ {conf,[{name,top2}],{?M2,init_per_group},
+ [{?M2,tc3},
+ {conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{?M2,tc3},{?M2,sub_tc2},
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},[{?M2,tc3},{?M2,sub_tc2}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{?M2,tc3},{?M2,sub_tc2},
+ {conf,[{name,sub221}],
+ {?M2,init_per_group},[{?M2,tc3},{?M2,sub_tc2}],
+ {?M2,end_per_group}},
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},[{?M2,tc3},{?M2,sub_tc2}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{?M2,tc3},{?M2,sub_tc2},
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},[{?M2,tc3},{?M2,sub_tc2}],{?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{?M2,tc3},{?M2,sub_tc2},
+ {conf,[{name,sub221}],
+ {?M2,init_per_group},[{?M2,tc3},{?M2,sub_tc2}],{?M2,end_per_group}},
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},[{?M2,tc3},{?M2,sub_tc2}],{?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub221}],
+ {?M2,init_per_group},[{?M2,tc3},{?M2,sub_tc2}],{?M2,end_per_group}},
+
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},[{?M2,tc3},{?M2,sub_tc2}],{?M2,end_per_group}}]
+
+ = Found,
+
+ {?M2,GPath,TCs,[Top1,Top2]}.
+
+%%%-----------------------------------------------------------------
+%%%
+all_in_top_group1(_) ->
+ GPath= top1, TCs = all,
+
+ Found = ct_groups:find_groups(?M1, GPath, TCs, groups1()),
+
+ [{conf,[{name,top1}],
+ {?M1,init_per_group},
+ [{?M1,top1_tc1},{?M1,top1_tc2},
+ {conf,[{name,sub1}],
+ {?M1,init_per_group},
+ [{?M1,sub1_tc1},{?M1,sub1_tc2}],
+ {?M1,end_per_group}}],
+ {?M1,end_per_group}}] = Found,
+
+ {?M1,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+all_in_top_group2(_) ->
+ GPath= top2, TCs = all,
+
+ Found = ct_groups:find_groups(?M1, GPath, TCs, groups1()),
+
+ [{conf,[{name,top2}],
+ {?M1,init_per_group},
+ [{conf,[{name,sub2}],
+ {?M1,init_per_group},
+ [{?M1,sub2_tc1},{?M1,sub2_tc2}],
+ {?M1,end_per_group}},
+ {?M1,top2_tc1},{?M1,top2_tc2}],
+ {?M1,end_per_group}}] = Found,
+
+ {?M1,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+all_in_sub_group1(_) ->
+ GPath = sub1, TCs = all,
+
+ Found = ct_groups:find_groups(?M1, GPath, TCs, groups1()),
+
+ [{conf,[{name,top1}],
+ {?M1,init_per_group},
+ [{conf,[{name,sub1}],
+ {?M1,init_per_group},
+ [{?M1,sub1_tc1},{?M1,sub1_tc2}],
+ {?M1,end_per_group}}],
+ {?M1,end_per_group}}] = Found,
+
+ {?M1,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+all_in_sub_group2(_) ->
+ GPath = sub2, TCs = all,
+
+ Found = ct_groups:find_groups(?M1, GPath, TCs, groups1()),
+
+ [Top2 =
+ {conf,[{name,top2}],
+ {?M1,init_per_group},
+ [{conf,[{name,sub2}],
+ {?M1,init_per_group},
+ [{?M1,sub2_tc1},{?M1,sub2_tc2}],
+ {?M1,end_per_group}}],
+ {?M1,end_per_group}},
+
+ {conf,[{name,sub2}],
+ {?M1,init_per_group},
+ [{?M1,sub2_tc1},{?M1,sub2_tc2}],
+ {?M1,end_per_group}}] = Found,
+
+ {?M1,GPath,TCs,Top2}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_top_group1(_) ->
+ GPath = top1, TCs = [top1_tc2],
+
+ Found = ct_groups:find_groups(?M1, GPath, TCs, groups1()),
+
+ [{conf,[{name,top1}],
+ {?M1,init_per_group},
+ [{?M1,top1_tc2}],
+ {?M1,end_per_group}}] = Found,
+
+ {?M1,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_top_group2(_) ->
+ GPath = top2, TCs = [top2_tc2],
+
+ Found = ct_groups:find_groups(?M1, GPath, TCs, groups1()),
+
+ [{conf,[{name,top2}],
+ {?M1,init_per_group},
+ [{?M1,top2_tc2}],
+ {?M1,end_per_group}}] = Found,
+
+ {?M1,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_group1(_) ->
+ GPath = sub1, TCs = [sub1_tc2],
+
+ Found = ct_groups:find_groups(?M1, GPath, TCs, groups1()),
+
+ [{conf,[{name,top1}],
+ {?M1,init_per_group},
+ [{conf,[{name,sub1}],
+ {?M1,init_per_group},
+ [{?M1,sub1_tc2}],
+ {?M1,end_per_group}}],
+ {?M1,end_per_group}}] = Found,
+
+ {?M1,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_group2(_) ->
+ GPath = sub2, TCs = [sub2_tc2],
+
+ Found = ct_groups:find_groups(?M1, GPath, TCs, groups1()),
+
+ [Top2 =
+ {conf,[{name,top2}],
+ {?M1,init_per_group},
+ [{conf,[{name,sub2}],
+ {?M1,init_per_group},
+ [{?M1,sub2_tc2}],
+ {?M1,end_per_group}}],
+ {?M1,end_per_group}},
+
+ {conf,[{name,sub2}],
+ {?M1,init_per_group},
+ [{?M1,sub2_tc2}],
+ {?M1,end_per_group}}] = Found,
+
+ {?M1,GPath,TCs,Top2}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_top_groups1(_) ->
+ GPath = [top1,top2], TCs = all,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{?M2,top1_tc1},{?M2,top_tc2},{?M2,tc3},
+ {conf,[{name,sub11}],
+ {?M2,init_per_group},
+ [{?M2,sub11_tc1},{?M2,sub_tc2},{?M2,tc3}],
+ {?M2,end_per_group}},
+ {conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{?M2,sub12_tc1},{?M2,sub_tc2},{?M2,tc3},
+ {conf,[{name,sub121}],
+ {?M2,init_per_group},
+ [{?M2,sub121_tc1},{?M2,sub_tc2},{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{?M2,sub21_tc1},{?M2,sub_tc2},{?M2,tc3},
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,sub2xx_tc1},{?M2,sub_tc2},{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+ {?M2,top2_tc1},{?M2,top_tc2},{?M2,tc3},
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub221}],
+ {?M2,init_per_group},
+ [{?M2,sub221_tc1},{?M2,sub_tc2},{?M2,tc3}],
+ {?M2,end_per_group}},
+ {?M2,sub22_tc1},{?M2,sub_tc2},{?M2,tc3},
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,sub2xx_tc1},{?M2,sub_tc2},{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_top_groups2(_) ->
+ GPath = [top1,top2], TCs = tc3,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{?M2,tc3},
+ {conf,[{name,sub11}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}},
+ {conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{?M2,tc3},
+ {conf,[{name,sub121}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{?M2,tc3},
+ {conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{?M2,tc3},
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{?M2,tc3},
+ {conf,[{name,sub221}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}},
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_top_groups3(_) ->
+ GPath = [top1,top2], TCs = top1_tc1,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{?M2,top1_tc1}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_top_groups4(_) ->
+ GPath = [top1,top2], TCs = sub2xx_tc1,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,sub2xx_tc1}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,sub2xx_tc1}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_top_groups5(_) ->
+ GPath = [top1,top2], TCs = [sub21_tc1,sub22_tc1],
+
+ Found = ct_groups:find_groups(?M2, [top1,top2], [sub21_tc1,sub22_tc1],
+ groups2()),
+
+ [{conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{?M2,sub21_tc1}],
+ {?M2,end_per_group}},
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{?M2,sub22_tc1}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_top_groups6(_) ->
+ GPath = [[top1],[top2]], TCs = tc3,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}},
+ {conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_top_groups7(_) ->
+ GPath = [[top1],[top2]], TCs = all,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{?M2,top1_tc1},
+ {?M2,top_tc2},
+ {?M2,tc3}],
+ {?M2,end_per_group}},
+ {conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{?M2,top2_tc1},
+ {?M2,top_tc2},
+ {?M2,tc3}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups1(_) ->
+ GPath = [sub121], TCs = tc3,
+
+ Found = ct_groups:find_groups(?M2, sub121, tc3, groups2()),
+ Found = ct_groups:find_groups(?M2, [sub121], tc3, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub121}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups2(_) ->
+ GPath = sub12, TCs = tc3,
+
+ Found = ct_groups:find_groups(?M2, sub12, tc3, groups2()),
+ Found = ct_groups:find_groups(?M2, [sub12], tc3, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{?M2,tc3},
+ {conf,[{name,sub121}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ FoundX = ct_groups:find_groups(?M2, [[sub12]], tc3, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = FoundX,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups3(_) ->
+ GPath = [sub121,sub221], TCs = all,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [Top1 =
+ {conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub121}],
+ {?M2,init_per_group},
+ [{?M2,sub121_tc1},
+ {?M2,sub_tc2},
+ {?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ Top2 =
+ {conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub221}],
+ {?M2,init_per_group},
+ [{?M2,sub221_tc1},
+ {?M2,sub_tc2},
+ {?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub221}],
+ {?M2,init_per_group},
+ [{?M2,sub221_tc1},
+ {?M2,sub_tc2},
+ {?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub221}],
+ {?M2,init_per_group},
+ [{?M2,sub221_tc1},
+ {?M2,sub_tc2},
+ {?M2,tc3}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,[Top1,Top2]}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups4(_) ->
+ GPath = [top1,sub21], TCs = sub_tc2,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [Top1 =
+ {conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub11}],
+ {?M2,init_per_group},
+ [{?M2,sub_tc2}],
+ {?M2,end_per_group}},
+ {conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{?M2,sub_tc2},
+ {conf,[{name,sub121}],
+ {?M2,init_per_group},
+ [{?M2,sub_tc2}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ Top2 =
+ {conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{?M2,sub_tc2},
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,sub_tc2}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{?M2,sub_tc2},
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,sub_tc2}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,[Top1,Top2]}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups5(_) ->
+ GPath = [[top1,sub12]], TCs = sub12_tc1,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{?M2,sub12_tc1}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups6(_) ->
+ GPath = [[top1,sub12]], TCs = [sub_tc2],
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{?M2,sub_tc2}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups7(_) ->
+ GPath = [[top1,sub12]], TCs = [sub12_tc1,sub_tc2],
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{?M2,sub12_tc1},
+ {?M2,sub_tc2}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups8(_) ->
+ GPath = [[top2,sub22]], TCs = [sub22_tc1,sub_tc2],
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{?M2,sub22_tc1},
+ {?M2,sub_tc2}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups9(_) ->
+ GPath = [[sub2xx]], TCs = tc3,
+
+ Found = ct_groups:find_groups(?M2, sub2xx, tc3, groups2()),
+ Found = ct_groups:find_groups(?M2, [[sub2xx]], tc3, groups2()),
+
+ [Top2 =
+ {conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Top2}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups10(_) ->
+ GPath = [[sub22,sub2xx]], TCs = tc3,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [Top2 =
+ {conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Top2}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups11(_) ->
+ GPath = [[top1,sub12,sub121]], TCs = all,
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top1}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub12}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub121}],
+ {?M2,init_per_group},
+ [{?M2,sub121_tc1},
+ {?M2,sub_tc2},
+ {?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups12(_) ->
+ GPath = [[top2,sub2xx]], TCs = [sub2xx_tc1,tc3],
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub21}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,sub2xx_tc1},
+ {?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}},
+ {conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,sub2xx_tc1},
+ {?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+testcase_in_sub_groups13(_) ->
+ GPath = [[top2,sub22,sub2xx]], TCs = [top2_tc1,sub2xx_tc1,tc3],
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [{conf,[{name,top2}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub22}],
+ {?M2,init_per_group},
+ [{conf,[{name,sub2xx}],
+ {?M2,init_per_group},
+ [{?M2,sub2xx_tc1},
+ {?M2,tc3}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}],
+ {?M2,end_per_group}}] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+bad_testcase_in_sub_groups1(_) ->
+ GPath = [sub2xx], TCs = [top2_tc1],
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%%
+bad_testcase_in_sub_groups2(_) ->
+ GPath = [sub12,sub2xx], TCs = [top1_tc1,top2_tc1],
+
+ Found = ct_groups:find_groups(?M2, GPath, TCs, groups2()),
+
+ [] = Found,
+
+ {?M2,GPath,TCs,Found}.
+
+%%%-----------------------------------------------------------------
+%%% CASES EXECUTING THE TESTS
+%%%-----------------------------------------------------------------
+
+run_groups_with_options(Config) ->
+ DataDir = ?config(data_dir, Config),
+
+ {M1All,M1Rest,M2All,M2Rest} = get_all_groups_and_cases(Config),
+
+ M1AllGrs = lists:flatmap(fun({Path,_,_}) when is_atom(hd(Path)) -> Path;
+ ({Path,_,_}) when is_list(hd(Path)) -> Path;
+ ({Path,_,_}) -> [Path]
+ end, M1All),
+
+ %% ct:pal("NOW RUNNING M1 TEST: ~p", [M1All]),
+
+ {OptsM11,ERPidM11} = setup([{dir,DataDir},{suite,?M1},
+ {group,M1AllGrs},{label,m1_all_cases}], Config),
+ M1AllGrInfo = {M1AllGrs,lists:flatten([Found || {_,_,Found} <- M1All])},
+ ok = execute(m1_all_cases, M1AllGrInfo, OptsM11, ERPidM11, Config),
+
+ lists:foldl(
+ fun({GrPath,TCs,Found}, N) ->
+ TestName = list_to_atom("m1_spec_cases_" ++ integer_to_list(N)),
+ %% ct:pal("NOW RUNNING M1 TEST ~p: ~p + ~p",
+ %% [TestName,GrPath,TCs]),
+ {OptsM12,ERPidM12} = setup([{dir,DataDir},{suite,?M1},
+ {group,GrPath},{testcase,TCs},
+ {label,TestName}], Config),
+ ok = execute(TestName, {GrPath,TCs,Found},
+ OptsM12, ERPidM12, Config),
+ N+1
+ end, 1, M1Rest),
+
+ %% ct:pal("NOW RUNNING M2 TEST: ~p", [M2All]),
+
+ M2AllGrs = lists:flatmap(fun({Path,_,_}) when is_atom(hd(Path)) -> Path;
+ ({Path,_,_}) when is_list(hd(Path)) -> Path;
+ ({Path,_,_}) -> [Path]
+ end, M2All),
+
+
+ {OptsM21,ERPidM21} = setup([{dir,DataDir},{suite,?M2},
+ {group,M2AllGrs},{testcase,all},
+ {label,m2_all_cases}], Config),
+ M2AllGrInfo = {M2AllGrs,lists:flatten([Found || {_,_,Found} <- M2All])},
+ ok = execute(m2_all_cases, M2AllGrInfo, OptsM21, ERPidM21, Config),
+
+ lists:foldl(
+ fun({GrPath,TCs,Found}, N) ->
+ TestName = list_to_atom("m2_spec_cases_" ++ integer_to_list(N)),
+ %% ct:pal("NOW RUNNING M2 TEST ~p: ~p + ~p", [TestName,GrPath,TCs]),
+ {OptsM22,ERPidM22} = setup([{dir,DataDir},{suite,?M2},
+ {group,GrPath},{testcase,TCs},
+ {label,TestName}], Config),
+ ok = execute(TestName, {GrPath,TCs,Found},
+ OptsM22, ERPidM22, Config),
+ N+1
+ end, 1, M2Rest),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%%
+run_groups_with_testspec(Config) ->
+ Name = run_groups_with_testspec,
+ DataDir = ?config(data_dir, Config),
+ PrivDir = ?config(priv_dir, Config),
+
+ {M1All,M1Rest,M2All,M2Rest} = get_all_groups_and_cases(Config),
+
+ M1AllGrs = lists:flatmap(fun({Path,_,_}) when is_atom(hd(Path)) -> Path;
+ ({Path,_,_}) when is_list(hd(Path)) -> Path;
+ ({Path,_,_}) -> [Path]
+ end, M1All),
+ M1AllTerm = {groups,DataDir,?M1,M1AllGrs},
+
+ M1RestTerms = lists:map(
+ fun({GrPath,TCs,_}) ->
+ {groups,DataDir,?M1,GrPath,{cases,TCs}}
+ end, M1Rest),
+
+ M2AllGrs = lists:flatmap(fun({Path,_,_}) when is_atom(hd(Path)) -> Path;
+ ({Path,_,_}) when is_list(hd(Path)) -> Path;
+ ({Path,_,_}) -> [Path]
+ end, M2All),
+ M2AllTerm = {groups,DataDir,?M2,M2AllGrs,{cases,all}},
+
+ M2RestTerms = lists:map(
+ fun({GrPath,TCs,_}) ->
+ {groups,DataDir,?M2,GrPath,{cases,TCs}}
+ end, M2Rest),
+
+ GroupTerms = lists:flatten([M1AllTerm,
+ M1RestTerms,
+ M2AllTerm,
+ M2RestTerms]),
+
+ TestSpec = [{merge_tests,false},
+ {label,Name}] ++ GroupTerms,
+
+ ct:pal("Here's the test spec:~n~p", [TestSpec]),
+
+ TestSpecName = ct_test_support:write_testspec(TestSpec, PrivDir,
+ "groups_search_spec"),
+
+ {Opts,ERPid} = setup([{spec,TestSpecName}], Config),
+ GroupInfo =
+ [{M1AllTerm,lists:flatten([Found || {_,_,Found} <- M1All])} |
+ M1Rest] ++
+ [{M2AllTerm,lists:flatten([Found || {_,_,Found} <- M2All])} |
+ M2Rest],
+ ok = execute(Name, GroupInfo, Opts, ERPid, Config).
+
+%%%-----------------------------------------------------------------
+%%% HELP FUNCTIONS
+%%%-----------------------------------------------------------------
+
+groups1() ->
+ [{top1,[],[top1_tc1,top1_tc2,{sub1,[],[sub1_tc1,sub1_tc2]}]},
+ {top2,[],[{group,sub2},top2_tc1,top2_tc2]},
+ {sub2,[],[sub2_tc1,sub2_tc2]}].
+
+groups2() ->
+ [{top1,[],[top1_tc1,top_tc2,tc3,
+ {sub11,[],[sub11_tc1,sub_tc2,tc3]},
+ {sub12,[],[sub12_tc1,sub_tc2,tc3,
+ {sub121,[],[sub121_tc1,sub_tc2,tc3]}]}]},
+ {top2,[],[{group,sub21},top2_tc1,top_tc2,tc3,{group,sub22}]},
+ {sub21,[],[sub21_tc1,sub_tc2,tc3,{group,sub2xx}]},
+ {sub22,[],[{group,sub221},sub22_tc1,sub_tc2,tc3,{group,sub2xx}]},
+ {sub221,[],[sub221_tc1,sub_tc2,tc3]},
+ {sub2xx,[],[sub2xx_tc1,sub_tc2,tc3]}].
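+
+%% For orientation, groups2() above describes the following group tree (test
+%% case names omitted; sub2xx is referenced from both sub21 and sub22):
+%%
+%%   top1
+%%     sub11
+%%     sub12
+%%       sub121
+%%   top2
+%%     sub21
+%%       sub2xx
+%%     sub22
+%%       sub221
+%%       sub2xx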
+
+get_all_groups_and_cases(Config) ->
+ {value,{_,_,FindGrTCs}} = lists:keysearch(find_groups, 1, groups()),
+
+ MGTFs = [apply(?MODULE, TC, [Config]) || TC <- FindGrTCs],
+
+ ct:pal("Extracted data from ~p test cases", [length(MGTFs)]),
+
+ lists:foldr(fun({M,Gs,TCs,F},
+ {M11,M12,M21,M22}) ->
+ case {M,Gs,TCs} of
+ {?M1,all,_} -> {M11,[{Gs,TCs,F}|M12],M21,M22};
+ {?M1,_,all} -> {[{Gs,all,F}|M11],M12,M21,M22};
+ {?M1,_,_} -> {M11,[{Gs,TCs,F}|M12],M21,M22};
+ {?M2,all,_} -> {M11,M12,M21,[{Gs,TCs,F}|M22]};
+ {?M2,_,all} -> {M11,M12,[{Gs,all,F}|M21],M22};
+ {?M2,_,_} -> {M11,M12,M21,[{Gs,TCs,F}|M22]}
+ end
+ end, {[],[],[],[]}, MGTFs).
+
+%%%-----------------------------------------------------------------
+
+setup(Test, Config) ->
+ Opts0 = ct_test_support:get_opts(Config),
+ Level = ?config(trace_level, Config),
+ EvHArgs = [{cbm,ct_test_support},{trace_level,Level}],
+ Opts = Opts0 ++ [{event_handler,{?eh,EvHArgs}}|Test],
+ ERPid = ct_test_support:start_event_receiver(Config),
+ {Opts,ERPid}.
+
+execute(Name, TestParams, Opts, ERPid, Config) ->
+ ok = ct_test_support:run(Opts, Config),
+ Events = ct_test_support:get_events(ERPid, Config),
+ Events1 = reformat(Events, ?eh),
+ ct_test_support:log_events(Name,
+ Events1,
+ ?config(priv_dir, Config),
+ Opts),
+ verify_events(Name, TestParams, Events1).
+
+reformat(Events, EH) ->
+ ct_test_support:reformat(Events, EH).
+
+%%%-----------------------------------------------------------------
+%%% TEST EVENTS
+verify_events(Name, Params, Events) ->
+ %% 2 test runs (ct:run_test + script_start) are performed by default
+ verify_events(Name, Params, Events, 2).
+
+verify_events(_, _, _, 0) ->
+ ok;
+verify_events(Name, Params, Events, N) ->
+ test_events(Name, Params, Events),
+ verify_events(Name, Params, Events, N-1).
+
+%%%-----------------------------------------------------------------
+%%% check run_groups_with_options
+
+test_events(TestName, {GrPath,Found}, Events) ->
+ test_events(TestName, {GrPath,all,Found}, Events);
+
+test_events(TestName, {GrPath,TCs,Found}, Events)
+ when TestName /= run_groups_with_testspec ->
+ try check_events(Events, flatten_tests(Found)) of
+ ok -> ok
+ catch
+ throw:Reason ->
+ ct:pal("Test failed for ~p with group path ~p and cases ~p"
+ "~nReason: ~p", [TestName,GrPath,TCs,Reason]),
+ throw(failed)
+ end;
+
+%%%-----------------------------------------------------------------
+%%% check run_groups_with_testspec
+
+test_events(run_groups_with_testspec, Params, Events) ->
+ AllFound = lists:flatmap(fun({_All,Found}) when is_tuple(Found) ->
+ [Found];
+ ({_All,Found}) ->
+ Found;
+ ({_Gr,_TCs,Found}) when is_tuple(Found) ->
+ [Found];
+ ({_Gr,_TCs,Found}) ->
+ Found
+ end, Params),
+ try check_events(Events, flatten_tests(AllFound)) of
+ ok -> ok
+ catch
+ throw:Reason ->
+ ct:pal("Test failed for run_groups_with_testspec."
+ "~nReason: ~p", [Reason]),
+ throw(failed)
+ end.
+
+flatten_tests({conf,[{name,G}|_],{Mod,_I},Tests,_E}) ->
+ lists:flatten([{group,Mod,G} | flatten_tests(Tests)]);
+flatten_tests([{conf,[{name,G}|_],{Mod,_I},Tests,_E} | Confs]) ->
+ lists:flatten([{group,Mod,G} | flatten_tests(Tests)]) ++
+ lists:flatten(flatten_tests(Confs));
+flatten_tests([{_Mod,_TC} = Case | Tests]) ->
+ lists:flatten([Case | flatten_tests(Tests)]);
+flatten_tests([]) ->
+ [].
+
+check_events([{_,tc_start,{Mod,{init_per_group,G,_}}} | Evs],
+ [{group,Mod,G} | Check]) ->
+ check_events(Evs, Check);
+check_events([{_,tc_start,{Mod,TC}} | Evs],
+ [{Mod,TC} | Check]) when is_atom(TC) ->
+ check_events(Evs, Check);
+check_events([{_,tc_start,{Mod,{init_per_group,G,_}}} | _Evs], Check) ->
+ ct:pal("CHECK FAILED!~nGroup ~p in ~p not found in ~p.",
+ [G,Mod,Check]),
+ throw({test_not_found,{Mod,G}});
+check_events([{_,tc_start,{Mod,TC}} | _Evs], Check)
+ when is_atom(TC), TC /= init_per_suite, TC /= end_per_suite ->
+ ct:pal("CHECK FAILED!~nCase ~p in ~p not found in ~p.",
+ [TC,Mod,Check]),
+ throw({test_not_found,{Mod,TC}});
+check_events([Group | Evs], Check) when is_list(Group) ->
+ Check1 = check_events(Group, Check),
+ check_events(Evs, Check1);
+check_events(_, []) ->
+ ok;
+check_events([Elem | Evs], Check) when is_tuple(Elem) ->
+ check_events(Evs, Check);
+check_events([], Check = [_|_]) ->
+ ct:pal("CHECK FAILED!~nTests remain: ~p", [Check]),
+ throw({tests_remain,Check});
+check_events([Wut | _],_) ->
+ throw({unexpected,Wut}).
+
diff --git a/lib/common_test/test/ct_groups_search_SUITE_data/groups_search_dummy_1_SUITE.erl b/lib/common_test/test/ct_groups_search_SUITE_data/groups_search_dummy_1_SUITE.erl
new file mode 100644
index 0000000000..6f6922e686
--- /dev/null
+++ b/lib/common_test/test/ct_groups_search_SUITE_data/groups_search_dummy_1_SUITE.erl
@@ -0,0 +1,83 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(groups_search_dummy_1_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+
+all() ->
+ [{group,top1},
+ {group,top2}].
+
+groups() ->
+ [{top1,[],[top1_tc1,top1_tc2,{sub1,[],[sub1_tc1,sub1_tc2]}]},
+ {top2,[],[{group,sub2},top2_tc1,top2_tc2]},
+ {sub2,[],[sub2_tc1,sub2_tc2]}].
+
+%%%-----------------------------------------------------------------
+%%% CONFIG FUNCS
+%%%-----------------------------------------------------------------
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, _Config) ->
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% TEST CASES
+%%%-----------------------------------------------------------------
+
+top1_tc1(_) ->
+ ok.
+
+top1_tc2(_) ->
+ ok.
+
+sub1_tc1(_) ->
+ ok.
+
+sub1_tc2(_) ->
+ ok.
+
+top2_tc1(_) ->
+ ok.
+
+top2_tc2(_) ->
+ ok.
+
+sub2_tc1(_) ->
+ ok.
+
+sub2_tc2(_) ->
+ ok.
diff --git a/lib/common_test/test/ct_groups_search_SUITE_data/groups_search_dummy_2_SUITE.erl b/lib/common_test/test/ct_groups_search_SUITE_data/groups_search_dummy_2_SUITE.erl
new file mode 100644
index 0000000000..ac3c000079
--- /dev/null
+++ b/lib/common_test/test/ct_groups_search_SUITE_data/groups_search_dummy_2_SUITE.erl
@@ -0,0 +1,102 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2009-2011. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(groups_search_dummy_2_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+
+all() ->
+ [{group,top1},
+ {group,top2}].
+
+groups() ->
+ [{top1,[],[top1_tc1,top_tc2,tc3,
+ {sub11,[],[sub11_tc1,sub_tc2,tc3]},
+ {sub12,[],[sub12_tc1,sub_tc2,tc3,
+ {sub121,[],[sub121_tc1,sub_tc2,tc3]}]}]},
+
+ {top2,[],[{group,sub21},top2_tc1,top_tc2,tc3,{group,sub22}]},
+ {sub21,[],[sub21_tc1,sub_tc2,tc3,{group,sub2xx}]},
+ {sub22,[],[{group,sub221},sub22_tc1,sub_tc2,tc3,{group,sub2xx}]},
+ {sub221,[],[sub221_tc1,sub_tc2,tc3]},
+ {sub2xx,[],[sub2xx_tc1,sub_tc2,tc3]}].
+
+%%%-----------------------------------------------------------------
+%%% CONFIG FUNCS
+%%%-----------------------------------------------------------------
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+init_per_group(_, Config) ->
+ Config.
+
+end_per_group(_, _Config) ->
+ ok.
+
+%%%------------------------------------------------------------------
+%%% TEST CASES
+%%%------------------------------------------------------------------
+
+top1_tc1(_) ->
+ ok.
+
+top_tc2(_) ->
+ ok.
+
+tc3(_) ->
+ ok.
+
+sub_tc2(_) ->
+ ok.
+
+sub11_tc1(_) ->
+ ok.
+
+sub12_tc1(_) ->
+ ok.
+
+sub121_tc1(_) ->
+ ok.
+
+top2_tc1(_) ->
+ ok.
+
+sub21_tc1(_) ->
+ ok.
+
+sub22_tc1(_) ->
+ ok.
+
+sub221_tc1(_) ->
+ ok.
+
+sub2xx_tc1(_) ->
+ ok.
diff --git a/lib/common_test/test/ct_netconfc_SUITE.erl b/lib/common_test/test/ct_netconfc_SUITE.erl
index e6e8d5b09c..3042a924fe 100644
--- a/lib/common_test/test/ct_netconfc_SUITE.erl
+++ b/lib/common_test/test/ct_netconfc_SUITE.erl
@@ -44,7 +44,12 @@
%%--------------------------------------------------------------------
init_per_suite(Config) ->
Config1 = ct_test_support:init_per_suite(Config),
- Config1.
+ case application:load(crypto) of
+ {error,Reason} ->
+ {skip, Reason};
+ _ ->
+ Config1
+ end.
end_per_suite(Config) ->
ct_test_support:end_per_suite(Config).
@@ -108,7 +113,7 @@ reformat(Events, EH) ->
%%%-----------------------------------------------------------------
%%% TEST EVENTS
%%%-----------------------------------------------------------------
-events_to_check(Test,Config) ->
+events_to_check(default,Config) ->
{module,_} = code:load_abs(filename:join(?config(data_dir,Config),
netconfc1_SUITE)),
TCs = netconfc1_SUITE:all(),
diff --git a/lib/common_test/test/ct_test_support.erl b/lib/common_test/test/ct_test_support.erl
index 80cca4a1cc..901e53c71d 100644
--- a/lib/common_test/test/ct_test_support.erl
+++ b/lib/common_test/test/ct_test_support.erl
@@ -117,7 +117,10 @@ end_per_suite(Config) ->
CTNode = proplists:get_value(ct_node, Config),
PrivDir = proplists:get_value(priv_dir, Config),
true = rpc:call(CTNode, code, del_path, [filename:join(PrivDir,"")]),
- cover:stop(CTNode),
+ case test_server:is_cover() of
+ true -> cover:flush(CTNode);
+ false -> ok
+ end,
slave:stop(CTNode),
ok.
@@ -149,7 +152,10 @@ end_per_testcase(_TestCase, Config) ->
case wait_for_ct_stop(CTNode) of
%% Common test was not stopped, so we restart the node.
false ->
- cover:stop(CTNode),
+ case test_server:is_cover() of
+ true -> cover:flush(CTNode);
+ false -> ok
+ end,
slave:stop(CTNode),
start_slave(Config,proplists:get_value(trace_level,Config)),
{fail, "Could not stop common_test"};
@@ -612,8 +618,11 @@ locate({parallel,TEvs}, Node, Evs, Config) ->
fun({EH,#event{name=tc_auto_skip,
node=EvNode,
data={Mod,end_per_group,Reason}}}) when
- EH == TEH, EvNode == Node, Mod == M, Reason == R ->
- false;
+ EH == TEH, EvNode == Node, Mod == M ->
+ case match_data(R, Reason) of
+ match -> false;
+ _ -> true
+ end;
({EH,#event{name=stop_logging,
node=EvNode,data=_}}) when
EH == TEH, EvNode == Node ->
@@ -627,23 +636,12 @@ locate({parallel,TEvs}, Node, Evs, Config) ->
[_AutoSkip | RemEvs2] ->
{Done,RemEvs2,length(RemEvs2)}
end;
- %% match other event than test case
- (TEv={TEH,N,D}, Acc) when D == '_' ->
- case [E || E={EH,#event{name=Name,
- node=EvNode,
- data=_}} <- Evs1,
- EH == TEH, EvNode == Node, Name == N] of
- [] ->
- exit({unmatched,TEv});
- _ ->
- test_server:format("Found ~p!", [TEv]),
- Acc
- end;
(TEv={TEH,N,D}, Acc) ->
case [E || E={EH,#event{name=Name,
node=EvNode,
data=Data}} <- Evs1,
- EH == TEH, EvNode == Node, Name == N, Data == D] of
+ EH == TEH, EvNode == Node, Name == N,
+ match == match_data(D,Data)] of
[] ->
exit({unmatched,TEv});
_ ->
@@ -1002,33 +1000,39 @@ locate({TEH,Name,Data}, Node, [{TEH,#event{name=Name,
data = EvData,
node = Node}}|Evs],
Config) ->
- try match_data(Data, EvData) of
+ case match_data(Data, EvData) of
match ->
- {Config,Evs}
- catch _:_ ->
+ {Config,Evs};
+ _ ->
nomatch
end;
locate({_TEH,_Name,_Data}, _Node, [_|_Evs], _Config) ->
nomatch.
-match_data(D,D) ->
+match_data(Data, EvData) ->
+ try do_match_data(Data, EvData)
+ catch _:_ ->
+ nomatch
+ end.
+
+do_match_data(D,D) ->
match;
-match_data('_',_) ->
+do_match_data('_',_) ->
match;
-match_data(Fun,Data) when is_function(Fun) ->
+do_match_data(Fun,Data) when is_function(Fun) ->
Fun(Data);
-match_data('$proplist',Proplist) ->
- match_data(
+do_match_data('$proplist',Proplist) ->
+ do_match_data(
fun(List) ->
lists:foreach(fun({_,_}) -> ok end,List)
end,Proplist);
-match_data([H1|MatchT],[H2|ValT]) ->
- match_data(H1,H2),
- match_data(MatchT,ValT);
-match_data(Tuple1,Tuple2) when is_tuple(Tuple1),is_tuple(Tuple2) ->
- match_data(tuple_to_list(Tuple1),tuple_to_list(Tuple2));
-match_data([],[]) ->
+do_match_data([H1|MatchT],[H2|ValT]) ->
+ do_match_data(H1,H2),
+ do_match_data(MatchT,ValT);
+do_match_data(Tuple1,Tuple2) when is_tuple(Tuple1),is_tuple(Tuple2) ->
+ do_match_data(tuple_to_list(Tuple1),tuple_to_list(Tuple2));
+do_match_data([],[]) ->
match.
result_match({SkipOrFail,{ErrorInd,{Why,'_'}}},
@@ -1043,6 +1047,9 @@ result_match({failed,{timetrap_timeout,{'$approx',Num}}},
Value =< trunc(Num+0.02*Num) -> true;
true -> false
end;
+result_match({user_timetrap_error,{Why,'_'}},
+ {user_timetrap_error,{Why,_Stack}}) ->
+ true;
result_match(Result, Result) ->
true;
result_match(_, _) ->
diff --git a/lib/compiler/src/v3_codegen.erl b/lib/compiler/src/v3_codegen.erl
index 3b73269545..6a13495523 100644
--- a/lib/compiler/src/v3_codegen.erl
+++ b/lib/compiler/src/v3_codegen.erl
@@ -379,6 +379,7 @@ bsm_rename_ctx(#l{ke={test,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={bif,_,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={gc_bif,_,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={set,_,_}}=L, _, _, _) -> L;
+bsm_rename_ctx(#l{ke={call,_,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={block,_}}=L, Old, _, false) ->
%% This block is not inside a protected. The match context variable cannot
%% possibly be live inside the block.
diff --git a/lib/compiler/test/Makefile b/lib/compiler/test/Makefile
index e047166ade..3b065ec3b9 100644
--- a/lib/compiler/test/Makefile
+++ b/lib/compiler/test/Makefile
@@ -10,7 +10,7 @@ MODULES= \
apply_SUITE \
beam_validator_SUITE \
beam_disasm_SUITE \
- beam_expect_SUITE \
+ beam_except_SUITE \
bs_bincomp_SUITE \
bs_bit_binaries_SUITE \
bs_construct_SUITE \
@@ -39,7 +39,7 @@ MODULES= \
NO_OPT= \
andor \
apply \
- beam_expect \
+ beam_except \
bs_construct \
bs_match \
bs_utf \
diff --git a/lib/compiler/test/andor_SUITE.erl b/lib/compiler/test/andor_SUITE.erl
index f7388f1614..fe69aeeb43 100644
--- a/lib/compiler/test/andor_SUITE.erl
+++ b/lib/compiler/test/andor_SUITE.erl
@@ -29,11 +29,12 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [t_case, t_and_or, t_andalso, t_orelse, inside, overlap,
- combined, in_case, before_and_inside_if].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [t_case,t_and_or,t_andalso,t_orelse,inside,overlap,
+ combined,in_case,before_and_inside_if]}].
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/beam_expect_SUITE.erl b/lib/compiler/test/beam_except_SUITE.erl
index 6f216eac4f..6b55224a42 100644
--- a/lib/compiler/test/beam_expect_SUITE.erl
+++ b/lib/compiler/test/beam_except_SUITE.erl
@@ -16,7 +16,7 @@
%%
%% %CopyrightEnd%
%%
--module(beam_expect_SUITE).
+-module(beam_except_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
diff --git a/lib/compiler/test/beam_validator_SUITE.erl b/lib/compiler/test/beam_validator_SUITE.erl
index 902867bc19..c84c83795a 100644
--- a/lib/compiler/test/beam_validator_SUITE.erl
+++ b/lib/compiler/test/beam_validator_SUITE.erl
@@ -47,17 +47,18 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [beam_files, compiler_bug, stupid_but_valid, xrange,
- yrange, stack, call_last, merge_undefined, uninit,
- unsafe_catch, dead_code, mult_labels,
- overwrite_catchtag, overwrite_trytag, accessing_tags,
- bad_catch_try, cons_guard, freg_range, freg_uninit,
- freg_state, bin_match, bin_aligned, bad_dsetel,
- state_after_fault_in_catch, no_exception_in_catch,
- undef_label, illegal_instruction, failing_gc_guard_bif].
+ [beam_files,{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [compiler_bug,stupid_but_valid,xrange,
+ yrange,stack,call_last,merge_undefined,uninit,
+ unsafe_catch,dead_code,mult_labels,
+ overwrite_catchtag,overwrite_trytag,accessing_tags,
+ bad_catch_try,cons_guard,freg_range,freg_uninit,
+ freg_state,bin_match,bin_aligned,bad_dsetel,
+ state_after_fault_in_catch,no_exception_in_catch,
+ undef_label,illegal_instruction,failing_gc_guard_bif]}].
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/bs_bit_binaries_SUITE.erl b/lib/compiler/test/bs_bit_binaries_SUITE.erl
index 30276f1259..897b4769f1 100644
--- a/lib/compiler/test/bs_bit_binaries_SUITE.erl
+++ b/lib/compiler/test/bs_bit_binaries_SUITE.erl
@@ -34,13 +34,15 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [misc, horrid_match, test_bitstr, test_bit_size,
- asymmetric_tests, big_asymmetric_tests,
- binary_to_and_from_list, big_binary_to_and_from_list,
- send_and_receive, send_and_receive_alot].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [misc,horrid_match,test_bitstr,test_bit_size,
+ asymmetric_tests,big_asymmetric_tests,
+ binary_to_and_from_list,big_binary_to_and_from_list,
+ send_and_receive,send_and_receive_alot]}].
+
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/bs_construct_SUITE.erl b/lib/compiler/test/bs_construct_SUITE.erl
index a393aaeffd..4ea5235bb6 100644
--- a/lib/compiler/test/bs_construct_SUITE.erl
+++ b/lib/compiler/test/bs_construct_SUITE.erl
@@ -36,12 +36,14 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [two, test1, fail, float_bin, in_guard, in_catch,
- nasty_literals, side_effect, opt, otp_7556, float_arith,
- otp_8054].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [two,test1,fail,float_bin,in_guard,in_catch,
+ nasty_literals,side_effect,opt,otp_7556,float_arith,
+ otp_8054]}].
+
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/bs_match_SUITE.erl b/lib/compiler/test/bs_match_SUITE.erl
index 0e9d0bbc17..bcceb889ed 100644
--- a/lib/compiler/test/bs_match_SUITE.erl
+++ b/lib/compiler/test/bs_match_SUITE.erl
@@ -44,19 +44,21 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [fun_shadow, int_float, otp_5269, null_fields, wiger,
- bin_tail, save_restore, shadowed_size_var,
- partitioned_bs_match, function_clause, unit,
- shared_sub_bins, bin_and_float, dec_subidentifiers,
- skip_optional_tag, wfbm, degenerated_match, bs_sum,
- coverage, multiple_uses, zero_label, followed_by_catch,
- matching_meets_construction, simon,
- matching_and_andalso, otp_7188, otp_7233, otp_7240,
- otp_7498, match_string, zero_width, bad_size, haystack,
- cover_beam_bool, matched_out_size].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [fun_shadow,int_float,otp_5269,null_fields,wiger,
+ bin_tail,save_restore,shadowed_size_var,
+ partitioned_bs_match,function_clause,unit,
+ shared_sub_bins,bin_and_float,dec_subidentifiers,
+ skip_optional_tag,wfbm,degenerated_match,bs_sum,
+ coverage,multiple_uses,zero_label,followed_by_catch,
+ matching_meets_construction,simon,
+ matching_and_andalso,otp_7188,otp_7233,otp_7240,
+ otp_7498,match_string,zero_width,bad_size,haystack,
+ cover_beam_bool,matched_out_size]}].
+
init_per_suite(Config) ->
Config.
@@ -800,12 +802,29 @@ matching_and_andalso(Config) when is_list(Config) ->
?line {'EXIT',{function_clause,_}} = (catch matching_and_andalso_1(<<1,2,3>>, -8)),
?line {'EXIT',{function_clause,_}} = (catch matching_and_andalso_1(<<1,2,3>>, blurf)),
?line {'EXIT',{function_clause,_}} = (catch matching_and_andalso_1(<<1,2,3>>, 19)),
+
+ {"abc",<<"xyz">>} = matching_and_andalso_2("abc", <<"-xyz">>),
+ {"abc",<<"">>} = matching_and_andalso_2("abc", <<($a-1)>>),
+ {"abc",<<"">>} = matching_and_andalso_2("abc", <<($z+1)>>),
+ {"abc",<<"">>} = matching_and_andalso_2("abc", <<($A-1)>>),
+ {"abc",<<"">>} = matching_and_andalso_2("abc", <<($Z+1)>>),
+ error = matching_and_andalso_2([], <<>>),
+ error = matching_and_andalso_2([], <<$A>>),
+ error = matching_and_andalso_2([], <<$Z>>),
+ error = matching_and_andalso_2([], <<$a>>),
+ error = matching_and_andalso_2([], <<$z>>),
ok.
matching_and_andalso_1(<<Bitmap/binary>>, K)
when is_integer(K) andalso size(Bitmap) >= K andalso 0 < K ->
ok.
+matching_and_andalso_2(Datetime, <<H,T/binary>>)
+ when not ((H >= $a) andalso (H =< $z)) andalso
+ not ((H >= $A) andalso (H =< $Z)) ->
+ {Datetime,T};
+matching_and_andalso_2(_, _) -> error.
+
%% Thanks to Tomas Stejskal.
otp_7188(Config) when is_list(Config) ->
MP3 = <<84,65,71,68,117,154,105,232,107,121,0,0,0,0,0,0,0,0,0,0,
diff --git a/lib/compiler/test/compilation_SUITE.erl b/lib/compiler/test/compilation_SUITE.erl
index 34f105b5fc..bec97b0199 100644
--- a/lib/compiler/test/compilation_SUITE.erl
+++ b/lib/compiler/test/compilation_SUITE.erl
@@ -28,26 +28,29 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [self_compile_old_inliner, self_compile, compiler_1,
- compiler_3, compiler_5, beam_compiler_1,
- beam_compiler_2, beam_compiler_3, beam_compiler_4,
- beam_compiler_5, beam_compiler_6, beam_compiler_7,
- beam_compiler_8, beam_compiler_9, beam_compiler_10,
- beam_compiler_11, beam_compiler_12,
- nested_tuples_in_case_expr, otp_2330, guards,
- {group, vsn}, otp_2380, otp_2141, otp_2173, otp_4790,
- const_list_256, bin_syntax_1, bin_syntax_2,
- bin_syntax_3, bin_syntax_4, bin_syntax_5, bin_syntax_6,
- live_var, convopts, bad_functional_value,
- catch_in_catch, redundant_case, long_string, otp_5076,
- complex_guard, otp_5092, otp_5151, otp_5235, otp_5244,
- trycatch_4, opt_crash, otp_5404, otp_5436, otp_5481,
- otp_5553, otp_5632, otp_5714, otp_5872, otp_6121,
- otp_6121a, otp_6121b, otp_7202, otp_7345, on_load,
- string_table,otp_8949_a,otp_8949_a,split_cases].
+ [self_compile_old_inliner,self_compile,
+ {group,p}].
groups() ->
- [{vsn, [], [vsn_1, vsn_2, vsn_3]}].
+ [{vsn,[parallel],[vsn_1,vsn_2,vsn_3]},
+ {p,test_lib:parallel(),
+ [compiler_1,
+ compiler_3,compiler_5,beam_compiler_1,
+ beam_compiler_2,beam_compiler_3,beam_compiler_4,
+ beam_compiler_5,beam_compiler_6,beam_compiler_7,
+ beam_compiler_8,beam_compiler_9,beam_compiler_10,
+ beam_compiler_11,beam_compiler_12,
+ nested_tuples_in_case_expr,otp_2330,guards,
+ {group,vsn},otp_2380,otp_2141,otp_2173,otp_4790,
+ const_list_256,bin_syntax_1,bin_syntax_2,
+ bin_syntax_3,bin_syntax_4,bin_syntax_5,bin_syntax_6,
+ live_var,convopts,bad_functional_value,
+ catch_in_catch,redundant_case,long_string,otp_5076,
+ complex_guard,otp_5092,otp_5151,otp_5235,otp_5244,
+ trycatch_4,opt_crash,otp_5404,otp_5436,otp_5481,
+ otp_5553,otp_5632,otp_5714,otp_5872,otp_6121,
+ otp_6121a,otp_6121b,otp_7202,otp_7345,on_load,
+ string_table,otp_8949_a,otp_8949_a,split_cases]}].
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/core_SUITE.erl b/lib/compiler/test/core_SUITE.erl
index 06185bfc34..a40dc32d59 100644
--- a/lib/compiler/test/core_SUITE.erl
+++ b/lib/compiler/test/core_SUITE.erl
@@ -43,11 +43,13 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [dehydrated_itracer,nested_tries,seq_in_guard,make_effect_seq,
- eval_is_boolean,unsafe_case,nomatch_shadow,reversed_annos].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [dehydrated_itracer,nested_tries,seq_in_guard,make_effect_seq,
+ eval_is_boolean,unsafe_case,nomatch_shadow,reversed_annos]}].
+
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/core_fold_SUITE.erl b/lib/compiler/test/core_fold_SUITE.erl
index 54bd52947e..2adc71c237 100644
--- a/lib/compiler/test/core_fold_SUITE.erl
+++ b/lib/compiler/test/core_fold_SUITE.erl
@@ -31,11 +31,13 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [t_element, setelement, t_length, append, t_apply, bifs,
- eq, nested_call_in_case, guard_try_catch, coverage].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [t_element,setelement,t_length,append,t_apply,bifs,
+ eq,nested_call_in_case,guard_try_catch,coverage]}].
+
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/error_SUITE.erl b/lib/compiler/test/error_SUITE.erl
index e798023cd8..859c4571ea 100644
--- a/lib/compiler/test/error_SUITE.erl
+++ b/lib/compiler/test/error_SUITE.erl
@@ -32,10 +32,11 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [head_mismatch_line, warnings_as_errors, bif_clashes, transforms].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [head_mismatch_line,warnings_as_errors,bif_clashes,transforms]}].
init_per_suite(Config) ->
Config.
@@ -282,12 +283,14 @@ filter(X) ->
%% Compiles a test module and returns the list of errors and warnings.
test_filename(Conf) ->
- Filename = "errors_test.erl",
+ Filename = ["errors_test_",test_lib:uniq(),".erl"],
DataDir = ?config(priv_dir, Conf),
filename:join(DataDir, Filename).
run_test(Test0, File, Warnings, WriteBeam) ->
- ?line Test = ["-module(errors_test). ", Test0],
+ ModName = filename:rootname(filename:basename(File), ".erl"),
+ Mod = list_to_atom(ModName),
+ Test = ["-module(",ModName,"). ",Test0],
?line Opts = case WriteBeam of
dont_write_beam ->
[binary,return_errors|Warnings];
@@ -301,17 +304,17 @@ run_test(Test0, File, Warnings, WriteBeam) ->
%% Test result of compilation.
?line Res = case compile:file(File, Opts) of
- {ok,errors_test,_,[{_File,Ws}]} ->
+ {ok,Mod,_,[{_File,Ws}]} ->
%io:format("compile:file(~s,~p) ->~n~p~n",
% [File,Opts,Ws]),
{warning,Ws};
- {ok,errors_test,_,[]} ->
+ {ok,Mod,_,[]} ->
%io:format("compile:file(~s,~p) ->~n~p~n",
% [File,Opts,Ws]),
[];
- {ok,errors_test,[{_File,Ws}]} ->
+ {ok,Mod,[{_File,Ws}]} ->
{warning,Ws};
- {ok,errors_test,[]} ->
+ {ok,Mod,[]} ->
[];
{error,[{XFile,Es}],Ws} = _ZZ when is_list(XFile) ->
%io:format("compile:file(~s,~p) ->~n~p~n",
diff --git a/lib/compiler/test/guard_SUITE.erl b/lib/compiler/test/guard_SUITE.erl
index 40711783ed..66c0b9a295 100644
--- a/lib/compiler/test/guard_SUITE.erl
+++ b/lib/compiler/test/guard_SUITE.erl
@@ -39,17 +39,18 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [misc, const_cond, basic_not, complex_not, nested_nots,
- semicolon, complex_semicolon, comma, or_guard,
- more_or_guards, complex_or_guards, and_guard, xor_guard,
- more_xor_guards, build_in_guard, old_guard_tests, gbif,
- t_is_boolean, is_function_2, tricky, rel_ops,
- literal_type_tests, basic_andalso_orelse, traverse_dcd,
- check_qlc_hrl, andalso_semi, t_tuple_size, binary_part,
- bad_constants].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [misc,const_cond,basic_not,complex_not,nested_nots,
+ semicolon,complex_semicolon,comma,or_guard,
+ more_or_guards,complex_or_guards,and_guard,xor_guard,
+ more_xor_guards,build_in_guard,old_guard_tests,gbif,
+ t_is_boolean,is_function_2,tricky,rel_ops,
+ literal_type_tests,basic_andalso_orelse,traverse_dcd,
+ check_qlc_hrl,andalso_semi,t_tuple_size,binary_part,
+ bad_constants]}].
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/inline_SUITE.erl b/lib/compiler/test/inline_SUITE.erl
index 2e17d3fde6..e2eb6a0dec 100644
--- a/lib/compiler/test/inline_SUITE.erl
+++ b/lib/compiler/test/inline_SUITE.erl
@@ -32,17 +32,22 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [attribute, bsdecode, bsdes, barnes2, decode1, smith,
- itracer, pseudoknot, comma_splitter, lists, really_inlined, otp_7223,
- coverage].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [attribute,bsdecode,bsdes,barnes2,decode1,smith,
+ itracer,pseudoknot,comma_splitter,lists,really_inlined,otp_7223,
+ coverage]}].
init_per_suite(Config) ->
- Config.
+ Pa = "-pa " ++ filename:dirname(code:which(?MODULE)),
+ {ok,Node} = start_node(compiler, Pa),
+ [{testing_node,Node}|Config].
-end_per_suite(_Config) ->
+end_per_suite(Config) ->
+ Node = ?config(testing_node, Config),
+ ?t:stop_node(Node),
ok.
init_per_group(_GroupName, Config) ->
@@ -81,6 +86,7 @@ attribute(Config) when is_list(Config) ->
?comp(comma_splitter).
try_inline(Mod, Config) ->
+ Node = ?config(testing_node, Config),
?line Src = filename:join(?config(data_dir, Config), atom_to_list(Mod)),
?line Out = ?config(priv_dir,Config),
@@ -89,8 +95,6 @@ try_inline(Mod, Config) ->
?line {ok,Mod} = compile:file(Src, [{outdir,Out},report,bin_opt_info,clint]),
?line Dog = test_server:timetrap(test_server:minutes(10)),
- ?line Pa = "-pa " ++ filename:dirname(code:which(?MODULE)),
- ?line {ok,Node} = start_node(compiler, Pa),
?line NormalResult = rpc:call(Node, ?MODULE, load_and_call, [Out,Mod]),
?line test_server:timetrap_cancel(Dog),
@@ -125,7 +129,6 @@ try_inline(Mod, Config) ->
%% Delete Beam file.
?line ok = file:delete(filename:join(Out, atom_to_list(Mod)++code:objfile_extension())),
- ?line ?t:stop_node(Node),
ok.
compare(Same, Same) -> ok;
@@ -293,9 +296,9 @@ otp_7223_2({a}) ->
1.
coverage(Config) when is_list(Config) ->
- ?line Src = filename:join(?config(data_dir, Config), bsdecode),
- ?line Out = ?config(priv_dir,Config),
- ?line {ok,Mod} = compile:file(Src, [{outdir,Out},report,{inline,0},clint]),
- ?line {ok,Mod} = compile:file(Src, [{outdir,Out},report,{inline,20},verbose,clint]),
- ?line ok = file:delete(filename:join(Out, "bsdecode"++code:objfile_extension())),
+ Mod = bsdecode,
+ Src = filename:join(?config(data_dir, Config), Mod),
+ {ok,Mod,_} = compile:file(Src, [binary,report,{inline,0},clint]),
+ {ok,Mod,_} = compile:file(Src, [binary,report,{inline,20},
+ verbose,clint]),
ok.
diff --git a/lib/compiler/test/match_SUITE.erl b/lib/compiler/test/match_SUITE.erl
index 9406d7de8f..de44926d81 100644
--- a/lib/compiler/test/match_SUITE.erl
+++ b/lib/compiler/test/match_SUITE.erl
@@ -30,11 +30,13 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [pmatch, mixed, aliases, match_in_call, untuplify,
- shortcut_boolean, letify_guard, selectify, underscore, coverage].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [pmatch,mixed,aliases,match_in_call,untuplify,
+ shortcut_boolean,letify_guard,selectify,underscore,coverage]}].
+
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/misc_SUITE.erl b/lib/compiler/test/misc_SUITE.erl
index 6df8b2ac30..44c7161530 100644
--- a/lib/compiler/test/misc_SUITE.erl
+++ b/lib/compiler/test/misc_SUITE.erl
@@ -57,11 +57,12 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
-spec all() -> misc_SUITE_test_cases().
all() ->
test_lib:recompile(?MODULE),
- [tobias, empty_string, md5, silly_coverage,
- confused_literals, integer_encoding, override_bif].
+ [{group,p}].
groups() ->
- [].
+ [{p,[],%%test_lib:parallel(),
+ [tobias,empty_string,md5,silly_coverage,
+ confused_literals,integer_encoding,override_bif]}].
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/receive_SUITE.erl b/lib/compiler/test/receive_SUITE.erl
index 2a67615e5e..82c823b789 100644
--- a/lib/compiler/test/receive_SUITE.erl
+++ b/lib/compiler/test/receive_SUITE.erl
@@ -40,10 +40,12 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [recv, coverage, otp_7980, ref_opt, export].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [recv,coverage,otp_7980,ref_opt,export]}].
+
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/record_SUITE.erl b/lib/compiler/test/record_SUITE.erl
index 363422ec7e..96f3712be9 100644
--- a/lib/compiler/test/record_SUITE.erl
+++ b/lib/compiler/test/record_SUITE.erl
@@ -42,12 +42,14 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [errors, record_test_2, record_test_3,
- record_access_in_guards, guard_opt, eval_once, foobar,
- missing_test_heap, nested_access, coverage].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [errors,record_test_2,record_test_3,
+ record_access_in_guards,guard_opt,eval_once,foobar,
+ missing_test_heap,nested_access,coverage]}].
+
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/test_lib.erl b/lib/compiler/test/test_lib.erl
index 2295592a38..996c369705 100644
--- a/lib/compiler/test/test_lib.erl
+++ b/lib/compiler/test/test_lib.erl
@@ -20,7 +20,8 @@
-include("test_server.hrl").
-compile({no_auto_import,[binary_part/2]}).
--export([recompile/1,opt_opts/1,get_data_dir/1,smoke_disasm/1,p_run/2,binary_part/2]).
+-export([recompile/1,parallel/0,uniq/0,opt_opts/1,get_data_dir/1,
+ smoke_disasm/1,p_run/2,binary_part/2]).
recompile(Mod) when is_atom(Mod) ->
case whereis(cover_server) of
@@ -43,6 +44,18 @@ smoke_disasm(File) when is_list(File) ->
Res = beam_disasm:file(File),
{beam_file,_Mod} = {element(1, Res),element(2, Res)}.
+parallel() ->
+ case ?t:is_cover() orelse erlang:system_info(schedulers) =:= 1 of
+ true -> [];
+ false -> [parallel]
+ end.
+
+uniq() ->
+ U0 = erlang:ref_to_list(make_ref()),
+ U1 = re:replace(U0, "^#Ref", ""),
+ U = re:replace(U1, "[^[A-Za-z0-9_]+", "_", [global]),
+ re:replace(U, "_*$", "", [{return,list}]).
+
%% Retrieve the "interesting" compiler options (options for optimization
%% and compatibility) for the given module.
diff --git a/lib/compiler/test/trycatch_SUITE.erl b/lib/compiler/test/trycatch_SUITE.erl
index 29119c0f5d..4530d08c77 100644
--- a/lib/compiler/test/trycatch_SUITE.erl
+++ b/lib/compiler/test/trycatch_SUITE.erl
@@ -32,13 +32,15 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [basic, lean_throw, try_of, try_after, catch_oops,
- after_oops, eclectic, rethrow, nested_of, nested_catch,
- nested_after, nested_horrid, last_call_optimization,
- bool, plain_catch_coverage, andalso_orelse, get_in_try].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [basic,lean_throw,try_of,try_after,catch_oops,
+ after_oops,eclectic,rethrow,nested_of,nested_catch,
+ nested_after,nested_horrid,last_call_optimization,
+ bool,plain_catch_coverage,andalso_orelse,get_in_try]}].
+
init_per_suite(Config) ->
Config.
diff --git a/lib/compiler/test/warnings_SUITE.erl b/lib/compiler/test/warnings_SUITE.erl
index f6a572abfa..9ce0df5ec4 100644
--- a/lib/compiler/test/warnings_SUITE.erl
+++ b/lib/compiler/test/warnings_SUITE.erl
@@ -55,12 +55,13 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
test_lib:recompile(?MODULE),
- [pattern, pattern2, pattern3, pattern4, guard,
- bad_arith, bool_cases, bad_apply, files, effect,
- bin_opt_info, bin_construction].
+ [{group,p}].
groups() ->
- [].
+ [{p,test_lib:parallel(),
+ [pattern,pattern2,pattern3,pattern4,guard,
+ bad_arith,bool_cases,bad_apply,files,effect,
+ bin_opt_info,bin_construction]}].
init_per_suite(Config) ->
Config.
@@ -556,9 +557,10 @@ run(Config, Tests) ->
%% Compiles a test module and returns the list of errors and warnings.
run_test(Conf, Test0, Warnings) ->
- Filename = 'warnings_test.erl',
+ Mod = "warnings_"++test_lib:uniq(),
+ Filename = Mod ++ ".erl",
?line DataDir = ?privdir,
- ?line Test = ["-module(warnings_test). ", Test0],
+ Test = ["-module(", Mod, "). ", Test0],
?line File = filename:join(DataDir, Filename),
?line Opts = [binary,export_all,return|Warnings],
?line ok = file:write_file(File, Test),
diff --git a/lib/diameter/.gitignore b/lib/diameter/.gitignore
index 5afcbedc23..8b13789179 100644
--- a/lib/diameter/.gitignore
+++ b/lib/diameter/.gitignore
@@ -1,58 +1 @@
-# Match at any level.
-*~
-autom4te.cache
-
-# Compiler derivatives
-#
-# Do not use too creative wildcards.
-# Those might ignore files that should not be ignored.
-
-i686-pc-linux-gnu
-x86_64-unknown-linux-gnu
-i386-apple-darwin[0-9]*.[0-9]*.[0-9]*
-sparc-sun-solaris[0-9]*.[0-9]*
-i386-pc-solaris[0-9]*.[0-9]*
-i386-unknown-freebsd[0-9]*.[0-9]*
-tile-tilera-linux-gnu
-powerpc-unknown-linux-gnu
-
-# Mac OS X
-a.out.dSYM/
-
-# Anchored from $DIAMETER_TOP
-/config.log
-/config.status
-
-/Makefile
-/configure
-
-
-# General patterns for applications in lib.
-#
-# Assume that all test/Emakefiles are generated.
-#
-# Any application with a checked-in test/Emakefile should
-# use a negative pattern in its own .gitignore.
-
-#
-# Files generated by configure.
-#
-
-/configure
-/config.log
-/config.status
-
-
-#
-# Generated documentation. (ie. not doc/src)
-#
-
-/doc/[^s]*
-
-
-#
-# Files generated when building/running tests
-#
-
-/test/*.log
diff --git a/lib/diameter/Makefile b/lib/diameter/Makefile
new file mode 100644
index 0000000000..9961d627cf
--- /dev/null
+++ b/lib/diameter/Makefile
@@ -0,0 +1,32 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2010-2012. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+
+include $(ERL_TOP)/make/target.mk
+include $(ERL_TOP)/make/$(TARGET)/otp.mk
+include vsn.mk
+include subdirs.mk
+
+SUB_DIRECTORIES = $(SUB_DIRS) doc/src
+SPECIAL_TARGETS =
+
+include $(ERL_TOP)/make/otp_subdir.mk
+
+info:
+ @echo "APP_VSN = $(APP_VSN)"
+
+.PHONY: info
diff --git a/lib/diameter/Makefile.in b/lib/diameter/Makefile.in
deleted file mode 100644
index cf38c26045..0000000000
--- a/lib/diameter/Makefile.in
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2010-2011. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-
-ifneq ($(ERL_TOP),)
-include $(ERL_TOP)/make/target.mk
-include $(ERL_TOP)/make/$(TARGET)/otp.mk
-else
-include $(DIAMETER_TOP)/make/target.mk
-include $(DIAMETER_TOP)/make/$(TARGET)/rules.mk
-endif
-
-# ----------------------------------------------------
-# Application version
-# ----------------------------------------------------
-include vsn.mk
-VSN=$(DIAMETER_VSN)
-
-DIAMETER_TOP = @DIAMETER_TOP@
-
-
-# ----------------------------------------------------
-# Common Macros
-# ----------------------------------------------------
-
-include subdirs.mk
-
-SUB_DIRECTORIES = $(SUB_DIRS) doc/src
-
-SPECIAL_TARGETS =
-
-ifneq ($(ERL_TOP),)
-ifneq ($(PREFIX),)
-CONFIGURE_OPTS += --prefix=$(PREFIX)
-endif
-endif
-
-
-# ----------------------------------------------------
-# Default Subdir Targets
-# ----------------------------------------------------
-ifneq ($(ERL_TOP),)
-include $(ERL_TOP)/make/otp_subdir.mk
-else
-include $(DIAMETER_TOP)/make/subdir.mk
-endif
-
-.PHONY: reconf conf info version dialyzer
-
-reconf:
- autoconf
-
-conf: do_configure
-
-do_configure: configure
- ./configure $(CONFIGURE_OPTS)
-
-configure: configure.in
- autoconf
-
-info:
- @echo "APP_VSN: $(APP_VSN)"
- @echo "DIAMETER_VSN: $(DIAMETER_VSN)"
-
-version:
- @echo "$(VSN)"
-
-
-dialyzer:
- (cd ./ebin; \
- dialyzer --build_plt \
- --output_plt ../priv/diameter.plt \
- -r ../../diameter/ebin \
- --verbose)
diff --git a/lib/diameter/aclocal.m4 b/lib/diameter/aclocal.m4
deleted file mode 100644
index 2abb47dba2..0000000000
--- a/lib/diameter/aclocal.m4
+++ /dev/null
@@ -1,65 +0,0 @@
-dnl
-dnl %CopyrightBegin%
-dnl
-dnl Copyright Ericsson AB 1998-2011. All Rights Reserved.
-dnl
-dnl The contents of this file are subject to the Erlang Public License,
-dnl Version 1.1, (the "License"); you may not use this file except in
-dnl compliance with the License. You should have received a copy of the
-dnl Erlang Public License along with this software. If not, it can be
-dnl retrieved online at http://www.erlang.org/.
-dnl
-dnl Software distributed under the License is distributed on an "AS IS"
-dnl basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-dnl the License for the specific language governing rights and limitations
-dnl under the License.
-dnl
-dnl %CopyrightEnd%
-dnl
-
-dnl
-dnl aclocal.m4
-dnl
-dnl Local macros used in configure.in. The Local Macros which
-dnl could/should be part of autoconf are prefixed LM_, macros specific
-dnl to the Erlang system are prefixed ERL_.
-dnl
-
-dnl ----------------------------------------------------------------------
-dnl
-dnl LM_PROG_INSTALL_DIR
-dnl
-dnl Figure out how to create directories with parents.
-dnl (In my opinion INSTALL_DIR is a bad name, MKSUBDIRS or something is better)
-dnl
-dnl We prefer 'install -d', but use 'mkdir -p' if it exists.
-dnl If none of these methods works, we give up.
-dnl
-
-
-AC_DEFUN(LM_PROG_INSTALL_DIR,
-[AC_CACHE_CHECK(how to create a directory including parents,
-ac_cv_prog_mkdir_p,
-[
-temp_name_base=config.$$
-temp_name=$temp_name_base/x/y/z
-$INSTALL -d $temp_name >/dev/null 2>&1
-ac_cv_prog_mkdir_p=none
-if test -d $temp_name; then
- ac_cv_prog_mkdir_p="$INSTALL -d"
-else
- mkdir -p $temp_name >/dev/null 2>&1
- if test -d $temp_name; then
- ac_cv_prog_mkdir_p="mkdir -p"
- fi
-fi
-rm -fr $temp_name_base
-])
-
-case "${ac_cv_prog_mkdir_p}" in
- none) AC_MSG_ERROR(don't know how create directories with parents) ;;
- *) INSTALL_DIR="$ac_cv_prog_mkdir_p" AC_SUBST(INSTALL_DIR) ;;
-esac
-])
-
-
diff --git a/lib/diameter/autoconf/config.guess b/lib/diameter/autoconf/config.guess
deleted file mode 100755
index 38a833903b..0000000000
--- a/lib/diameter/autoconf/config.guess
+++ /dev/null
@@ -1,1519 +0,0 @@
-#! /bin/sh
-# Attempt to guess a canonical system name.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
-# Inc.
-
-timestamp='2007-05-17'
-
-# This file is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Originally written by Per Bothner <[email protected]>.
-# Please send patches to <[email protected]>. Submit a context
-# diff and a properly formatted ChangeLog entry.
-#
-# This script attempts to guess a canonical system name similar to
-# config.sub. If it succeeds, it prints the system name on stdout, and
-# exits with 0. Otherwise, it exits with 1.
-#
-# The plan is that this can be called by configure scripts if you
-# don't specify an explicit build system type.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION]
-
-Output the configuration name of the system \`$me' is run on.
-
-Operation modes:
- -h, --help print this help, then exit
- -t, --time-stamp print date of last modification, then exit
- -v, --version print version number, then exit
-
-Report bugs and patches to <[email protected]>."
-
-version="\
-GNU config.guess ($timestamp)
-
-Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
-Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
- case $1 in
- --time-stamp | --time* | -t )
- echo "$timestamp" ; exit ;;
- --version | -v )
- echo "$version" ; exit ;;
- --help | --h* | -h )
- echo "$usage"; exit ;;
- -- ) # Stop option processing
- shift; break ;;
- - ) # Use stdin as input.
- break ;;
- -* )
- echo "$me: invalid option $1$help" >&2
- exit 1 ;;
- * )
- break ;;
- esac
-done
-
-if test $# != 0; then
- echo "$me: too many arguments$help" >&2
- exit 1
-fi
-
-trap 'exit 1' 1 2 15
-
-# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
-# compiler to aid in system detection is discouraged as it requires
-# temporary files to be created and, as you can see below, it is a
-# headache to deal with in a portable fashion.
-
-# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
-# use `HOST_CC' if defined, but it is deprecated.
-
-# Portable tmp directory creation inspired by the Autoconf team.
-
-set_cc_for_build='
-trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
-trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
-: ${TMPDIR=/tmp} ;
- { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
- { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
- { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
- { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
-dummy=$tmp/dummy ;
-tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
-case $CC_FOR_BUILD,$HOST_CC,$CC in
- ,,) echo "int x;" > $dummy.c ;
- for c in cc gcc c89 c99 ; do
- if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
- CC_FOR_BUILD="$c"; break ;
- fi ;
- done ;
- if test x"$CC_FOR_BUILD" = x ; then
- CC_FOR_BUILD=no_compiler_found ;
- fi
- ;;
- ,,*) CC_FOR_BUILD=$CC ;;
- ,*,*) CC_FOR_BUILD=$HOST_CC ;;
-esac ; set_cc_for_build= ;'
-
-# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
-# ([email protected] 1994-08-24)
-if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
- PATH=$PATH:/.attbin ; export PATH
-fi
-
-UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
-UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
-UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
-UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
-
-# Note: order is significant - the case branches are not exclusive.
-
-case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
- *:NetBSD:*:*)
- # NetBSD (nbsd) targets should (where applicable) match one or
- # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
- # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
- # switched to ELF, *-*-netbsd* would select the old
- # object file format. This provides both forward
- # compatibility and a consistent mechanism for selecting the
- # object file format.
- #
- # Note: NetBSD doesn't particularly care about the vendor
- # portion of the name. We always set it to "unknown".
- sysctl="sysctl -n hw.machine_arch"
- UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
- /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
- case "${UNAME_MACHINE_ARCH}" in
- armeb) machine=armeb-unknown ;;
- arm*) machine=arm-unknown ;;
- sh3el) machine=shl-unknown ;;
- sh3eb) machine=sh-unknown ;;
- sh5el) machine=sh5le-unknown ;;
- *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
- esac
- # The Operating System including object format, if it has switched
- # to ELF recently, or will in the future.
- case "${UNAME_MACHINE_ARCH}" in
- arm*|i386|m68k|ns32k|sh3*|sparc|vax)
- eval $set_cc_for_build
- if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep __ELF__ >/dev/null
- then
- # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
- # Return netbsd for either. FIX?
- os=netbsd
- else
- os=netbsdelf
- fi
- ;;
- *)
- os=netbsd
- ;;
- esac
- # The OS release
- # Debian GNU/NetBSD machines have a different userland, and
- # thus, need a distinct triplet. However, they do not need
- # kernel version information, so it can be replaced with a
- # suitable tag, in the style of linux-gnu.
- case "${UNAME_VERSION}" in
- Debian*)
- release='-gnu'
- ;;
- *)
- release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
- ;;
- esac
- # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
- # contains redundant information, the shorter form:
- # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
- echo "${machine}-${os}${release}"
- exit ;;
- *:OpenBSD:*:*)
- UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
- echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
- exit ;;
- *:ekkoBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
- exit ;;
- *:SolidBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
- exit ;;
- macppc:MirBSD:*:*)
- echo powerpc-unknown-mirbsd${UNAME_RELEASE}
- exit ;;
- *:MirBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
- exit ;;
- alpha:OSF1:*:*)
- case $UNAME_RELEASE in
- *4.0)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
- ;;
- *5.*)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
- ;;
- esac
- # According to Compaq, /usr/sbin/psrinfo has been available on
- # OSF/1 and Tru64 systems produced since 1995. I hope that
- # covers most systems running today. This code pipes the CPU
- # types through head -n 1, so we only detect the type of CPU 0.
- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
- case "$ALPHA_CPU_TYPE" in
- "EV4 (21064)")
- UNAME_MACHINE="alpha" ;;
- "EV4.5 (21064)")
- UNAME_MACHINE="alpha" ;;
- "LCA4 (21066/21068)")
- UNAME_MACHINE="alpha" ;;
- "EV5 (21164)")
- UNAME_MACHINE="alphaev5" ;;
- "EV5.6 (21164A)")
- UNAME_MACHINE="alphaev56" ;;
- "EV5.6 (21164PC)")
- UNAME_MACHINE="alphapca56" ;;
- "EV5.7 (21164PC)")
- UNAME_MACHINE="alphapca57" ;;
- "EV6 (21264)")
- UNAME_MACHINE="alphaev6" ;;
- "EV6.7 (21264A)")
- UNAME_MACHINE="alphaev67" ;;
- "EV6.8CB (21264C)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.8AL (21264B)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.8CX (21264D)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.9A (21264/EV69A)")
- UNAME_MACHINE="alphaev69" ;;
- "EV7 (21364)")
- UNAME_MACHINE="alphaev7" ;;
- "EV7.9 (21364A)")
- UNAME_MACHINE="alphaev79" ;;
- esac
- # A Pn.n version is a patched version.
- # A Vn.n version is a released version.
- # A Tn.n version is a released field test version.
- # A Xn.n version is an unreleased experimental baselevel.
- # 1.2 uses "1.2" for uname -r.
- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- exit ;;
- Alpha\ *:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # Should we change UNAME_MACHINE based on the output of uname instead
- # of the specific Alpha model?
- echo alpha-pc-interix
- exit ;;
- 21064:Windows_NT:50:3)
- echo alpha-dec-winnt3.5
- exit ;;
- Amiga*:UNIX_System_V:4.0:*)
- echo m68k-unknown-sysv4
- exit ;;
- *:[Aa]miga[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-amigaos
- exit ;;
- *:[Mm]orph[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-morphos
- exit ;;
- *:OS/390:*:*)
- echo i370-ibm-openedition
- exit ;;
- *:z/VM:*:*)
- echo s390-ibm-zvmoe
- exit ;;
- *:OS400:*:*)
- echo powerpc-ibm-os400
- exit ;;
- arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
- echo arm-acorn-riscix${UNAME_RELEASE}
- exit ;;
- arm:riscos:*:*|arm:RISCOS:*:*)
- echo arm-unknown-riscos
- exit ;;
- SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
- echo hppa1.1-hitachi-hiuxmpp
- exit ;;
- Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
- # [email protected] (Earle F. Ake) contributed MIS and NILE.
- if test "`(/bin/universe) 2>/dev/null`" = att ; then
- echo pyramid-pyramid-sysv3
- else
- echo pyramid-pyramid-bsd
- fi
- exit ;;
- NILE*:*:*:dcosx)
- echo pyramid-pyramid-svr4
- exit ;;
- DRS?6000:unix:4.0:6*)
- echo sparc-icl-nx6
- exit ;;
- DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
- case `/usr/bin/uname -p` in
- sparc) echo sparc-icl-nx7; exit ;;
- esac ;;
- sun4H:SunOS:5.*:*)
- echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
- echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- i86pc:SunOS:5.*:* | ix86xen:SunOS:5.*:*)
- echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:6*:*)
- # According to config.sub, this is the proper way to canonicalize
- # SunOS6. Hard to guess exactly what SunOS6 will be like, but
- # it's likely to be more like Solaris than SunOS4.
- echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:*:*)
- case "`/usr/bin/arch -k`" in
- Series*|S4*)
- UNAME_RELEASE=`uname -v`
- ;;
- esac
- # Japanese Language versions have a version number like `4.1.3-JL'.
- echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
- exit ;;
- sun3*:SunOS:*:*)
- echo m68k-sun-sunos${UNAME_RELEASE}
- exit ;;
- sun*:*:4.2BSD:*)
- UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
- test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
- case "`/bin/arch`" in
- sun3)
- echo m68k-sun-sunos${UNAME_RELEASE}
- ;;
- sun4)
- echo sparc-sun-sunos${UNAME_RELEASE}
- ;;
- esac
- exit ;;
- aushp:SunOS:*:*)
- echo sparc-auspex-sunos${UNAME_RELEASE}
- exit ;;
- # The situation for MiNT is a little confusing. The machine name
- # can be virtually everything (everything which is not
- # "atarist" or "atariste" at least should have a processor
- # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
- # to the lowercase version "mint" (or "freemint"). Finally
- # the system name "TOS" denotes a system which is actually not
- # MiNT. But MiNT is downward compatible to TOS, so this should
- # be no problem.
- atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
- echo m68k-milan-mint${UNAME_RELEASE}
- exit ;;
- hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
- echo m68k-hades-mint${UNAME_RELEASE}
- exit ;;
- *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
- echo m68k-unknown-mint${UNAME_RELEASE}
- exit ;;
- m68k:machten:*:*)
- echo m68k-apple-machten${UNAME_RELEASE}
- exit ;;
- powerpc:machten:*:*)
- echo powerpc-apple-machten${UNAME_RELEASE}
- exit ;;
- RISC*:Mach:*:*)
- echo mips-dec-mach_bsd4.3
- exit ;;
- RISC*:ULTRIX:*:*)
- echo mips-dec-ultrix${UNAME_RELEASE}
- exit ;;
- VAX*:ULTRIX*:*:*)
- echo vax-dec-ultrix${UNAME_RELEASE}
- exit ;;
- 2020:CLIX:*:* | 2430:CLIX:*:*)
- echo clipper-intergraph-clix${UNAME_RELEASE}
- exit ;;
- mips:*:*:UMIPS | mips:*:*:RISCos)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
-#ifdef __cplusplus
-#include <stdio.h> /* for printf() prototype */
- int main (int argc, char *argv[]) {
-#else
- int main (argc, argv) int argc; char *argv[]; {
-#endif
- #if defined (host_mips) && defined (MIPSEB)
- #if defined (SYSTYPE_SYSV)
- printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
- #endif
- #if defined (SYSTYPE_SVR4)
- printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
- #endif
- #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
- printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
- #endif
- #endif
- exit (-1);
- }
-EOF
- $CC_FOR_BUILD -o $dummy $dummy.c &&
- dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
- SYSTEM_NAME=`$dummy $dummyarg` &&
- { echo "$SYSTEM_NAME"; exit; }
- echo mips-mips-riscos${UNAME_RELEASE}
- exit ;;
- Motorola:PowerMAX_OS:*:*)
- echo powerpc-motorola-powermax
- exit ;;
- Motorola:*:4.3:PL8-*)
- echo powerpc-harris-powermax
- exit ;;
- Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
- echo powerpc-harris-powermax
- exit ;;
- Night_Hawk:Power_UNIX:*:*)
- echo powerpc-harris-powerunix
- exit ;;
- m88k:CX/UX:7*:*)
- echo m88k-harris-cxux7
- exit ;;
- m88k:*:4*:R4*)
- echo m88k-motorola-sysv4
- exit ;;
- m88k:*:3*:R3*)
- echo m88k-motorola-sysv3
- exit ;;
- AViiON:dgux:*:*)
- # DG/UX returns AViiON for all architectures
- UNAME_PROCESSOR=`/usr/bin/uname -p`
- if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
- then
- if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
- [ ${TARGET_BINARY_INTERFACE}x = x ]
- then
- echo m88k-dg-dgux${UNAME_RELEASE}
- else
- echo m88k-dg-dguxbcs${UNAME_RELEASE}
- fi
- else
- echo i586-dg-dgux${UNAME_RELEASE}
- fi
- exit ;;
- M88*:DolphinOS:*:*) # DolphinOS (SVR3)
- echo m88k-dolphin-sysv3
- exit ;;
- M88*:*:R3*:*)
- # Delta 88k system running SVR3
- echo m88k-motorola-sysv3
- exit ;;
- XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
- echo m88k-tektronix-sysv3
- exit ;;
- Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
- echo m68k-tektronix-bsd
- exit ;;
- *:IRIX*:*:*)
- echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
- exit ;;
- ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
- exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
- i*86:AIX:*:*)
- echo i386-ibm-aix
- exit ;;
- ia64:AIX:*:*)
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
- else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
- fi
- echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
- exit ;;
- *:AIX:2:3)
- if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <sys/systemcfg.h>
-
- main()
- {
- if (!__power_pc())
- exit(1);
- puts("powerpc-ibm-aix3.2.5");
- exit(0);
- }
-EOF
- if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
- then
- echo "$SYSTEM_NAME"
- else
- echo rs6000-ibm-aix3.2.5
- fi
- elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
- echo rs6000-ibm-aix3.2.4
- else
- echo rs6000-ibm-aix3.2
- fi
- exit ;;
- *:AIX:*:[45])
- IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
- if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
- IBM_ARCH=rs6000
- else
- IBM_ARCH=powerpc
- fi
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
- else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
- fi
- echo ${IBM_ARCH}-ibm-aix${IBM_REV}
- exit ;;
- *:AIX:*:*)
- echo rs6000-ibm-aix
- exit ;;
- ibmrt:4.4BSD:*|romp-ibm:BSD:*)
- echo romp-ibm-bsd4.4
- exit ;;
- ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
- echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
- exit ;; # report: romp-ibm BSD 4.3
- *:BOSX:*:*)
- echo rs6000-bull-bosx
- exit ;;
- DPX/2?00:B.O.S.:*:*)
- echo m68k-bull-sysv3
- exit ;;
- 9000/[34]??:4.3bsd:1.*:*)
- echo m68k-hp-bsd
- exit ;;
- hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
- echo m68k-hp-bsd4.4
- exit ;;
- 9000/[34678]??:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- case "${UNAME_MACHINE}" in
- 9000/31? ) HP_ARCH=m68000 ;;
- 9000/[34]?? ) HP_ARCH=m68k ;;
- 9000/[678][0-9][0-9])
- if [ -x /usr/bin/getconf ]; then
- sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
- sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
- case "${sc_cpu_version}" in
- 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
- 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
- 532) # CPU_PA_RISC2_0
- case "${sc_kernel_bits}" in
- 32) HP_ARCH="hppa2.0n" ;;
- 64) HP_ARCH="hppa2.0w" ;;
- '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
- esac ;;
- esac
- fi
- if [ "${HP_ARCH}" = "" ]; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
-
- #define _HPUX_SOURCE
- #include <stdlib.h>
- #include <unistd.h>
-
- int main ()
- {
- #if defined(_SC_KERNEL_BITS)
- long bits = sysconf(_SC_KERNEL_BITS);
- #endif
- long cpu = sysconf (_SC_CPU_VERSION);
-
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
- case CPU_PA_RISC2_0:
- #if defined(_SC_KERNEL_BITS)
- switch (bits)
- {
- case 64: puts ("hppa2.0w"); break;
- case 32: puts ("hppa2.0n"); break;
- default: puts ("hppa2.0"); break;
- } break;
- #else /* !defined(_SC_KERNEL_BITS) */
- puts ("hppa2.0"); break;
- #endif
- default: puts ("hppa1.0"); break;
- }
- exit (0);
- }
-EOF
- (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
- test -z "$HP_ARCH" && HP_ARCH=hppa
- fi ;;
- esac
- if [ ${HP_ARCH} = "hppa2.0w" ]
- then
- eval $set_cc_for_build
-
- # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
- # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
- # generating 64-bit code. GNU and HP use different nomenclature:
- #
- # $ CC_FOR_BUILD=cc ./config.guess
- # => hppa2.0w-hp-hpux11.23
- # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
- # => hppa64-hp-hpux11.23
-
- if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
- grep __LP64__ >/dev/null
- then
- HP_ARCH="hppa2.0w"
- else
- HP_ARCH="hppa64"
- fi
- fi
- echo ${HP_ARCH}-hp-hpux${HPUX_REV}
- exit ;;
- ia64:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- echo ia64-hp-hpux${HPUX_REV}
- exit ;;
- 3050*:HI-UX:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <unistd.h>
- int
- main ()
- {
- long cpu = sysconf (_SC_CPU_VERSION);
- /* The order matters, because CPU_IS_HP_MC68K erroneously returns
- true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
- results, however. */
- if (CPU_IS_PA_RISC (cpu))
- {
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
- case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
- default: puts ("hppa-hitachi-hiuxwe2"); break;
- }
- }
- else if (CPU_IS_HP_MC68K (cpu))
- puts ("m68k-hitachi-hiuxwe2");
- else puts ("unknown-hitachi-hiuxwe2");
- exit (0);
- }
-EOF
- $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
- echo unknown-hitachi-hiuxwe2
- exit ;;
- 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
- echo hppa1.1-hp-bsd
- exit ;;
- 9000/8??:4.3bsd:*:*)
- echo hppa1.0-hp-bsd
- exit ;;
- *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
- echo hppa1.0-hp-mpeix
- exit ;;
- hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
- echo hppa1.1-hp-osf
- exit ;;
- hp8??:OSF1:*:*)
- echo hppa1.0-hp-osf
- exit ;;
- i*86:OSF1:*:*)
- if [ -x /usr/sbin/sysversion ] ; then
- echo ${UNAME_MACHINE}-unknown-osf1mk
- else
- echo ${UNAME_MACHINE}-unknown-osf1
- fi
- exit ;;
- parisc*:Lites*:*:*)
- echo hppa1.1-hp-lites
- exit ;;
- C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
- echo c1-convex-bsd
- exit ;;
- C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
- echo c34-convex-bsd
- exit ;;
- C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
- echo c38-convex-bsd
- exit ;;
- C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
- echo c4-convex-bsd
- exit ;;
- CRAY*Y-MP:*:*:*)
- echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*[A-Z]90:*:*:*)
- echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
- | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
- -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
- -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*TS:*:*:*)
- echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*T3E:*:*:*)
- echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*SV1:*:*:*)
- echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- *:UNICOS/mp:*:*)
- echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
- FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
- echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
- 5000:UNIX_System_V:4.*:*)
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
- echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
- i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
- echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
- exit ;;
- sparc*:BSD/OS:*:*)
- echo sparc-unknown-bsdi${UNAME_RELEASE}
- exit ;;
- *:BSD/OS:*:*)
- echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
- exit ;;
- *:FreeBSD:*:*)
- case ${UNAME_MACHINE} in
- pc98)
- echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- amd64)
- echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- *)
- echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- esac
- exit ;;
- i*:CYGWIN*:*)
- echo ${UNAME_MACHINE}-pc-cygwin
- exit ;;
- *:MINGW*:*)
- echo ${UNAME_MACHINE}-pc-mingw32
- exit ;;
- i*:windows32*:*)
- # uname -m includes "-pc" on this system.
- echo ${UNAME_MACHINE}-mingw32
- exit ;;
- i*:PW*:*)
- echo ${UNAME_MACHINE}-pc-pw32
- exit ;;
- *:Interix*:[3456]*)
- case ${UNAME_MACHINE} in
- x86)
- echo i586-pc-interix${UNAME_RELEASE}
- exit ;;
- EM64T | authenticamd)
- echo x86_64-unknown-interix${UNAME_RELEASE}
- exit ;;
- esac ;;
- [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
- echo i${UNAME_MACHINE}-pc-mks
- exit ;;
- i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
- # UNAME_MACHINE based on the output of uname instead of i386?
- echo i586-pc-interix
- exit ;;
- i*:UWIN*:*)
- echo ${UNAME_MACHINE}-pc-uwin
- exit ;;
- amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
- echo x86_64-unknown-cygwin
- exit ;;
- p*:CYGWIN*:*)
- echo powerpcle-unknown-cygwin
- exit ;;
- prep*:SunOS:5.*:*)
- echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- *:GNU:*:*)
- # the GNU system
- echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
- exit ;;
- *:GNU/*:*:*)
- # other systems with GNU libc and userland
- echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
- exit ;;
- i*86:Minix:*:*)
- echo ${UNAME_MACHINE}-pc-minix
- exit ;;
- arm*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- avr32*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- cris:Linux:*:*)
- echo cris-axis-linux-gnu
- exit ;;
- crisv32:Linux:*:*)
- echo crisv32-axis-linux-gnu
- exit ;;
- frv:Linux:*:*)
- echo frv-unknown-linux-gnu
- exit ;;
- ia64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- m32r*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- m68*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- mips:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips
- #undef mipsel
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mipsel
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- mips64:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips64
- #undef mips64el
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mips64el
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips64
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- or32:Linux:*:*)
- echo or32-unknown-linux-gnu
- exit ;;
- ppc:Linux:*:*)
- echo powerpc-unknown-linux-gnu
- exit ;;
- ppc64:Linux:*:*)
- echo powerpc64-unknown-linux-gnu
- exit ;;
- alpha:Linux:*:*)
- case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
- EV5) UNAME_MACHINE=alphaev5 ;;
- EV56) UNAME_MACHINE=alphaev56 ;;
- PCA56) UNAME_MACHINE=alphapca56 ;;
- PCA57) UNAME_MACHINE=alphapca56 ;;
- EV6) UNAME_MACHINE=alphaev6 ;;
- EV67) UNAME_MACHINE=alphaev67 ;;
- EV68*) UNAME_MACHINE=alphaev68 ;;
- esac
- objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
- if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
- echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
- exit ;;
- parisc:Linux:*:* | hppa:Linux:*:*)
- # Look for CPU level
- case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
- PA7*) echo hppa1.1-unknown-linux-gnu ;;
- PA8*) echo hppa2.0-unknown-linux-gnu ;;
- *) echo hppa-unknown-linux-gnu ;;
- esac
- exit ;;
- parisc64:Linux:*:* | hppa64:Linux:*:*)
- echo hppa64-unknown-linux-gnu
- exit ;;
- s390:Linux:*:* | s390x:Linux:*:*)
- echo ${UNAME_MACHINE}-ibm-linux
- exit ;;
- sh64*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- sh*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- sparc:Linux:*:* | sparc64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- tile:Linux:*:*)
- echo tile-unknown-linux-gnu
- exit ;;
- vax:Linux:*:*)
- echo ${UNAME_MACHINE}-dec-linux-gnu
- exit ;;
- x86_64:Linux:*:*)
- echo x86_64-unknown-linux-gnu
- exit ;;
- xtensa:Linux:*:*)
- echo xtensa-unknown-linux-gnu
- exit ;;
- i*86:Linux:*:*)
- # The BFD linker knows what the default object file format is, so
- # first see if it will tell us. cd to the root directory to prevent
- # problems with other programs or directories called `ld' in the path.
- # Set LC_ALL=C to ensure ld outputs messages in English.
- ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
- | sed -ne '/supported targets:/!d
- s/[ ][ ]*/ /g
- s/.*supported targets: *//
- s/ .*//
- p'`
- case "$ld_supported_targets" in
- elf32-i386)
- TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
- ;;
- a.out-i386-linux)
- echo "${UNAME_MACHINE}-pc-linux-gnuaout"
- exit ;;
- coff-i386)
- echo "${UNAME_MACHINE}-pc-linux-gnucoff"
- exit ;;
- "")
- # Either a pre-BFD a.out linker (linux-gnuoldld) or
- # one that does not give us useful --help.
- echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
- exit ;;
- esac
- # Determine whether the default compiler is a.out or elf
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <features.h>
- #ifdef __ELF__
- # ifdef __GLIBC__
- # if __GLIBC__ >= 2
- LIBC=gnu
- # else
- LIBC=gnulibc1
- # endif
- # else
- LIBC=gnulibc1
- # endif
- #else
- #if defined(__INTEL_COMPILER) || defined(__PGI) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
- LIBC=gnu
- #else
- LIBC=gnuaout
- #endif
- #endif
- #ifdef __dietlibc__
- LIBC=dietlibc
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^LIBC/{
- s: ::g
- p
- }'`"
- test x"${LIBC}" != x && {
- echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
- exit
- }
- test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; }
- ;;
- i*86:DYNIX/ptx:4*:*)
- # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
- # earlier versions are messed up and put the nodename in both
- # sysname and nodename.
- echo i386-sequent-sysv4
- exit ;;
- i*86:UNIX_SV:4.2MP:2.*)
- # Unixware is an offshoot of SVR4, but it has its own version
- # number series starting with 2...
- # I am not positive that other SVR4 systems won't match this,
- # I just have to hope. -- rms.
- # Use sysv4.2uw... so that sysv4* matches it.
- echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
- exit ;;
- i*86:OS/2:*:*)
- # If we were able to find `uname', then EMX Unix compatibility
- # is probably installed.
- echo ${UNAME_MACHINE}-pc-os2-emx
- exit ;;
- i*86:XTS-300:*:STOP)
- echo ${UNAME_MACHINE}-unknown-stop
- exit ;;
- i*86:atheos:*:*)
- echo ${UNAME_MACHINE}-unknown-atheos
- exit ;;
- i*86:syllable:*:*)
- echo ${UNAME_MACHINE}-pc-syllable
- exit ;;
- i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
- echo i386-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- i*86:*DOS:*:*)
- echo ${UNAME_MACHINE}-pc-msdosdjgpp
- exit ;;
- i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
- UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
- if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
- echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
- else
- echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
- fi
- exit ;;
- i*86:*:5:[678]*)
- # UnixWare 7.x, OpenUNIX and OpenServer 6.
- case `/bin/uname -X | grep "^Machine"` in
- *486*) UNAME_MACHINE=i486 ;;
- *Pentium) UNAME_MACHINE=i586 ;;
- *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
- esac
- echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
- exit ;;
- i*86:*:3.2:*)
- if test -f /usr/options/cb.name; then
- UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
- echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
- elif /bin/uname -X 2>/dev/null >/dev/null ; then
- UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
- (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
- (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
- && UNAME_MACHINE=i586
- (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
- && UNAME_MACHINE=i686
- (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
- && UNAME_MACHINE=i686
- echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
- else
- echo ${UNAME_MACHINE}-pc-sysv32
- fi
- exit ;;
- pc:*:*:*)
- # Left here for compatibility:
- # uname -m prints for DJGPP always 'pc', but it prints nothing about
- # the processor, so we play safe by assuming i386.
- echo i386-pc-msdosdjgpp
- exit ;;
- Intel:Mach:3*:*)
- echo i386-pc-mach3
- exit ;;
- paragon:*:*:*)
- echo i860-intel-osf1
- exit ;;
- i860:*:4.*:*) # i860-SVR4
- if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
- echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
- else # Add other i860-SVR4 vendors below as they are discovered.
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
- fi
- exit ;;
- mini*:CTIX:SYS*5:*)
- # "miniframe"
- echo m68010-convergent-sysv
- exit ;;
- mc68k:UNIX:SYSTEM5:3.51m)
- echo m68k-convergent-sysv
- exit ;;
- M680?0:D-NIX:5.3:*)
- echo m68k-diab-dnix
- exit ;;
- M68*:*:R3V[5678]*:*)
- test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
- 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
- OS_REL=''
- test -r /etc/.relid \
- && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
- 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4; exit; } ;;
- m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
- echo m68k-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- mc68030:UNIX_System_V:4.*:*)
- echo m68k-atari-sysv4
- exit ;;
- TSUNAMI:LynxOS:2.*:*)
- echo sparc-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- rs6000:LynxOS:2.*:*)
- echo rs6000-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
- echo powerpc-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- SM[BE]S:UNIX_SV:*:*)
- echo mips-dde-sysv${UNAME_RELEASE}
- exit ;;
- RM*:ReliantUNIX-*:*:*)
- echo mips-sni-sysv4
- exit ;;
- RM*:SINIX-*:*:*)
- echo mips-sni-sysv4
- exit ;;
- *:SINIX-*:*:*)
- if uname -p 2>/dev/null >/dev/null ; then
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
- echo ${UNAME_MACHINE}-sni-sysv4
- else
- echo ns32k-sni-sysv
- fi
- exit ;;
- PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
- echo i586-unisys-sysv4
- exit ;;
- *:UNIX_System_V:4*:FTX*)
- # From Gerald Hewes <[email protected]>.
- # How about differentiating between stratus architectures? -djm
- echo hppa1.1-stratus-sysv4
- exit ;;
- *:*:*:FTX*)
- echo i860-stratus-sysv4
- exit ;;
- i*86:VOS:*:*)
- echo ${UNAME_MACHINE}-stratus-vos
- exit ;;
- *:VOS:*:*)
- echo hppa1.1-stratus-vos
- exit ;;
- mc68*:A/UX:*:*)
- echo m68k-apple-aux${UNAME_RELEASE}
- exit ;;
- news*:NEWS-OS:6*:*)
- echo mips-sony-newsos6
- exit ;;
- R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
- if [ -d /usr/nec ]; then
- echo mips-nec-sysv${UNAME_RELEASE}
- else
- echo mips-unknown-sysv${UNAME_RELEASE}
- fi
- exit ;;
- BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
- echo powerpc-be-beos
- exit ;;
- BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
- echo powerpc-apple-beos
- exit ;;
- BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
- echo i586-pc-beos
- exit ;;
- SX-4:SUPER-UX:*:*)
- echo sx4-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-5:SUPER-UX:*:*)
- echo sx5-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-6:SUPER-UX:*:*)
- echo sx6-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-7:SUPER-UX:*:*)
- echo sx7-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-8:SUPER-UX:*:*)
- echo sx8-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-8R:SUPER-UX:*:*)
- echo sx8r-nec-superux${UNAME_RELEASE}
- exit ;;
- Power*:Rhapsody:*:*)
- echo powerpc-apple-rhapsody${UNAME_RELEASE}
- exit ;;
- *:Rhapsody:*:*)
- echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
- exit ;;
- *:Darwin:*:*)
- UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
- case $UNAME_PROCESSOR in
- unknown) UNAME_PROCESSOR=powerpc ;;
- esac
- echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
- exit ;;
- *:procnto*:*:* | *:QNX:[0123456789]*:*)
- UNAME_PROCESSOR=`uname -p`
- if test "$UNAME_PROCESSOR" = "x86"; then
- UNAME_PROCESSOR=i386
- UNAME_MACHINE=pc
- fi
- echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
- exit ;;
- *:QNX:*:4*)
- echo i386-pc-qnx
- exit ;;
- NSE-?:NONSTOP_KERNEL:*:*)
- echo nse-tandem-nsk${UNAME_RELEASE}
- exit ;;
- NSR-?:NONSTOP_KERNEL:*:*)
- echo nsr-tandem-nsk${UNAME_RELEASE}
- exit ;;
- *:NonStop-UX:*:*)
- echo mips-compaq-nonstopux
- exit ;;
- BS2000:POSIX*:*:*)
- echo bs2000-siemens-sysv
- exit ;;
- DS/*:UNIX_System_V:*:*)
- echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
- exit ;;
- *:Plan9:*:*)
- # "uname -m" is not consistent, so use $cputype instead. 386
- # is converted to i386 for consistency with other x86
- # operating systems.
- if test "$cputype" = "386"; then
- UNAME_MACHINE=i386
- else
- UNAME_MACHINE="$cputype"
- fi
- echo ${UNAME_MACHINE}-unknown-plan9
- exit ;;
- *:TOPS-10:*:*)
- echo pdp10-unknown-tops10
- exit ;;
- *:TENEX:*:*)
- echo pdp10-unknown-tenex
- exit ;;
- KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
- echo pdp10-dec-tops20
- exit ;;
- XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
- echo pdp10-xkl-tops20
- exit ;;
- *:TOPS-20:*:*)
- echo pdp10-unknown-tops20
- exit ;;
- *:ITS:*:*)
- echo pdp10-unknown-its
- exit ;;
- SEI:*:*:SEIUX)
- echo mips-sei-seiux${UNAME_RELEASE}
- exit ;;
- *:DragonFly:*:*)
- echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
- exit ;;
- *:*VMS:*:*)
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
- case "${UNAME_MACHINE}" in
- A*) echo alpha-dec-vms ; exit ;;
- I*) echo ia64-dec-vms ; exit ;;
- V*) echo vax-dec-vms ; exit ;;
- esac ;;
- *:XENIX:*:SysV)
- echo i386-pc-xenix
- exit ;;
- i*86:skyos:*:*)
- echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
- exit ;;
- i*86:rdos:*:*)
- echo ${UNAME_MACHINE}-pc-rdos
- exit ;;
-esac
-
-#echo '(No uname command or uname output not recognized.)' 1>&2
-#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
-
-eval $set_cc_for_build
-cat >$dummy.c <<EOF
-#ifdef _SEQUENT_
-# include <sys/types.h>
-# include <sys/utsname.h>
-#endif
-main ()
-{
-#if defined (sony)
-#if defined (MIPSEB)
- /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
- I don't know.... */
- printf ("mips-sony-bsd\n"); exit (0);
-#else
-#include <sys/param.h>
- printf ("m68k-sony-newsos%s\n",
-#ifdef NEWSOS4
- "4"
-#else
- ""
-#endif
- ); exit (0);
-#endif
-#endif
-
-#if defined (__arm) && defined (__acorn) && defined (__unix)
- printf ("arm-acorn-riscix\n"); exit (0);
-#endif
-
-#if defined (hp300) && !defined (hpux)
- printf ("m68k-hp-bsd\n"); exit (0);
-#endif
-
-#if defined (NeXT)
-#if !defined (__ARCHITECTURE__)
-#define __ARCHITECTURE__ "m68k"
-#endif
- int version;
- version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
- if (version < 4)
- printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
- else
- printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
- exit (0);
-#endif
-
-#if defined (MULTIMAX) || defined (n16)
-#if defined (UMAXV)
- printf ("ns32k-encore-sysv\n"); exit (0);
-#else
-#if defined (CMU)
- printf ("ns32k-encore-mach\n"); exit (0);
-#else
- printf ("ns32k-encore-bsd\n"); exit (0);
-#endif
-#endif
-#endif
-
-#if defined (__386BSD__)
- printf ("i386-pc-bsd\n"); exit (0);
-#endif
-
-#if defined (sequent)
-#if defined (i386)
- printf ("i386-sequent-dynix\n"); exit (0);
-#endif
-#if defined (ns32000)
- printf ("ns32k-sequent-dynix\n"); exit (0);
-#endif
-#endif
-
-#if defined (_SEQUENT_)
- struct utsname un;
-
- uname(&un);
-
- if (strncmp(un.version, "V2", 2) == 0) {
- printf ("i386-sequent-ptx2\n"); exit (0);
- }
- if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
- printf ("i386-sequent-ptx1\n"); exit (0);
- }
- printf ("i386-sequent-ptx\n"); exit (0);
-
-#endif
-
-#if defined (vax)
-# if !defined (ultrix)
-# include <sys/param.h>
-# if defined (BSD)
-# if BSD == 43
- printf ("vax-dec-bsd4.3\n"); exit (0);
-# else
-# if BSD == 199006
- printf ("vax-dec-bsd4.3reno\n"); exit (0);
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# endif
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# else
- printf ("vax-dec-ultrix\n"); exit (0);
-# endif
-#endif
-
-#if defined (alliant) && defined (i860)
- printf ("i860-alliant-bsd\n"); exit (0);
-#endif
-
- exit (1);
-}
-EOF
-
-$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
-
-# Apollos put the system type in the environment.
-
-test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
-
-# Convex versions that predate uname can use getsysinfo(1)
-
-if [ -x /usr/convex/getsysinfo ]
-then
- case `getsysinfo -f cpu_type` in
- c1*)
- echo c1-convex-bsd
- exit ;;
- c2*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- c34*)
- echo c34-convex-bsd
- exit ;;
- c38*)
- echo c38-convex-bsd
- exit ;;
- c4*)
- echo c4-convex-bsd
- exit ;;
- esac
-fi
-
-cat >&2 <<EOF
-$0: unable to guess system type
-
-This script, last modified $timestamp, has failed to recognize
-the operating system you are using. It is advised that you
-download the most up to date version of the config scripts from
-
- http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.guess
-and
- http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.sub
-
-If the version you run ($0) is already up to date, please
-send the following data and any information you think might be
-pertinent to <config-patches@gnu.org> in order to provide the needed
-information to handle your system.
-
-config.guess timestamp = $timestamp
-
-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
-
-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
-/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
-
-hostinfo = `(hostinfo) 2>/dev/null`
-/bin/universe = `(/bin/universe) 2>/dev/null`
-/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
-/bin/arch = `(/bin/arch) 2>/dev/null`
-/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
-
-UNAME_MACHINE = ${UNAME_MACHINE}
-UNAME_RELEASE = ${UNAME_RELEASE}
-UNAME_SYSTEM = ${UNAME_SYSTEM}
-UNAME_VERSION = ${UNAME_VERSION}
-EOF
-
-exit 1
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/lib/diameter/autoconf/config.sub b/lib/diameter/autoconf/config.sub
deleted file mode 100755
index f43233b104..0000000000
--- a/lib/diameter/autoconf/config.sub
+++ /dev/null
@@ -1,1630 +0,0 @@
-#! /bin/sh
-# Configuration validation subroutine script.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
-# Inc.
-
-timestamp='2007-04-29'
-
-# This file is (in principle) common to ALL GNU software.
-# The presence of a machine in this file suggests that SOME GNU software
-# can handle that machine. It does not imply ALL GNU software can.
-#
-# This file is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Please send patches to <config-patches@gnu.org>.  Submit a context
-# diff and a properly formatted ChangeLog entry.
-#
-# Configuration subroutine to validate and canonicalize a configuration type.
-# Supply the specified configuration type as an argument.
-# If it is invalid, we print an error message on stderr and exit with code 1.
-# Otherwise, we print the canonical config type on stdout and succeed.
-
-# This file is supposed to be the same for all GNU packages
-# and recognize all the CPU types, system types and aliases
-# that are meaningful with *any* GNU software.
-# Each package is responsible for reporting which valid configurations
-# it does not support. The user should be able to distinguish
-# a failure to support a valid configuration from a meaningless
-# configuration.
-
-# The goal of this file is to map all the various variations of a given
-# machine specification into a single specification in the form:
-# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
-# or in some cases, the newer four-part form:
-# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
-# It is wrong to echo any other type of specification.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION] CPU-MFR-OPSYS
- $0 [OPTION] ALIAS
-
-Canonicalize a configuration name.
-
-Operation modes:
- -h, --help print this help, then exit
- -t, --time-stamp print date of last modification, then exit
- -v, --version print version number, then exit
-
-Report bugs and patches to <config-patches@gnu.org>."
-
-version="\
-GNU config.sub ($timestamp)
-
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
-Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
- case $1 in
- --time-stamp | --time* | -t )
- echo "$timestamp" ; exit ;;
- --version | -v )
- echo "$version" ; exit ;;
- --help | --h* | -h )
- echo "$usage"; exit ;;
- -- ) # Stop option processing
- shift; break ;;
- - ) # Use stdin as input.
- break ;;
- -* )
- echo "$me: invalid option $1$help"
- exit 1 ;;
-
- *local*)
- # First pass through any local machine types.
- echo $1
- exit ;;
-
- * )
- break ;;
- esac
-done
-
-case $# in
- 0) echo "$me: missing argument$help" >&2
- exit 1;;
- 1) ;;
- *) echo "$me: too many arguments$help" >&2
- exit 1;;
-esac
-
-# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
-# Here we must recognize all the valid KERNEL-OS combinations.
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
-case $maybe_os in
- nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
- uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
- storm-chaos* | os2-emx* | rtmk-nova*)
- os=-$maybe_os
- basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
- ;;
- *)
- basic_machine=`echo $1 | sed 's/-[^-]*$//'`
- if [ $basic_machine != $1 ]
- then os=`echo $1 | sed 's/.*-/-/'`
- else os=; fi
- ;;
-esac
-
-### Let's recognize common machines as not being operating systems so
-### that things like config.sub decstation-3100 work. We also
-### recognize some manufacturers as not being operating systems, so we
-### can provide default operating systems below.
-case $os in
- -sun*os*)
- # Prevent following clause from handling this invalid input.
- ;;
- -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
- -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
- -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
- -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
- -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
- -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
- -apple | -axis | -knuth | -cray)
- os=
- basic_machine=$1
- ;;
- -sim | -cisco | -oki | -wec | -winbond)
- os=
- basic_machine=$1
- ;;
- -scout)
- ;;
- -wrs)
- os=-vxworks
- basic_machine=$1
- ;;
- -chorusos*)
- os=-chorusos
- basic_machine=$1
- ;;
- -chorusrdb)
- os=-chorusrdb
- basic_machine=$1
- ;;
- -hiux*)
- os=-hiuxwe2
- ;;
- -sco6)
- os=-sco5v6
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco5)
- os=-sco3.2v5
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco4)
- os=-sco3.2v4
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco3.2.[4-9]*)
- os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco3.2v[4-9]*)
- # Don't forget version if it is 3.2v4 or newer.
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco5v6*)
- # Don't forget version if it is 3.2v4 or newer.
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco*)
- os=-sco3.2v2
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -udk*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -isc)
- os=-isc2.2
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -clix*)
- basic_machine=clipper-intergraph
- ;;
- -isc*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -lynx*)
- os=-lynxos
- ;;
- -ptx*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
- ;;
- -windowsnt*)
- os=`echo $os | sed -e 's/windowsnt/winnt/'`
- ;;
- -psos*)
- os=-psos
- ;;
- -mint | -mint[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
-esac
-
-# Decode aliases for certain CPU-COMPANY combinations.
-case $basic_machine in
- # Recognize the basic CPU types without company name.
- # Some are omitted here because they have special meanings below.
- 1750a | 580 \
- | a29k \
- | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
- | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
- | am33_2.0 \
- | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
- | bfin \
- | c4x | clipper \
- | d10v | d30v | dlx | dsp16xx \
- | fido | fr30 | frv \
- | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
- | i370 | i860 | i960 | ia64 \
- | ip2k | iq2000 \
- | m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | mcore | mep \
- | mips | mipsbe | mipseb | mipsel | mipsle \
- | mips16 \
- | mips64 | mips64el \
- | mips64vr | mips64vrel \
- | mips64orion | mips64orionel \
- | mips64vr4100 | mips64vr4100el \
- | mips64vr4300 | mips64vr4300el \
- | mips64vr5000 | mips64vr5000el \
- | mips64vr5900 | mips64vr5900el \
- | mipsisa32 | mipsisa32el \
- | mipsisa32r2 | mipsisa32r2el \
- | mipsisa64 | mipsisa64el \
- | mipsisa64r2 | mipsisa64r2el \
- | mipsisa64sb1 | mipsisa64sb1el \
- | mipsisa64sr71k | mipsisa64sr71kel \
- | mipstx39 | mipstx39el \
- | mn10200 | mn10300 \
- | mt \
- | msp430 \
- | nios | nios2 \
- | ns16k | ns32k \
- | or32 \
- | pdp10 | pdp11 | pj | pjl \
- | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
- | pyramid \
- | score \
- | sh | sh[1234] | sh[24]a | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
- | sh64 | sh64le \
- | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
- | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
- | spu | strongarm \
- | tahoe | thumb | tic4x | tic80 | tron \
- | v850 | v850e \
- | we32k \
- | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
- | z8k)
- basic_machine=$basic_machine-unknown
- ;;
- m6811 | m68hc11 | m6812 | m68hc12)
- # Motorola 68HC11/12.
- basic_machine=$basic_machine-unknown
- os=-none
- ;;
- m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
- ;;
- ms1)
- basic_machine=mt-unknown
- ;;
-
- # We use `pc' rather than `unknown'
- # because (1) that's what they normally are, and
- # (2) the word "unknown" tends to confuse beginning users.
- i*86 | x86_64)
- basic_machine=$basic_machine-pc
- ;;
- # Object if more than one company name word.
- *-*-*)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
- # Recognize the basic CPU types with company name.
- 580-* \
- | a29k-* \
- | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
- | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
- | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
- | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
- | avr-* | avr32-* \
- | bfin-* | bs2000-* \
- | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
- | clipper-* | craynv-* | cydra-* \
- | d10v-* | d30v-* | dlx-* \
- | elxsi-* \
- | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
- | h8300-* | h8500-* \
- | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
- | i*86-* | i860-* | i960-* | ia64-* \
- | ip2k-* | iq2000-* \
- | m32c-* | m32r-* | m32rle-* \
- | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* \
- | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
- | mips16-* \
- | mips64-* | mips64el-* \
- | mips64vr-* | mips64vrel-* \
- | mips64orion-* | mips64orionel-* \
- | mips64vr4100-* | mips64vr4100el-* \
- | mips64vr4300-* | mips64vr4300el-* \
- | mips64vr5000-* | mips64vr5000el-* \
- | mips64vr5900-* | mips64vr5900el-* \
- | mipsisa32-* | mipsisa32el-* \
- | mipsisa32r2-* | mipsisa32r2el-* \
- | mipsisa64-* | mipsisa64el-* \
- | mipsisa64r2-* | mipsisa64r2el-* \
- | mipsisa64sb1-* | mipsisa64sb1el-* \
- | mipsisa64sr71k-* | mipsisa64sr71kel-* \
- | mipstx39-* | mipstx39el-* \
- | mmix-* \
- | mt-* \
- | msp430-* \
- | nios-* | nios2-* \
- | none-* | np1-* | ns16k-* | ns32k-* \
- | orion-* \
- | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
- | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
- | pyramid-* \
- | romp-* | rs6000-* \
- | sh-* | sh[1234]-* | sh[24]a-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
- | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
- | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
- | sparclite-* \
- | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
- | tahoe-* | thumb-* \
- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
- | tron-* \
- | v850-* | v850e-* | vax-* \
- | we32k-* \
- | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
- | xstormy16-* | xtensa-* \
- | ymp-* \
- | z8k-*)
- ;;
- # Recognize the various machine names and aliases which stand
- # for a CPU type and a company and sometimes even an OS.
- 386bsd)
- basic_machine=i386-unknown
- os=-bsd
- ;;
- 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
- basic_machine=m68000-att
- ;;
- 3b*)
- basic_machine=we32k-att
- ;;
- a29khif)
- basic_machine=a29k-amd
- os=-udi
- ;;
- abacus)
- basic_machine=abacus-unknown
- ;;
- adobe68k)
- basic_machine=m68010-adobe
- os=-scout
- ;;
- alliant | fx80)
- basic_machine=fx80-alliant
- ;;
- altos | altos3068)
- basic_machine=m68k-altos
- ;;
- am29k)
- basic_machine=a29k-none
- os=-bsd
- ;;
- amd64)
- basic_machine=x86_64-pc
- ;;
- amd64-*)
- basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- amdahl)
- basic_machine=580-amdahl
- os=-sysv
- ;;
- amiga | amiga-*)
- basic_machine=m68k-unknown
- ;;
- amigaos | amigados)
- basic_machine=m68k-unknown
- os=-amigaos
- ;;
- amigaunix | amix)
- basic_machine=m68k-unknown
- os=-sysv4
- ;;
- apollo68)
- basic_machine=m68k-apollo
- os=-sysv
- ;;
- apollo68bsd)
- basic_machine=m68k-apollo
- os=-bsd
- ;;
- aux)
- basic_machine=m68k-apple
- os=-aux
- ;;
- balance)
- basic_machine=ns32k-sequent
- os=-dynix
- ;;
- c90)
- basic_machine=c90-cray
- os=-unicos
- ;;
- convex-c1)
- basic_machine=c1-convex
- os=-bsd
- ;;
- convex-c2)
- basic_machine=c2-convex
- os=-bsd
- ;;
- convex-c32)
- basic_machine=c32-convex
- os=-bsd
- ;;
- convex-c34)
- basic_machine=c34-convex
- os=-bsd
- ;;
- convex-c38)
- basic_machine=c38-convex
- os=-bsd
- ;;
- cray | j90)
- basic_machine=j90-cray
- os=-unicos
- ;;
- craynv)
- basic_machine=craynv-cray
- os=-unicosmp
- ;;
- cr16c)
- basic_machine=cr16c-unknown
- os=-elf
- ;;
- crds | unos)
- basic_machine=m68k-crds
- ;;
- crisv32 | crisv32-* | etraxfs*)
- basic_machine=crisv32-axis
- ;;
- cris | cris-* | etrax*)
- basic_machine=cris-axis
- ;;
- crx)
- basic_machine=crx-unknown
- os=-elf
- ;;
- da30 | da30-*)
- basic_machine=m68k-da30
- ;;
- decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
- basic_machine=mips-dec
- ;;
- decsystem10* | dec10*)
- basic_machine=pdp10-dec
- os=-tops10
- ;;
- decsystem20* | dec20*)
- basic_machine=pdp10-dec
- os=-tops20
- ;;
- delta | 3300 | motorola-3300 | motorola-delta \
- | 3300-motorola | delta-motorola)
- basic_machine=m68k-motorola
- ;;
- delta88)
- basic_machine=m88k-motorola
- os=-sysv3
- ;;
- djgpp)
- basic_machine=i586-pc
- os=-msdosdjgpp
- ;;
- dpx20 | dpx20-*)
- basic_machine=rs6000-bull
- os=-bosx
- ;;
- dpx2* | dpx2*-bull)
- basic_machine=m68k-bull
- os=-sysv3
- ;;
- ebmon29k)
- basic_machine=a29k-amd
- os=-ebmon
- ;;
- elxsi)
- basic_machine=elxsi-elxsi
- os=-bsd
- ;;
- encore | umax | mmax)
- basic_machine=ns32k-encore
- ;;
- es1800 | OSE68k | ose68k | ose | OSE)
- basic_machine=m68k-ericsson
- os=-ose
- ;;
- fx2800)
- basic_machine=i860-alliant
- ;;
- genix)
- basic_machine=ns32k-ns
- ;;
- gmicro)
- basic_machine=tron-gmicro
- os=-sysv
- ;;
- go32)
- basic_machine=i386-pc
- os=-go32
- ;;
- h3050r* | hiux*)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- h8300hms)
- basic_machine=h8300-hitachi
- os=-hms
- ;;
- h8300xray)
- basic_machine=h8300-hitachi
- os=-xray
- ;;
- h8500hms)
- basic_machine=h8500-hitachi
- os=-hms
- ;;
- harris)
- basic_machine=m88k-harris
- os=-sysv3
- ;;
- hp300-*)
- basic_machine=m68k-hp
- ;;
- hp300bsd)
- basic_machine=m68k-hp
- os=-bsd
- ;;
- hp300hpux)
- basic_machine=m68k-hp
- os=-hpux
- ;;
- hp3k9[0-9][0-9] | hp9[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hp9k2[0-9][0-9] | hp9k31[0-9])
- basic_machine=m68000-hp
- ;;
- hp9k3[2-9][0-9])
- basic_machine=m68k-hp
- ;;
- hp9k6[0-9][0-9] | hp6[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hp9k7[0-79][0-9] | hp7[0-79][0-9])
- basic_machine=hppa1.1-hp
- ;;
- hp9k78[0-9] | hp78[0-9])
- # FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
- # FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[0-9][13679] | hp8[0-9][13679])
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[0-9][0-9] | hp8[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hppa-next)
- os=-nextstep3
- ;;
- hppaosf)
- basic_machine=hppa1.1-hp
- os=-osf
- ;;
- hppro)
- basic_machine=hppa1.1-hp
- os=-proelf
- ;;
- i370-ibm* | ibm*)
- basic_machine=i370-ibm
- ;;
-# I'm not sure what "Sysv32" means. Should this be sysv3.2?
- i*86v32)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv32
- ;;
- i*86v4*)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv4
- ;;
- i*86v)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv
- ;;
- i*86sol2)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-solaris2
- ;;
- i386mach)
- basic_machine=i386-mach
- os=-mach
- ;;
- i386-vsta | vsta)
- basic_machine=i386-unknown
- os=-vsta
- ;;
- iris | iris4d)
- basic_machine=mips-sgi
- case $os in
- -irix*)
- ;;
- *)
- os=-irix4
- ;;
- esac
- ;;
- isi68 | isi)
- basic_machine=m68k-isi
- os=-sysv
- ;;
- m88k-omron*)
- basic_machine=m88k-omron
- ;;
- magnum | m3230)
- basic_machine=mips-mips
- os=-sysv
- ;;
- merlin)
- basic_machine=ns32k-utek
- os=-sysv
- ;;
- mingw32)
- basic_machine=i386-pc
- os=-mingw32
- ;;
- mingw32ce)
- basic_machine=arm-unknown
- os=-mingw32ce
- ;;
- miniframe)
- basic_machine=m68000-convergent
- ;;
- *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
- mips3*-*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
- ;;
- mips3*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
- ;;
- monitor)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- morphos)
- basic_machine=powerpc-unknown
- os=-morphos
- ;;
- msdos)
- basic_machine=i386-pc
- os=-msdos
- ;;
- ms1-*)
- basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
- ;;
- mvs)
- basic_machine=i370-ibm
- os=-mvs
- ;;
- ncr3000)
- basic_machine=i486-ncr
- os=-sysv4
- ;;
- netbsd386)
- basic_machine=i386-unknown
- os=-netbsd
- ;;
- netwinder)
- basic_machine=armv4l-rebel
- os=-linux
- ;;
- news | news700 | news800 | news900)
- basic_machine=m68k-sony
- os=-newsos
- ;;
- news1000)
- basic_machine=m68030-sony
- os=-newsos
- ;;
- news-3600 | risc-news)
- basic_machine=mips-sony
- os=-newsos
- ;;
- necv70)
- basic_machine=v70-nec
- os=-sysv
- ;;
- next | m*-next )
- basic_machine=m68k-next
- case $os in
- -nextstep* )
- ;;
- -ns2*)
- os=-nextstep2
- ;;
- *)
- os=-nextstep3
- ;;
- esac
- ;;
- nh3000)
- basic_machine=m68k-harris
- os=-cxux
- ;;
- nh[45]000)
- basic_machine=m88k-harris
- os=-cxux
- ;;
- nindy960)
- basic_machine=i960-intel
- os=-nindy
- ;;
- mon960)
- basic_machine=i960-intel
- os=-mon960
- ;;
- nonstopux)
- basic_machine=mips-compaq
- os=-nonstopux
- ;;
- np1)
- basic_machine=np1-gould
- ;;
- nsr-tandem)
- basic_machine=nsr-tandem
- ;;
- op50n-* | op60c-*)
- basic_machine=hppa1.1-oki
- os=-proelf
- ;;
- openrisc | openrisc-*)
- basic_machine=or32-unknown
- ;;
- os400)
- basic_machine=powerpc-ibm
- os=-os400
- ;;
- OSE68000 | ose68000)
- basic_machine=m68000-ericsson
- os=-ose
- ;;
- os68k)
- basic_machine=m68k-none
- os=-os68k
- ;;
- pa-hitachi)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- paragon)
- basic_machine=i860-intel
- os=-osf
- ;;
- pbd)
- basic_machine=sparc-tti
- ;;
- pbb)
- basic_machine=m68k-tti
- ;;
- pc532 | pc532-*)
- basic_machine=ns32k-pc532
- ;;
- pc98)
- basic_machine=i386-pc
- ;;
- pc98-*)
- basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium | p5 | k5 | k6 | nexgen | viac3)
- basic_machine=i586-pc
- ;;
- pentiumpro | p6 | 6x86 | athlon | athlon_*)
- basic_machine=i686-pc
- ;;
- pentiumii | pentium2 | pentiumiii | pentium3)
- basic_machine=i686-pc
- ;;
- pentium4)
- basic_machine=i786-pc
- ;;
- pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
- basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumpro-* | p6-* | 6x86-* | athlon-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium4-*)
- basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pn)
- basic_machine=pn-gould
- ;;
- power) basic_machine=power-ibm
- ;;
- ppc) basic_machine=powerpc-unknown
- ;;
- ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppcle | powerpclittle | ppc-le | powerpc-little)
- basic_machine=powerpcle-unknown
- ;;
- ppcle-* | powerpclittle-*)
- basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64) basic_machine=powerpc64-unknown
- ;;
- ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64le | powerpc64little | ppc64-le | powerpc64-little)
- basic_machine=powerpc64le-unknown
- ;;
- ppc64le-* | powerpc64little-*)
- basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ps2)
- basic_machine=i386-ibm
- ;;
- pw32)
- basic_machine=i586-unknown
- os=-pw32
- ;;
- rdos)
- basic_machine=i386-pc
- os=-rdos
- ;;
- rom68k)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- rm[46]00)
- basic_machine=mips-siemens
- ;;
- rtpc | rtpc-*)
- basic_machine=romp-ibm
- ;;
- s390 | s390-*)
- basic_machine=s390-ibm
- ;;
- s390x | s390x-*)
- basic_machine=s390x-ibm
- ;;
- sa29200)
- basic_machine=a29k-amd
- os=-udi
- ;;
- sb1)
- basic_machine=mipsisa64sb1-unknown
- ;;
- sb1el)
- basic_machine=mipsisa64sb1el-unknown
- ;;
- sde)
- basic_machine=mipsisa32-sde
- os=-elf
- ;;
- sei)
- basic_machine=mips-sei
- os=-seiux
- ;;
- sequent)
- basic_machine=i386-sequent
- ;;
- sh)
- basic_machine=sh-hitachi
- os=-hms
- ;;
- sh5el)
- basic_machine=sh5le-unknown
- ;;
- sh64)
- basic_machine=sh64-unknown
- ;;
- sparclite-wrs | simso-wrs)
- basic_machine=sparclite-wrs
- os=-vxworks
- ;;
- sps7)
- basic_machine=m68k-bull
- os=-sysv2
- ;;
- spur)
- basic_machine=spur-unknown
- ;;
- st2000)
- basic_machine=m68k-tandem
- ;;
- stratus)
- basic_machine=i860-stratus
- os=-sysv4
- ;;
- sun2)
- basic_machine=m68000-sun
- ;;
- sun2os3)
- basic_machine=m68000-sun
- os=-sunos3
- ;;
- sun2os4)
- basic_machine=m68000-sun
- os=-sunos4
- ;;
- sun3os3)
- basic_machine=m68k-sun
- os=-sunos3
- ;;
- sun3os4)
- basic_machine=m68k-sun
- os=-sunos4
- ;;
- sun4os3)
- basic_machine=sparc-sun
- os=-sunos3
- ;;
- sun4os4)
- basic_machine=sparc-sun
- os=-sunos4
- ;;
- sun4sol2)
- basic_machine=sparc-sun
- os=-solaris2
- ;;
- sun3 | sun3-*)
- basic_machine=m68k-sun
- ;;
- sun4)
- basic_machine=sparc-sun
- ;;
- sun386 | sun386i | roadrunner)
- basic_machine=i386-sun
- ;;
- sv1)
- basic_machine=sv1-cray
- os=-unicos
- ;;
- symmetry)
- basic_machine=i386-sequent
- os=-dynix
- ;;
- t3e)
- basic_machine=alphaev5-cray
- os=-unicos
- ;;
- t90)
- basic_machine=t90-cray
- os=-unicos
- ;;
- tic54x | c54x*)
- basic_machine=tic54x-unknown
- os=-coff
- ;;
- tic55x | c55x*)
- basic_machine=tic55x-unknown
- os=-coff
- ;;
- tic6x | c6x*)
- basic_machine=tic6x-unknown
- os=-coff
- ;;
- tx39)
- basic_machine=mipstx39-unknown
- ;;
- tx39el)
- basic_machine=mipstx39el-unknown
- ;;
- tile*)
- basic_machine=tile-tilera
- os=-linux-gnu
- ;;
- toad1)
- basic_machine=pdp10-xkl
- os=-tops20
- ;;
- tower | tower-32)
- basic_machine=m68k-ncr
- ;;
- tpf)
- basic_machine=s390x-ibm
- os=-tpf
- ;;
- udi29k)
- basic_machine=a29k-amd
- os=-udi
- ;;
- ultra3)
- basic_machine=a29k-nyu
- os=-sym1
- ;;
- v810 | necv810)
- basic_machine=v810-nec
- os=-none
- ;;
- vaxv)
- basic_machine=vax-dec
- os=-sysv
- ;;
- vms)
- basic_machine=vax-dec
- os=-vms
- ;;
- vpp*|vx|vx-*)
- basic_machine=f301-fujitsu
- ;;
- vxworks960)
- basic_machine=i960-wrs
- os=-vxworks
- ;;
- vxworks68)
- basic_machine=m68k-wrs
- os=-vxworks
- ;;
- vxworks29k)
- basic_machine=a29k-wrs
- os=-vxworks
- ;;
- w65*)
- basic_machine=w65-wdc
- os=-none
- ;;
- w89k-*)
- basic_machine=hppa1.1-winbond
- os=-proelf
- ;;
- xbox)
- basic_machine=i686-pc
- os=-mingw32
- ;;
- xps | xps100)
- basic_machine=xps100-honeywell
- ;;
- ymp)
- basic_machine=ymp-cray
- os=-unicos
- ;;
- z8k-*-coff)
- basic_machine=z8k-unknown
- os=-sim
- ;;
- none)
- basic_machine=none-none
- os=-none
- ;;
-
-# Here we handle the default manufacturer of certain CPU types. It is in
-# some cases the only manufacturer, in others, it is the most popular.
- w89k)
- basic_machine=hppa1.1-winbond
- ;;
- op50n)
- basic_machine=hppa1.1-oki
- ;;
- op60c)
- basic_machine=hppa1.1-oki
- ;;
- romp)
- basic_machine=romp-ibm
- ;;
- mmix)
- basic_machine=mmix-knuth
- ;;
- rs6000)
- basic_machine=rs6000-ibm
- ;;
- vax)
- basic_machine=vax-dec
- ;;
- pdp10)
- # there are many clones, so DEC is not a safe bet
- basic_machine=pdp10-unknown
- ;;
- pdp11)
- basic_machine=pdp11-dec
- ;;
- we32k)
- basic_machine=we32k-att
- ;;
- sh[1234] | sh[24]a | sh[34]eb | sh[1234]le | sh[23]ele)
- basic_machine=sh-unknown
- ;;
- sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
- basic_machine=sparc-sun
- ;;
- cydra)
- basic_machine=cydra-cydrome
- ;;
- orion)
- basic_machine=orion-highlevel
- ;;
- orion105)
- basic_machine=clipper-highlevel
- ;;
- mac | mpw | mac-mpw)
- basic_machine=m68k-apple
- ;;
- pmac | pmac-mpw)
- basic_machine=powerpc-apple
- ;;
- *-unknown)
- # Make sure to match an already-canonicalized machine name.
- ;;
- *)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
-esac
-
-# Here we canonicalize certain aliases for manufacturers.
-case $basic_machine in
- *-digital*)
- basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
- ;;
- *-commodore*)
- basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
- ;;
- *)
- ;;
-esac
-
-# Decode manufacturer-specific aliases for certain operating systems.
-
-if [ x"$os" != x"" ]
-then
-case $os in
- # First match some system type aliases
- # that might get confused with valid system types.
- # -solaris* is a basic system type, with this one exception.
- -solaris1 | -solaris1.*)
- os=`echo $os | sed -e 's|solaris1|sunos4|'`
- ;;
- -solaris)
- os=-solaris2
- ;;
- -svr4*)
- os=-sysv4
- ;;
- -unixware*)
- os=-sysv4.2uw
- ;;
- -gnu/linux*)
- os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
- ;;
- # First accept the basic system types.
- # The portable systems comes first.
- # Each alternative MUST END IN A *, to match a version number.
- # -sysv* is not here because it comes later, after sysvr4.
- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
- | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
- | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
- | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
- | -aos* \
- | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
- | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
- | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -openbsd* | -solidbsd* \
- | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
- | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
- | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
- | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
- | -chorusos* | -chorusrdb* \
- | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
- | -uxpv* | -beos* | -mpeix* | -udk* \
- | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
- | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
- | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
- | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
- | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
- | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
- | -skyos* | -haiku* | -rdos* | -toppers* | -drops*)
- # Remember, each alternative MUST END IN *, to match a version number.
- ;;
- -qnx*)
- case $basic_machine in
- x86-* | i*86-*)
- ;;
- *)
- os=-nto$os
- ;;
- esac
- ;;
- -nto-qnx*)
- ;;
- -nto*)
- os=`echo $os | sed -e 's|nto|nto-qnx|'`
- ;;
- -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
- | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
- | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
- ;;
- -mac*)
- os=`echo $os | sed -e 's|mac|macos|'`
- ;;
- -linux-dietlibc)
- os=-linux-dietlibc
- ;;
- -linux*)
- os=`echo $os | sed -e 's|linux|linux-gnu|'`
- ;;
- -sunos5*)
- os=`echo $os | sed -e 's|sunos5|solaris2|'`
- ;;
- -sunos6*)
- os=`echo $os | sed -e 's|sunos6|solaris3|'`
- ;;
- -opened*)
- os=-openedition
- ;;
- -os400*)
- os=-os400
- ;;
- -wince*)
- os=-wince
- ;;
- -osfrose*)
- os=-osfrose
- ;;
- -osf*)
- os=-osf
- ;;
- -utek*)
- os=-bsd
- ;;
- -dynix*)
- os=-bsd
- ;;
- -acis*)
- os=-aos
- ;;
- -atheos*)
- os=-atheos
- ;;
- -syllable*)
- os=-syllable
- ;;
- -386bsd)
- os=-bsd
- ;;
- -ctix* | -uts*)
- os=-sysv
- ;;
- -nova*)
- os=-rtmk-nova
- ;;
- -ns2 )
- os=-nextstep2
- ;;
- -nsk*)
- os=-nsk
- ;;
- # Preserve the version number of sinix5.
- -sinix5.*)
- os=`echo $os | sed -e 's|sinix|sysv|'`
- ;;
- -sinix*)
- os=-sysv4
- ;;
- -tpf*)
- os=-tpf
- ;;
- -triton*)
- os=-sysv3
- ;;
- -oss*)
- os=-sysv3
- ;;
- -svr4)
- os=-sysv4
- ;;
- -svr3)
- os=-sysv3
- ;;
- -sysvr4)
- os=-sysv4
- ;;
- # This must come after -sysvr4.
- -sysv*)
- ;;
- -ose*)
- os=-ose
- ;;
- -es1800*)
- os=-ose
- ;;
- -xenix)
- os=-xenix
- ;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- os=-mint
- ;;
- -aros*)
- os=-aros
- ;;
- -kaos*)
- os=-kaos
- ;;
- -zvmoe)
- os=-zvmoe
- ;;
- -none)
- ;;
- *)
- # Get rid of the `-' at the beginning of $os.
- os=`echo $os | sed 's/[^-]*-//'`
- echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
- exit 1
- ;;
-esac
-else
-
-# Here we handle the default operating systems that come with various machines.
-# The value should be what the vendor currently ships out the door with their
-# machine or put another way, the most popular os provided with the machine.
-
-# Note that if you're going to try to match "-MANUFACTURER" here (say,
-# "-sun"), then you have to tell the case statement up towards the top
-# that MANUFACTURER isn't an operating system. Otherwise, code above
-# will signal an error saying that MANUFACTURER isn't an operating
-# system, and we'll never get to this point.
-
-case $basic_machine in
- score-*)
- os=-elf
- ;;
- spu-*)
- os=-elf
- ;;
- *-acorn)
- os=-riscix1.2
- ;;
- arm*-rebel)
- os=-linux
- ;;
- arm*-semi)
- os=-aout
- ;;
- c4x-* | tic4x-*)
- os=-coff
- ;;
- # This must come before the *-dec entry.
- pdp10-*)
- os=-tops20
- ;;
- pdp11-*)
- os=-none
- ;;
- *-dec | vax-*)
- os=-ultrix4.2
- ;;
- m68*-apollo)
- os=-domain
- ;;
- i386-sun)
- os=-sunos4.0.2
- ;;
- m68000-sun)
- os=-sunos3
- # This also exists in the configure program, but was not the
- # default.
- # os=-sunos4
- ;;
- m68*-cisco)
- os=-aout
- ;;
- mep-*)
- os=-elf
- ;;
- mips*-cisco)
- os=-elf
- ;;
- mips*-*)
- os=-elf
- ;;
- or32-*)
- os=-coff
- ;;
- *-tti) # must be before sparc entry or we get the wrong os.
- os=-sysv3
- ;;
- sparc-* | *-sun)
- os=-sunos4.1.1
- ;;
- *-be)
- os=-beos
- ;;
- *-haiku)
- os=-haiku
- ;;
- *-ibm)
- os=-aix
- ;;
- *-knuth)
- os=-mmixware
- ;;
- *-wec)
- os=-proelf
- ;;
- *-winbond)
- os=-proelf
- ;;
- *-oki)
- os=-proelf
- ;;
- *-hp)
- os=-hpux
- ;;
- *-hitachi)
- os=-hiux
- ;;
- i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
- os=-sysv
- ;;
- *-cbm)
- os=-amigaos
- ;;
- *-dg)
- os=-dgux
- ;;
- *-dolphin)
- os=-sysv3
- ;;
- m68k-ccur)
- os=-rtu
- ;;
- m88k-omron*)
- os=-luna
- ;;
- *-next )
- os=-nextstep
- ;;
- *-sequent)
- os=-ptx
- ;;
- *-crds)
- os=-unos
- ;;
- *-ns)
- os=-genix
- ;;
- i370-*)
- os=-mvs
- ;;
- *-next)
- os=-nextstep3
- ;;
- *-gould)
- os=-sysv
- ;;
- *-highlevel)
- os=-bsd
- ;;
- *-encore)
- os=-bsd
- ;;
- *-sgi)
- os=-irix
- ;;
- *-siemens)
- os=-sysv4
- ;;
- *-masscomp)
- os=-rtu
- ;;
- f30[01]-fujitsu | f700-fujitsu)
- os=-uxpv
- ;;
- *-rom68k)
- os=-coff
- ;;
- *-*bug)
- os=-coff
- ;;
- *-apple)
- os=-macos
- ;;
- *-atari*)
- os=-mint
- ;;
- *)
- os=-none
- ;;
-esac
-fi
-
-# Here we handle the case where we know the os, and the CPU type, but not the
-# manufacturer. We pick the logical manufacturer.
-vendor=unknown
-case $basic_machine in
- *-unknown)
- case $os in
- -riscix*)
- vendor=acorn
- ;;
- -sunos*)
- vendor=sun
- ;;
- -aix*)
- vendor=ibm
- ;;
- -beos*)
- vendor=be
- ;;
- -hpux*)
- vendor=hp
- ;;
- -mpeix*)
- vendor=hp
- ;;
- -hiux*)
- vendor=hitachi
- ;;
- -unos*)
- vendor=crds
- ;;
- -dgux*)
- vendor=dg
- ;;
- -luna*)
- vendor=omron
- ;;
- -genix*)
- vendor=ns
- ;;
- -mvs* | -opened*)
- vendor=ibm
- ;;
- -os400*)
- vendor=ibm
- ;;
- -ptx*)
- vendor=sequent
- ;;
- -tpf*)
- vendor=ibm
- ;;
- -vxsim* | -vxworks* | -windiss*)
- vendor=wrs
- ;;
- -aux*)
- vendor=apple
- ;;
- -hms*)
- vendor=hitachi
- ;;
- -mpw* | -macos*)
- vendor=apple
- ;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- vendor=atari
- ;;
- -vos*)
- vendor=stratus
- ;;
- esac
- basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
- ;;
-esac
-
-echo $basic_machine$os
-exit
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/lib/diameter/autoconf/install-sh b/lib/diameter/autoconf/install-sh
deleted file mode 100755
index a5897de6ea..0000000000
--- a/lib/diameter/autoconf/install-sh
+++ /dev/null
@@ -1,519 +0,0 @@
-#!/bin/sh
-# install - install a program, script, or datafile
-
-scriptversion=2006-12-25.00
-
-# This originates from X11R5 (mit/util/scripts/install.sh), which was
-# later released in X11R6 (xc/config/util/install.sh) with the
-# following copyright and license.
-#
-# Copyright (C) 1994 X Consortium
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to
-# deal in the Software without restriction, including without limitation the
-# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-# sell copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
-# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
-# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-# Except as contained in this notice, the name of the X Consortium shall not
-# be used in advertising or otherwise to promote the sale, use or other deal-
-# ings in this Software without prior written authorization from the X Consor-
-# tium.
-#
-#
-# FSF changes to this file are in the public domain.
-#
-# Calling this script install-sh is preferred over install.sh, to prevent
-# `make' implicit rules from creating a file called install from it
-# when there is no Makefile.
-#
-# This script is compatible with the BSD install script, but was written
-# from scratch.
-
-nl='
-'
-IFS=" "" $nl"
-
-# set DOITPROG to echo to test this script
-
-# Don't use :- since 4.3BSD and earlier shells don't like it.
-doit=${DOITPROG-}
-if test -z "$doit"; then
- doit_exec=exec
-else
- doit_exec=$doit
-fi
-
-# Put in absolute file names if you don't have them in your path;
-# or use environment vars.
-
-chgrpprog=${CHGRPPROG-chgrp}
-chmodprog=${CHMODPROG-chmod}
-chownprog=${CHOWNPROG-chown}
-cmpprog=${CMPPROG-cmp}
-cpprog=${CPPROG-cp}
-mkdirprog=${MKDIRPROG-mkdir}
-mvprog=${MVPROG-mv}
-rmprog=${RMPROG-rm}
-stripprog=${STRIPPROG-strip}
-
-posix_glob='?'
-initialize_posix_glob='
- test "$posix_glob" != "?" || {
- if (set -f) 2>/dev/null; then
- posix_glob=
- else
- posix_glob=:
- fi
- }
-'
-
-posix_mkdir=
-
-# Desired mode of installed file.
-mode=0755
-
-chgrpcmd=
-chmodcmd=$chmodprog
-chowncmd=
-mvcmd=$mvprog
-rmcmd="$rmprog -f"
-stripcmd=
-
-src=
-dst=
-dir_arg=
-dst_arg=
-
-copy_on_change=false
-no_target_directory=
-
-usage="\
-Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
- or: $0 [OPTION]... SRCFILES... DIRECTORY
- or: $0 [OPTION]... -t DIRECTORY SRCFILES...
- or: $0 [OPTION]... -d DIRECTORIES...
-
-In the 1st form, copy SRCFILE to DSTFILE.
-In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
-In the 4th, create DIRECTORIES.
-
-Options:
- --help display this help and exit.
- --version display version info and exit.
-
- -c (ignored)
- -C install only if different (preserve the last data modification time)
- -d create directories instead of installing files.
- -g GROUP $chgrpprog installed files to GROUP.
- -m MODE $chmodprog installed files to MODE.
- -o USER $chownprog installed files to USER.
- -s $stripprog installed files.
- -t DIRECTORY install into DIRECTORY.
- -T report an error if DSTFILE is a directory.
-
-Environment variables override the default commands:
- CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
- RMPROG STRIPPROG
-"
-
-while test $# -ne 0; do
- case $1 in
- -c) ;;
-
- -C) copy_on_change=true;;
-
- -d) dir_arg=true;;
-
- -g) chgrpcmd="$chgrpprog $2"
- shift;;
-
- --help) echo "$usage"; exit $?;;
-
- -m) mode=$2
- case $mode in
- *' '* | *' '* | *'
-'* | *'*'* | *'?'* | *'['*)
- echo "$0: invalid mode: $mode" >&2
- exit 1;;
- esac
- shift;;
-
- -o) chowncmd="$chownprog $2"
- shift;;
-
- -s) stripcmd=$stripprog;;
-
- -t) dst_arg=$2
- shift;;
-
- -T) no_target_directory=true;;
-
- --version) echo "$0 $scriptversion"; exit $?;;
-
- --) shift
- break;;
-
- -*) echo "$0: invalid option: $1" >&2
- exit 1;;
-
- *) break;;
- esac
- shift
-done
-
-if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
- # When -d is used, all remaining arguments are directories to create.
- # When -t is used, the destination is already specified.
- # Otherwise, the last argument is the destination. Remove it from $@.
- for arg
- do
- if test -n "$dst_arg"; then
- # $@ is not empty: it contains at least $arg.
- set fnord "$@" "$dst_arg"
- shift # fnord
- fi
- shift # arg
- dst_arg=$arg
- done
-fi
-
-if test $# -eq 0; then
- if test -z "$dir_arg"; then
- echo "$0: no input file specified." >&2
- exit 1
- fi
- # It's OK to call `install-sh -d' without argument.
- # This can happen when creating conditional directories.
- exit 0
-fi
-
-if test -z "$dir_arg"; then
- trap '(exit $?); exit' 1 2 13 15
-
- # Set umask so as not to create temps with too-generous modes.
- # However, 'strip' requires both read and write access to temps.
- case $mode in
- # Optimize common cases.
- *644) cp_umask=133;;
- *755) cp_umask=22;;
-
- *[0-7])
- if test -z "$stripcmd"; then
- u_plus_rw=
- else
- u_plus_rw='% 200'
- fi
- cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
- *)
- if test -z "$stripcmd"; then
- u_plus_rw=
- else
- u_plus_rw=,u+rw
- fi
- cp_umask=$mode$u_plus_rw;;
- esac
-fi
-
-for src
-do
- # Protect names starting with `-'.
- case $src in
- -*) src=./$src;;
- esac
-
- if test -n "$dir_arg"; then
- dst=$src
- dstdir=$dst
- test -d "$dstdir"
- dstdir_status=$?
- else
-
- # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
- # might cause directories to be created, which would be especially bad
- # if $src (and thus $dsttmp) contains '*'.
- if test ! -f "$src" && test ! -d "$src"; then
- echo "$0: $src does not exist." >&2
- exit 1
- fi
-
- if test -z "$dst_arg"; then
- echo "$0: no destination specified." >&2
- exit 1
- fi
-
- dst=$dst_arg
- # Protect names starting with `-'.
- case $dst in
- -*) dst=./$dst;;
- esac
-
- # If destination is a directory, append the input filename; won't work
- # if double slashes aren't ignored.
- if test -d "$dst"; then
- if test -n "$no_target_directory"; then
- echo "$0: $dst_arg: Is a directory" >&2
- exit 1
- fi
- dstdir=$dst
- dst=$dstdir/`basename "$src"`
- dstdir_status=0
- else
- # Prefer dirname, but fall back on a substitute if dirname fails.
- dstdir=`
- (dirname "$dst") 2>/dev/null ||
- expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
- X"$dst" : 'X\(//\)[^/]' \| \
- X"$dst" : 'X\(//\)$' \| \
- X"$dst" : 'X\(/\)' \| . 2>/dev/null ||
- echo X"$dst" |
- sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
- s//\1/
- q
- }
- /^X\(\/\/\)[^/].*/{
- s//\1/
- q
- }
- /^X\(\/\/\)$/{
- s//\1/
- q
- }
- /^X\(\/\).*/{
- s//\1/
- q
- }
- s/.*/./; q'
- `
-
- test -d "$dstdir"
- dstdir_status=$?
- fi
- fi
-
- obsolete_mkdir_used=false
-
- if test $dstdir_status != 0; then
- case $posix_mkdir in
- '')
- # Create intermediate dirs using mode 755 as modified by the umask.
- # This is like FreeBSD 'install' as of 1997-10-28.
- umask=`umask`
- case $stripcmd.$umask in
- # Optimize common cases.
- *[2367][2367]) mkdir_umask=$umask;;
- .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
-
- *[0-7])
- mkdir_umask=`expr $umask + 22 \
- - $umask % 100 % 40 + $umask % 20 \
- - $umask % 10 % 4 + $umask % 2
- `;;
- *) mkdir_umask=$umask,go-w;;
- esac
-
- # With -d, create the new directory with the user-specified mode.
- # Otherwise, rely on $mkdir_umask.
- if test -n "$dir_arg"; then
- mkdir_mode=-m$mode
- else
- mkdir_mode=
- fi
-
- posix_mkdir=false
- case $umask in
- *[123567][0-7][0-7])
- # POSIX mkdir -p sets u+wx bits regardless of umask, which
- # is incompatible with FreeBSD 'install' when (umask & 300) != 0.
- ;;
- *)
- tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
- trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
-
- if (umask $mkdir_umask &&
- exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
- then
- if test -z "$dir_arg" || {
- # Check for POSIX incompatibilities with -m.
- # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
- # other-writeable bit of parent directory when it shouldn't.
- # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
- ls_ld_tmpdir=`ls -ld "$tmpdir"`
- case $ls_ld_tmpdir in
- d????-?r-*) different_mode=700;;
- d????-?--*) different_mode=755;;
- *) false;;
- esac &&
- $mkdirprog -m$different_mode -p -- "$tmpdir" && {
- ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
- test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
- }
- }
- then posix_mkdir=:
- fi
- rmdir "$tmpdir/d" "$tmpdir"
- else
- # Remove any dirs left behind by ancient mkdir implementations.
- rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
- fi
- trap '' 0;;
- esac;;
- esac
-
- if
- $posix_mkdir && (
- umask $mkdir_umask &&
- $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
- )
- then :
- else
-
- # The umask is ridiculous, or mkdir does not conform to POSIX,
- # or it failed possibly due to a race condition. Create the
- # directory the slow way, step by step, checking for races as we go.
-
- case $dstdir in
- /*) prefix='/';;
- -*) prefix='./';;
- *) prefix='';;
- esac
-
- eval "$initialize_posix_glob"
-
- oIFS=$IFS
- IFS=/
- $posix_glob set -f
- set fnord $dstdir
- shift
- $posix_glob set +f
- IFS=$oIFS
-
- prefixes=
-
- for d
- do
- test -z "$d" && continue
-
- prefix=$prefix$d
- if test -d "$prefix"; then
- prefixes=
- else
- if $posix_mkdir; then
- (umask=$mkdir_umask &&
- $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
- # Don't fail if two instances are running concurrently.
- test -d "$prefix" || exit 1
- else
- case $prefix in
- *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
- *) qprefix=$prefix;;
- esac
- prefixes="$prefixes '$qprefix'"
- fi
- fi
- prefix=$prefix/
- done
-
- if test -n "$prefixes"; then
- # Don't fail if two instances are running concurrently.
- (umask $mkdir_umask &&
- eval "\$doit_exec \$mkdirprog $prefixes") ||
- test -d "$dstdir" || exit 1
- obsolete_mkdir_used=true
- fi
- fi
- fi
-
- if test -n "$dir_arg"; then
- { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
- { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
- { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
- test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
- else
-
- # Make a couple of temp file names in the proper directory.
- dsttmp=$dstdir/_inst.$$_
- rmtmp=$dstdir/_rm.$$_
-
- # Trap to clean up those temp files at exit.
- trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
-
- # Copy the file name to the temp name.
- (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
-
- # and set any options; do chmod last to preserve setuid bits.
- #
- # If any of these fail, we abort the whole thing. If we want to
- # ignore errors from any of these, just make sure not to ignore
- # errors from the above "$doit $cpprog $src $dsttmp" command.
- #
- { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
- { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
- { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
- { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
-
- # If -C, don't bother to copy if it wouldn't change the file.
- if $copy_on_change &&
- old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` &&
- new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` &&
-
- eval "$initialize_posix_glob" &&
- $posix_glob set -f &&
- set X $old && old=:$2:$4:$5:$6 &&
- set X $new && new=:$2:$4:$5:$6 &&
- $posix_glob set +f &&
-
- test "$old" = "$new" &&
- $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
- then
- rm -f "$dsttmp"
- else
- # Rename the file to the real destination.
- $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
-
- # The rename failed, perhaps because mv can't rename something else
- # to itself, or perhaps because mv is so ancient that it does not
- # support -f.
- {
- # Now remove or move aside any old file at destination location.
- # We try this two ways since rm can't unlink itself on some
- # systems and the destination file might be busy for other
- # reasons. In this case, the final cleanup might fail but the new
- # file should still install successfully.
- {
- test ! -f "$dst" ||
- $doit $rmcmd -f "$dst" 2>/dev/null ||
- { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
- { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
- } ||
- { echo "$0: cannot unlink or rename $dst" >&2
- (exit 1); exit 1
- }
- } &&
-
- # Now rename the file to the real destination.
- $doit $mvcmd "$dsttmp" "$dst"
- }
- fi || exit 1
-
- trap '' 0
- fi
-done
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "scriptversion="
-# time-stamp-format: "%:y-%02m-%02d.%02H"
-# time-stamp-end: "$"
-# End:
diff --git a/lib/diameter/configure.in b/lib/diameter/configure.in
deleted file mode 100644
index 8acfb28fed..0000000000
--- a/lib/diameter/configure.in
+++ /dev/null
@@ -1,137 +0,0 @@
-dnl Process this file with autoconf to produce a configure script.
-
-dnl %CopyrightBegin%
-dnl
-dnl Copyright Ericsson AB 1998-2011. All Rights Reserved.
-dnl
-dnl The contents of this file are subject to the Erlang Public License,
-dnl Version 1.1, (the "License"); you may not use this file except in
-dnl compliance with the License. You should have received a copy of the
-dnl Erlang Public License along with this software. If not, it can be
-dnl retrieved online at http://www.erlang.org/.
-dnl
-dnl Software distributed under the License is distributed on an "AS IS"
-dnl basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-dnl the License for the specific language governing rights and limitations
-dnl under the License.
-dnl
-dnl %CopyrightEnd%
-
-if test "x$no_recursion" != "xyes" -a "x$OVERRIDE_CONFIG_CACHE" = "x"; then
- # We do not want to use a common cache!
- cache_file=/dev/null
-fi
-
-AC_INIT(vsn.mk)
-
-dnl <STANDALONE DIAMETER>
-dnl This is needed for diameters own environment to rock,
-dnl but since we are now integrated into OTP, we skip it.
-dnl In order to build stand-alone we need atleast 2.63...
-dnl AC_PREREQ(2.63)
-dnl </STANDALONE DIAMETER>
-
-dnl LM_PRECIOUS_VARS
-
-
-dnl The OTP source tree is the default "top",
-dnl but we can also define our own top: DIAMETER_TOP
-
-if test -n "$ERL_TOP" || test -d $ERL_TOP ; then
- erl_top=${ERL_TOP}
- AC_CONFIG_AUX_DIRS($erl_top/erts/autoconf)
- DIAMETER_TOP=${ERL_TOP}/lib/diameter
-else
- AC_ARG_VAR(DIAMETER_TOP, [Diameter top source directory])
- if test -n "$DIAMETER_TOP" || test -d $DIAMETER_TOP ; then
- AC_CONFIG_AUX_DIRS(autoconf)
- fi
-
- dnl <STANDALONE DIAMETER>
- dnl AC_ERLANG_SUBST_ROOT_DIR
- dnl AC_ERLANG_SUBST_LIB_DIR
- dnl AC_ERLANG_CHECK_LIB([erl_docgen],
- dnl [echo "erl_docgen version \"$ERLANG_LIB_VER_erl_docgen\""
- dnl echo "is installed in \"$ERLANG_LIB_DIR_erl_docgen\""],
- dnl [AC_MSG_ERROR([erl_docgen was not found!])])
- dnl AC_ERLANG_CHECK_LIB([test_server],
- dnl [echo "test_server version \"$ERLANG_LIB_VER_test_server\""
- dnl echo "is installed in \"$ERLANG_LIB_DIR_test_server\""],
- dnl [AC_MSG_ERROR([test_server was not found!])])
- dnl </STANDALONE DIAMETER>
-
-fi
-
-AC_SUBST(DIAMETER_TOP)
-export DIAMETER_TOP
-
-if test "X$host" != "Xfree_source" -a "X$host" != "Xwin32"; then
- AC_CANONICAL_HOST
-fi
-
-TARGET=$host
-AC_SUBST(TARGET)
-
-if test "x$erl_top" = "x"; then
- dnl STANDALONE DIAMETER
- AC_CHECK_PROGS(XSLTPROC, xsltproc)
- if test -z "$XSLTPROC"; then
- echo "xsltproc" >> doc/CONF_INFO
- AC_MSG_WARN([No 'xsltproc' command found: the documentation can not be built])
- fi
-
- AC_CHECK_PROGS(FOP, fop)
- if test -z "$FOP"; then
- AC_MSG_ERROR([No 'fop' command found: the documentation can not be built])
- fi
-fi
-
-dnl
-dnl We can live with Solaris /usr/ucb/install
-dnl
-case $host in
- *-*-solaris*|free_source)
- if test -x /usr/ucb/install; then
- INSTALL="/usr/ucb/install -c"
- fi
- ;;
- *)
- ;;
-esac
-AC_PROG_INSTALL
-LM_PROG_INSTALL_DIR
-
-case $host_os in
- darwin*)
- dnl Need to preserve modification time on archives;
- dnl otherwise, ranlib has to be run on archives
- dnl again after installation.
- INSTALL_DATA="$INSTALL_DATA -p";;
- *)
- ;;
-esac
-
-dnl
-dnl Fix for Tilera install permissions
-dnl
-
-case $build in
- *tile*)
- INSTALL_PROGRAM="$INSTALL_PROGRAM -m755"
- INSTALL_SCRIPT="$INSTALL_SCRIPT -m755"
- ;;
- *)
- ;;
-esac
-
-
-dnl <STANDALONE DIAMETER>
-dnl AC_ERLANG_NEED_ERL([$PATH])
-dnl AC_ERLANG_NEED_ERLC([$PATH])
-dnl </STANDALONE DIAMETER>
-
-AC_OUTPUT(
- Makefile:Makefile.in
- make/$host/rules.mk:make/rules.mk.in
- )
-
diff --git a/lib/diameter/doc/.gitignore b/lib/diameter/doc/.gitignore
new file mode 100644
index 0000000000..b634bdd7ba
--- /dev/null
+++ b/lib/diameter/doc/.gitignore
@@ -0,0 +1,4 @@
+
+/html/
+/man*/
+/pdf/
diff --git a/lib/diameter/doc/src/.gitignore b/lib/diameter/doc/src/.gitignore
index feeb378fd8..5776e1cc76 100644
--- a/lib/diameter/doc/src/.gitignore
+++ b/lib/diameter/doc/src/.gitignore
@@ -1,2 +1,3 @@
/depend.mk
+/seehere.ent
diff --git a/lib/diameter/doc/src/Makefile b/lib/diameter/doc/src/Makefile
index 59ca660797..8ad38ba0d5 100644
--- a/lib/diameter/doc/src/Makefile
+++ b/lib/diameter/doc/src/Makefile
@@ -16,13 +16,8 @@
#
# %CopyrightEnd%
-ifneq ($(ERL_TOP),)
include $(ERL_TOP)/make/target.mk
include $(ERL_TOP)/make/$(TARGET)/otp.mk
-else
-include $(DIAMETER_TOP)/make/target.mk
-include $(DIAMETER_TOP)/make/$(TARGET)/rules.mk
-endif
include ../../vsn.mk
@@ -39,7 +34,8 @@ XML_REF_FILES = $(XML_REF1_FILES) $(XML_REF3_FILES) $(XML_REF4_FILES)
XML_FILES = $(BOOK_FILES) $(XML_APPLICATION_FILES) \
$(XML_REF_FILES) \
- $(XML_PART_FILES) $(XML_CHAPTER_FILES)
+ $(XML_PART_FILES) $(XML_CHAPTER_FILES) \
+ seealso.ent
INTERNAL_HTML_FILES = $(TECHNICAL_DESCR_FILES:%.xml=$(HTMLDIR)/%.html)
@@ -98,7 +94,7 @@ html: gifs $(HTML_REF_MAN_FILE)
clean clean_docs: clean_pdf clean_html clean_man
rm -f errs core *~
- rm -f depend.mk
+ rm -f depend.mk seehere.ent
clean_pdf:
rm -f $(PDFDIR)/*
@@ -155,11 +151,8 @@ info:
# ----------------------------------------------------
# Release Target
# ----------------------------------------------------
-ifneq ($(ERL_TOP),)
+
include $(ERL_TOP)/make/otp_release_targets.mk
-else
-include $(DIAMETER_TOP)/make/release_targets.mk
-endif
release_docs_spec: $(LOCAL)docs
$(INSTALL_DIR) "$(RELSYSDIR)/doc/pdf"
@@ -178,7 +171,9 @@ release_docs_spec: $(LOCAL)docs
release_spec:
-depend.mk: depend.sed $(XML_REF_FILES) $(XML_CHAPTER_FILES) Makefile
+depend.mk: depend.sed Makefile seealso.ent \
+ $(XML_REF_FILES) $(XML_CHAPTER_FILES)
+ sed -f seehere.sed seealso.ent > seehere.ent
(for f in $(XML_REF_FILES) $(XML_CHAPTER_FILES); do \
sed -f $< $$f | sed "s@%FILE%@`basename $$f .xml`@g"; \
done) \
diff --git a/lib/diameter/doc/src/diameter.xml b/lib/diameter/doc/src/diameter.xml
index b8652a7482..bc42b75c7a 100644
--- a/lib/diameter/doc/src/diameter.xml
+++ b/lib/diameter/doc/src/diameter.xml
@@ -1,5 +1,10 @@
<?xml version="1.0" encoding="latin1" ?>
-<!DOCTYPE erlref SYSTEM "erlref.dtd">
+<!DOCTYPE erlref SYSTEM "erlref.dtd" [
+ <!ENTITY % also SYSTEM "seealso.ent" >
+ <!ENTITY % here SYSTEM "seehere.ent" >
+ %also;
+ %here;
+]>
<erlref>
<header>
@@ -49,15 +54,12 @@ Diameter protocol as defined in RFC 3588.</p>
<p>
Basic usage consists of creating a representation of a
-locally implemented Diameter node and its capabilities with <seealso
-marker="#start_service">start_service/2</seealso>, adding transport
-capability using <seealso
-marker="#add_transport">add_transport/2</seealso> and sending Diameter
-requests and receiving Diameter answers with <seealso
-marker="#call">call/4</seealso>.
+locally implemented Diameter node and its capabilities with
+&start_service;, adding transport capability using
+&add_transport; and sending Diameter
+requests and receiving Diameter answers with &call;.
Incoming Diameter requests are communicated as callbacks to a
-<seealso
-marker="diameter_app">diameter_app(3)</seealso> callback modules as
+&man_app; callback modules as
specified in the service configuration.</p>
<p>
@@ -90,7 +92,7 @@ in this module.</p>
<item>
<p>
Types corresponding to RFC 3588 AVP Data Formats.
-Defined in <seealso marker="diameter_dict#DATA_TYPES">diameter_dict(4)</seealso>.</p>
+Defined in &dict_data_types;.</p>
<marker id="application_alias"/>
</item>
@@ -100,7 +102,7 @@ Defined in <seealso marker="diameter_dict#DATA_TYPES">diameter_dict(4)</seealso>
<p>
A name identifying a Diameter application in
service configuration.
-Passed to <seealso marker="#call">call/4</seealso> when sending requests
+Passed to &call; when sending requests
defined by the application.</p>
<marker id="application_module"/>
@@ -110,23 +112,22 @@ defined by the application.</p>
| [Mod | ExtraArgs]
| #diameter_callback{}</c></tag>
<item>
-<code>
+<pre>
Mod = atom()
ExtraArgs = list()
-</code>
+</pre>
<p>
-A module implementing the callback interface defined in <seealso
-marker="diameter_app">diameter_app(3)</seealso>, along with any
+A module implementing the callback interface defined in &man_app;,
+along with any
extra arguments to be appended to those documented for the interface.
Note that extra arguments specific to an outgoing request can be
-specified to <seealso marker="#call">call/4</seealso>, in which case
+specified to &call;, in which case
those are appended to any module-specific extra arguments.</p>
<p>
Specifying a <c>#diameter_callback{}</c> record allows individual
-functions to be configured in place of the usual <seealso
-marker="diameter_app">diameter_app(3)</seealso> callbacks.
+functions to be configured in place of the usual &man_app; callbacks.
See that module for details.</p>
<marker id="application_opt"/>
@@ -141,7 +142,7 @@ Has one of the following types.</p>
<taglist>
-<tag><c>{alias, <seealso marker="#application_alias">application_alias()</seealso>}</c></tag>
+<tag><c>{alias, &application_alias;}</c></tag>
<item>
<p>
A unique identifier for the application in the scope of the
@@ -156,17 +157,15 @@ unspecified.</p>
The name of an encode/decode module for the Diameter
messages defined by the application.
These modules are generated from a specification file whose format is
-documented in <seealso
-marker="diameter_dict">diameter_dict(4)</seealso>.</p>
+documented in &man_dict;.</p>
</item>
-<tag><c>{module, <seealso marker="#application_module">application_module()</seealso>}</c></tag>
+<tag><c>{module, &application_module;}</c></tag>
<item>
<p>
The callback module with which messages of the Diameter application are
handled.
-See <seealso marker="diameter_app">diameter_app(3)</seealso> for
-the required interface and semantics.</p>
+See &man_app; for the required interface and semantics.</p>
</item>
<tag><c>{state, term()}</c></tag>
@@ -174,7 +173,7 @@ the required interface and semantics.</p>
<p>
The initial callback state.
The prevailing state is passed to some
-<seealso marker="diameter_app">diameter_app(3)</seealso>
+&man_app;
callbacks, which can then return a new state.
Defaults to the value of the <c>alias</c> option if unspecified.</p>
</item>
@@ -182,14 +181,13 @@ Defaults to the value of the <c>alias</c> option if unspecified.</p>
<tag><c>{call_mutates_state, true|false}</c></tag>
<item>
<p>
-Specifies whether or not the <seealso
-marker="diameter_app#pick_peer">pick_peer/4</seealso>
+Specifies whether or not the &app_pick_peer;
application callback can modify the application state.
Defaults to <c>false</c> if unspecified.</p>
<note>
<p>
-<seealso marker="diameter_app#pick_peer">pick_peer</seealso> callbacks
+&app_pick_peer; callbacks
are serialized when these are allowed to modify state, which is a
potential performance bottleneck.
A simple Diameter client may suffer no ill effects from using mutable
@@ -203,10 +201,8 @@ probably avoid it.</p>
<p>
Determines the manner in which incoming answer messages containing
decode errors are handled.
-If <c>callback</c> then errors result in a <seealso
-marker="diameter_app#handle_answer">handle_answer/4</seealso>
-callback in the same fashion as for <seealso
-marker="diameter_app#handle_request">handle_request/3</seealso>, with
+If <c>callback</c> then errors result in a &app_handle_answer;
+callback in the same fashion as for &app_handle_request;, with
errors communicated in the <c>errors</c> field of the
<c>#diameter_packet{}</c> record passed to the callback.
If <c>report</c> then an answer containing errors is discarded
@@ -214,7 +210,7 @@ without a callback and a warning report is written to the log.
If <c>discard</c> then an answer containing errors is silently
discarded without a callback.
In both the <c>report</c> and <c>discard</c> cases the return value
-for the <seealso marker="#call">call/4</seealso> invocation in
+for the &call; invocation in
question is as if a callback had taken place and returned
<c>{error, failure}</c>.</p>
@@ -231,7 +227,7 @@ Defaults to <c>report</c> if unspecified.</p>
<item>
<p>
-Options available to <seealso marker="#call">call/4</seealso> when
+Options available to &call; when
sending an outgoing Diameter request.
Has one of the following types.</p>
@@ -247,18 +243,18 @@ itself.
Multiple options append to the argument list.</p>
</item>
-<tag><c>{filter, <seealso marker="#peer_filter">peer_filter()</seealso>}</c></tag>
+<tag><c>{filter, &peer_filter;}</c></tag>
<item>
<p>
A filter to apply to the list of available peers before passing them to
-the <seealso marker="diameter_app#pick_peer">pick_peer/4</seealso>
+the &app_pick_peer;
callback for the application in question.
Multiple options are equivalent to a single <c>all</c> filter on the
corresponding list of filters.
Defaults to <c>none</c>.</p>
</item>
-<tag><c>{timeout, <seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>}</c></tag>
+<tag><c>{timeout, &dict_Unsigned32;}</c></tag>
<item>
<p>
The number of milliseconds after which the request should
@@ -269,21 +265,17 @@ Defaults to 5000.</p>
<tag><c>detach</c></tag>
<item>
<p>
-Causes <seealso marker="#call">call/4</seealso> to return <c>ok</c> as
+Causes &call; to return <c>ok</c> as
soon as the request in
question has been encoded instead of waiting for and returning
-the result from a subsequent
-<seealso marker="diameter_app#handle_answer">handle_answer/4</seealso>
-or <seealso
-marker="diameter_app#handle_error">handle_error/4</seealso>
-callback.</p>
+the result from a subsequent &app_handle_answer; or
+&app_handle_error; callback.</p>
</item>
</taglist>
<p>
-An invalid option will cause <seealso marker="#call">call/4</seealso>
-to fail.</p>
+An invalid option will cause &call; to fail.</p>
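+
+<p>
+As an illustration (with <c>SvcName</c> the service in question,
+<c>nasreq</c> an assumed &application_alias; and <c>Req</c> a request
+term constructed as described in &man_app;), a call that only
+considers peers whose Origin-Realm matches the request's
+Destination-Realm and that gives up after two seconds could look
+like this.</p>
+
+<pre>
+diameter:call(SvcName, nasreq, Req, [{filter, realm}, {timeout, 2000}])
+</pre>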
<marker id="capability"/>
</item>
@@ -300,9 +292,9 @@ Has one of the following types.</p>
<taglist>
-<tag><c>{'Origin-Host', <seealso marker="diameter_dict#DATA_TYPES">DiameterIdentity()</seealso>}</c></tag>
-<tag><c>{'Origin-Realm', <seealso marker="diameter_dict#DATA_TYPES">DiameterIdentity()</seealso>}</c></tag>
-<tag><c>{'Host-IP-Address', [<seealso marker="diameter_dict#DATA_TYPES">Address()</seealso>]}</c></tag>
+<tag><c>{'Origin-Host', &dict_DiameterIdentity;}</c></tag>
+<tag><c>{'Origin-Realm', &dict_DiameterIdentity;}</c></tag>
+<tag><c>{'Host-IP-Address', [&dict_Address;]}</c></tag>
<item>
<p>
An address list is available to the start function of a
@@ -312,24 +304,23 @@ Host-IP-Address need not be specified if the transport start function
returns an address list.</p>
</item>
-<tag><c>{'Vendor-Id', <seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>}</c></tag>
-<tag><c>{'Product-Name', <seealso marker="diameter_dict#DATA_TYPES">UTF8String()</seealso>}</c></tag>
-<tag><c>{'Origin-State-Id', <seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>}</c></tag>
+<tag><c>{'Vendor-Id', &dict_Unsigned32;}</c></tag>
+<tag><c>{'Product-Name', &dict_UTF8String;}</c></tag>
+<tag><c>{'Origin-State-Id', &dict_Unsigned32;}</c></tag>
<item>
<p>
Origin-State-Id is optional but will be included in outgoing messages
sent by diameter itself: CER/CEA, DWR/DWA and DPR/DPA.
Setting a value of <c>0</c> (zero) is equivalent to not setting a
value as documented in RFC 3588.
-The function <seealso
-marker="#origin_state_id">origin_state_id/0</seealso>
+The function &origin_state_id;
can be used to retrieve a value that is computed when the diameter
application is started.</p>
</item>
-<tag><c>{'Supported-Vendor-Id', [<seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>]}</c></tag>
-<tag><c>{'Auth-Application-Id', [<seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>]}</c></tag>
-<tag><c>{'Inband-Security-Id', [<seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>]}</c></tag>
+<tag><c>{'Supported-Vendor-Id', [&dict_Unsigned32;]}</c></tag>
+<tag><c>{'Auth-Application-Id', [&dict_Unsigned32;]}</c></tag>
+<tag><c>{'Inband-Security-Id', [&dict_Unsigned32;]}</c></tag>
<item>
<p>
Inband-Security-Id defaults to the empty list, which is equivalent to a
@@ -338,9 +329,9 @@ If 1 (= TLS) is specified then TLS is selected if the CER/CEA received
from the peer offers it.</p>
</item>
-<tag><c>{'Acct-Application-Id', [<seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>]}</c></tag>
-<tag><c>{'Vendor-Specific-Application-Id', [<seealso marker="diameter_dict#DATA_TYPES">Grouped()</seealso>]}</c></tag>
-<tag><c>{'Firmware-Revision', <seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>}</c></tag>
+<tag><c>{'Acct-Application-Id', [&dict_Unsigned32;]}</c></tag>
+<tag><c>{'Vendor-Specific-Application-Id', [&dict_Grouped;]}</c></tag>
+<tag><c>{'Firmware-Revision', &dict_Unsigned32;}</c></tag>
</taglist>
@@ -357,7 +348,7 @@ It is an error to specify duplicate tuples.</p>
An expression that can be evaluated as a function in the following
sense.</p>
-<code>
+<pre>
eval([{M,F,A} | T]) ->
apply(M, F, T ++ A);
eval([[F|A] | T]) ->
@@ -366,19 +357,21 @@ eval([F|A]) ->
apply(F, A);
eval(F) ->
eval([F]).
-</code>
+</pre>
<p>
-Applying an <c><seealso marker="#evaluable">evaluable()</seealso></c>
+Applying an <c>&evaluable;</c>
<c>E</c> to an argument list <c>A</c>
is meant in the sense of <c>eval([E|A])</c>.</p>
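+
+<p>
+For example, applying the &evaluable; <c>{lists, member, [[1,2,3]]}</c>
+to the argument list <c>[2]</c> prepends the caller's argument to
+those in the tuple:</p>
+
+<pre>
+eval([{lists, member, [[1,2,3]]}, 2])  %% = lists:member(2, [1,2,3]) = true
+</pre>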
<warning>
<p>
-Beware of using fun expressions of the form <c>fun Name/Arity</c> (not
-fun Mod:Name/Arity) in situations in which the fun is not short-lived
+Beware of using fun expressions of the form <c>fun Name/Arity</c> in
+situations in which the fun is not short-lived
and code is to be upgraded at runtime since any processes retaining
-such a fun will have a reference to old code.</p>
+such a fun will have a reference to old code.
+In particular, such a value is typically inappropriate in
+configuration passed to &start_service; or &add_transport;.</p>
</warning>
<marker id="peer_filter"/>
@@ -387,10 +380,8 @@ such a fun will have a reference to old code.</p>
<tag><c>peer_filter() = term()</c></tag>
<item>
<p>
-A filter passed to <seealso marker="#call">call/4</seealso>
-in order to select candidate peers for a
-<seealso marker="diameter_app#pick_peer">pick_peer/4</seealso>
-callback.
+A filter passed to &call; in order to select candidate peers for a
+&app_pick_peer; callback.
Has one of the following types.</p>
<taglist>
@@ -421,42 +412,42 @@ or any peer if the request does not contain
a <c>Destination-Realm</c> AVP.</p>
</item>
-<tag><c>{host, any|<seealso marker="diameter_dict#DATA_TYPES">DiameterIdentity()</seealso>}</c></tag>
+<tag><c>{host, any|&dict_DiameterIdentity;}</c></tag>
<item>
<p>
Matches only those peers whose <c>Origin-Host</c> has the
specified value, or all peers if the atom <c>any</c>.</p>
</item>
-<tag><c>{realm, any|<seealso marker="diameter_dict#DATA_TYPES">DiameterIdentity()</seealso></c></tag>
+<tag><c>{realm, any|&dict_DiameterIdentity;}</c></tag>
<item>
<p>
Matches only those peers whose <c>Origin-Realm</c> has the
specified value, or all peers if the atom <c>any</c>.</p>
</item>
-<tag><c>{eval, <seealso marker="#evaluable">evaluable()</seealso>}</c></tag>
+<tag><c>{eval, &evaluable;}</c></tag>
<item>
<p>
-Matches only those peers for which the specified <c><seealso
-marker="#evaluable">evaluable()</seealso></c> returns
+Matches only those peers for which the specified
+<c>&evaluable;</c> returns
<c>true</c> on the connection's <c>diameter_caps</c> record.
Any other return value or exception is equivalent to <c>false</c>.</p>
</item>
-<tag><c>{neg, <seealso marker="#peer_filter">peer_filter()</seealso>}</c></tag>
+<tag><c>{neg, &peer_filter;}</c></tag>
<item>
<p>
Matches only those peers not matched by the specified filter.</p>
</item>
-<tag><c>{all, [<seealso marker="#peer_filter">peer_filter()</seealso>]}</c></tag>
+<tag><c>{all, [&peer_filter;]}</c></tag>
<item>
<p>
Matches only those peers matched by each filter in the specified list.</p>
</item>
-<tag><c>{any, [<seealso marker="#peer_filter">peer_filter()</seealso>]}</c></tag>
+<tag><c>{any, [&peer_filter;]}</c></tag>
<item>
<p>
Matches only those peers matched by at least one filter in the
@@ -472,15 +463,12 @@ that matches no peer.</p>
<note>
<p>
The <c>host</c> and <c>realm</c> filters examine the
-outgoing request as passed to <seealso marker="#call">call/4</seealso>,
-assuming that this is a record- or list-valued <c><seealso
-marker="diameter_app#message">diameter_app:message()</seealso></c>,
+outgoing request as passed to &call;,
+assuming that this is a record- or list-valued <c>&app_message;</c>,
and that the message contains at most one of each AVP.
-If this is not the case then the <c>{host|realm, <seealso
-marker="diameter_dict#DATA_TYPES">DiameterIdentity()</seealso>}</c>
+If this is not the case then the <c>{host|realm, &dict_DiameterIdentity;}</c>
filters must be used to achieve the desired result.
-An empty <c><seealso
-marker="diameter_dict#DATA_TYPES">DiameterIdentity()</seealso></c>
+An empty <c>&dict_DiameterIdentity;</c>
(which should not be typical)
matches all hosts/realms for the purposes of filtering.</p>
</note>
@@ -499,7 +487,7 @@ candidates list.</p>
<item>
<p>
An event message sent to processes that have subscribed to these using
-<seealso marker="#subscribe">subscribe/1</seealso>.</p>
+&subscribe;.</p>
<p>
The <c>info</c> field of the event record can have one of the
@@ -507,16 +495,27 @@ following types.</p>
<taglist>
+<tag><c>start</c></tag>
+<tag><c>stop</c></tag>
+
+<item>
+<p>
+The service is being started or stopped.
+No event precedes a <c>start</c> event.
+No event follows a <c>stop</c> event and this event
+implies the termination of all transport processes.</p>
+</item>
+
<tag><c>{up, Ref, Peer, Config, Pkt}</c></tag>
<tag><c>{up, Ref, Peer, Config}</c></tag>
<tag><c>{down, Ref, Peer, Config}</c></tag>
<item>
-<code>
-Ref = <seealso marker="#transport_ref">transport_ref()</seealso>
-Peer = <seealso marker="diameter_app#peer">diameter_app:peer()</seealso>
-Config = {connect|listen, [<seealso marker="#transport_opt">transport_opt()</seealso>]}
+<pre>
+Ref = &transport_ref;
+Peer = &app_peer;
+Config = {connect|listen, [&transport_opt;]}
Pkt = #diameter_packet{}
-</code>
+</pre>
<p>
The RFC 3539 watchdog state machine has
@@ -530,7 +529,8 @@ connectivity.</p>
<p>
Note that a single <c>up</c>/<c>down</c> event for a given peer
-corresponds to one <seealso marker="diameter_app#peer_up">peer_up/peer_down</seealso>
+corresponds to one
+<seealso marker="diameter_app#Mod:peer_up-3">peer_up/peer_down</seealso>
callback for each of the Diameter applications negotiated during
capabilities exchange.
That is, the event communicates connectivity with the
@@ -540,25 +540,23 @@ respect to individual Diameter applications.</p>
<tag><c>{reconnect, Ref, Opts}</c></tag>
<item>
-<code>
-Ref = <seealso marker="#transport_ref">transport_ref()</seealso>
-Opts = [<seealso marker="#transport_opt">transport_opt()</seealso>]
-</code>
+<pre>
+Ref = &transport_ref;
+Opts = [&transport_opt;]
+</pre>
<p>
A connecting transport is attempting to establish/reestablish a
-transport connection with a peer following <seealso
-marker="#reconnect_timer">reconnect_timer</seealso> or
-<seealso marker="#watchdog_timer">watchdog_timer</seealso>
-expiry.</p>
+transport connection with a peer following &reconnect_timer; or
+&watchdog_timer; expiry.</p>
</item>
<tag><c>{closed, Ref, Reason, Config}</c></tag>
<item>
-<code>
-Ref = <seealso marker="#transport_ref">transport_ref()</seealso>
-Config = {connect|listen, [<seealso marker="#transport_opt">transport_opt()</seealso>]}
-</code>
+<pre>
+Ref = &transport_ref;
+Config = {connect|listen, [&transport_opt;]}
+</pre>
<p>
Capabilities exchange has failed.
@@ -568,13 +566,13 @@ Capabilities exchange has failed.
<tag><c>{'CER', Result, Caps, Pkt}</c></tag>
<item>
-<code>
+<pre>
Result = ResultCode | {capabilities_cb, CB, ResultCode|discard}
Caps = #diameter_caps{}
Pkt = #diameter_packet{}
ResultCode = integer()
-CB = <seealso marker="#evaluable">evaluable()</seealso>
-</code>
+CB = &evaluable;
+</pre>
<p>
An incoming CER has been answered with the indicated result code or
@@ -588,11 +586,11 @@ contains the rejecting callback.</p>
<tag><c>{'CER', Caps, {ResultCode, Pkt}}</c></tag>
<item>
-<code>
+<pre>
ResultCode = integer()
Caps = #diameter_caps{}
Pkt = #diameter_packet{}
-</code>
+</pre>
<p>
An incoming CER contained errors and has been answered with the
@@ -601,14 +599,21 @@ indicated result code.
<c>Pkt</c> contains the CER in question.</p>
</item>
+<tag><c>{'CER', timeout}</c></tag>
+<item>
+<p>
+An expected CER was not received within &capx_timeout; of
+connection establishment.</p>
+</item>
+
<tag><c>{'CEA', Result, Caps, Pkt}</c></tag>
<item>
-<code>
+<pre>
Result = integer() | atom() | {capabilities_cb, CB, ResultCode|discard}
Caps = #diameter_caps{}
Pkt = #diameter_packet{}
ResultCode = integer()
-</code>
+</pre>
<p>
An incoming CEA has been rejected for the indicated reason.
@@ -623,10 +628,10 @@ contains the rejecting callback.</p>
<tag><c>{'CEA', Caps, Pkt}</c></tag>
<item>
-<code>
+<pre>
Caps = #diameter_caps{}
Pkt = #diameter_packet{}
-</code>
+</pre>
<p>
An incoming CEA contained errors and has been rejected.
@@ -634,17 +639,24 @@ An incoming CEA contained errors and has been rejected.
<c>Pkt</c> contains the CEA in question.</p>
</item>
+<tag><c>{'CEA', timeout}</c></tag>
+<item>
+<p>
+An expected CEA was not received within &capx_timeout;
+of connection establishment.</p>
+</item>
+
</taglist>
</item>
<tag><c>{watchdog, Ref, PeerRef, {From, To}, Config}</c></tag>
<item>
-<code>
-Ref = <seealso marker="#transport_ref">transport_ref()</seealso>
-PeerRef = <seealso marker="diameter_app#peer_ref">diameter_app:peer_ref()</seealso>
+<pre>
+Ref = &transport_ref;
+PeerRef = &app_peer_ref;
From, To = initial | okay | suspect | down | reopen
Config = {connect|listen, [transport_opt()]}
-</code>
+</pre>
<p>
An RFC 3539 watchdog state machine has changed state.</p>
@@ -662,8 +674,7 @@ info fields of forms other than the above.</p>
<tag><c>service_name() = term()</c></tag>
<item>
<p>
-The name of a service as passed to <seealso
-marker="#start_service">start_service/2</seealso> and with which the
+The name of a service as passed to &start_service; and with which the
service is identified.
There can be at most one service with a given name on a given node.
Note that <seealso marker="erts:erlang#make_ref-0">erlang:make_ref/0</seealso>
@@ -675,91 +686,97 @@ can be used to generate a service name that is somewhat unique.</p>
<tag><c>service_opt()</c></tag>
<item>
<p>
-An option passed to <seealso
-marker="#start_service">start_service/2</seealso>.
-Can be any <c><seealso marker="#capability">capability()</seealso></c> as
-well as the following.</p>
+An option passed to &start_service;.
+Can be any <c>&capability;</c> as well as the following.</p>
<taglist>
-<tag><c>{application, [<seealso marker="#application_opt">application_opt()</seealso>]}</c></tag>
+<tag><c>{application, [&application_opt;]}</c></tag>
<item>
<p>
Defines a Diameter application supported by the service.</p>
<p>
-A service must configure one <c>application</c> for each Diameter
+A service must configure one tuple for each Diameter
application it intends to support.
-For an outgoing Diameter request, the relevant <c><seealso
-marker="#application_alias">application_alias()</seealso></c> is
-passed to <seealso marker="#call">call/4</seealso>, while for an
+For an outgoing Diameter request, the relevant <c>&application_alias;</c> is
+passed to &call;, while for an
incoming request the application identifier in the message
header determines the application, the identifier being specified in
the application's <seealso marker="diameter_dict">dictionary</seealso>
file.</p>
</item>
-</taglist>
-
-<marker id="transport_opt"/>
-</item>
-
-<tag><c>transport_opt()</c></tag>
+<tag><c>{restrict_connections, false
+ | node
+ | nodes
+ | [node()]
+ | evaluable()}</c></tag>
<item>
<p>
-An option passed to <seealso
-marker="#add_transport">add_transport/2</seealso>.
-Has one of the following types.</p>
+Specifies the degree to which multiple transport connections to the
+same peer are accepted by the service.</p>
-<taglist>
-<tag><c>{transport_module, atom()}</c></tag>
-<item>
<p>
-A module implementing a transport process as defined in <seealso
-marker="diameter_transport">diameter_transport(3)</seealso>.
-Defaults to <c>diameter_tcp</c> if unspecified.</p>
+If the value is of type <c>[node()]</c> then a connection is rejected
+if another already
+exists on any of the specified nodes.
+Values of type <c>false</c>, <c>node</c>, <c>nodes</c> or
+&evaluable; are equivalent to
+values <c>[]</c>, <c>[node()]</c>, <c>[node()|nodes()]</c> and the
+evaluated value, respectively, evaluation of each expression taking
+place whenever a new connection is to be established.
+Note that <c>false</c> allows an unlimited number of connections to be
+established with the same peer.</p>
<p>
-Multiple <c>transport_module</c> and <c>transport_config</c>
-options are allowed.
-The order of these is significant in this case (and only in this case),
-a <c>transport_module</c> being paired with the first
-<c>transport_config</c> following it in the options list, or the
-default value for trailing modules.
-Transport starts will be attempted with each of the
-modules in order until one establishes a connection within the
-corresponding timeout (see below) or all fail.</p>
+Multiple connections are independent and governed
+by their own peer and watchdog state machines.</p>
+
+<p>
+Defaults to <c>nodes</c>.</p>
</item>
-<tag><c>{transport_config, term()}</c></tag>
-<tag><c>{transport_config, term(), <seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>}</c></tag>
+<tag><c>{sequence, {H,N} | &evaluable;}</c></tag>
<item>
<p>
-A term passed as the third argument to the <seealso
-marker="diameter_transport#start">start/3</seealso> function of
-the relevant <c>transport_module</c> in order to start a transport process.
-Defaults to the empty list if unspecified.</p>
-
+Specifies a constant value <c>H</c> for the topmost <c>32-N</c> bits
+of 32-bit End-to-End and Hop-by-Hop identifiers generated
+by the service, either explicitly or as a return value of a function
+to be evaluated at &start_service;.
+In particular, an identifier <c>Id</c> is mapped to a new identifier
+as follows.</p>
+<pre>
+(H bsl N) bor (Id band ((1 bsl N) - 1))
+</pre>
<p>
-The 3-tuple form additionally specifies an interval, in milliseconds,
-after which a started transport process should be terminated if it has
-not yet established a connection.
-For example, the following options on a connecting transport
-request a connection with one peer over SCTP or another
-(typically the same) over TCP.</p>
+Note that RFC 3588 requires that End-to-End identifiers remain unique
+for a period of at least 4 minutes and that this and the call rate
+place a lower bound on the appropriate values of <c>N</c>:
+at a rate of <c>R</c> requests per second an <c>N</c>-bit counter
+traverses all of its values in <c>(1 bsl N) div (R*60)</c> minutes so
+the bound is <c>4*R*60 =&lt; 1 bsl N</c>.</p>
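+
+<p>
+For example, at <c>R = 1000</c> requests per second the bound is
+<c>4*1000*60 = 240000 =&lt; 1 bsl N</c>, which requires <c>N</c> to be
+at least 18, since <c>1 bsl 17 = 131072</c> while
+<c>1 bsl 18 = 262144</c>.</p>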
-<code>
-{transport_module, diameter_sctp}
-{transport_config, SctpOpts, 5000}
-{transport_module, diameter_tcp}
-{transport_config, TcpOpts}
-</code>
+<p><c>N</c> must lie in the range <c>0..32</c> and <c>H</c> must be a
+non-negative integer less than <c>1 bsl (32-N)</c>.</p>
<p>
-To listen on both SCTP and TCP, define one transport for each.</p>
+Defaults to <c>{0,32}</c>.</p>
+</item>
+
+</taglist>
+
+<marker id="transport_opt"/>
</item>
-<tag><c>{applications, [<seealso marker="#application_alias">application_alias()</seealso>]}</c></tag>
+<tag><c>transport_opt()</c></tag>
+<item>
+<p>
+An option passed to &add_transport;.
+Has one of the following types.</p>
+
+<taglist>
+<marker id="applications"/>
+<tag><c>{applications, [&application_alias;]}</c></tag>
<item>
<p>
The list of Diameter applications to which the transport should be
@@ -768,7 +785,8 @@ Defaults to all applications configured on the service in question.
Applications not configured on the service in question are ignored.</p>
</item>
-<tag><c>{capabilities, [<seealso marker="#capability">capability()</seealso>]}</c></tag>
+<marker id="capabilities"/>
+<tag><c>{capabilities, [&capability;]}</c></tag>
<item>
<p>
AVP's used to construct outgoing CER/CEA messages.
@@ -778,98 +796,260 @@ question.</p>
<p>
Specifying a capability as a transport option
may be particularly appropriate for Inband-Security-Id, in case
-TLS is desired over TCP as implemented by
-<seealso marker="diameter_tcp">diameter_tcp(3)</seealso>.</p>
+TLS is desired over TCP as implemented by &man_tcp;.</p>
</item>
-<tag><c>{capabilities_cb, <seealso marker="#evaluable">evaluable()</seealso>}</c></tag>
+<marker id="capabilities_cb"/>
+<tag><c>{capabilities_cb, &evaluable;}</c></tag>
<item>
<p>
A callback invoked upon reception of CER/CEA during capabilities
exchange in order to ask whether or not the connection should
be accepted.
-Applied to the relevant <c><seealso
-marker="#transport_ref">transport_ref()</seealso></c> and the
-<c>#diameter_caps{}</c> record of the connection.
-Returning <c>ok</c> accepts the connection.
-Returning <c>integer()</c> causes an incoming
-CER to be answered with the specified Result-Code.
-Returning <c>discard</c> causes an incoming CER to
-be discarded.
-Returning <c>unknown</c> is equivalent to returning <c>3010</c>,
-DIAMETER_UNKNOWN_PEER.
-Returning anything but <c>ok</c> or a 2xxx series result
-code causes the transport connection to be broken.</p>
+Applied to the <c>&transport_ref;</c> and
+<c>#diameter_caps{}</c> record of the connection.</p>
+
+<p>
+The return value can have one of the following types.</p>
+
+<taglist>
+<tag><c>ok</c></tag>
+<item>
+<p>
+Accept the connection.</p>
+</item>
+
+<tag><c>integer()</c></tag>
+<item>
+<p>
+Causes an incoming CER to be answered with the specified Result-Code.</p>
+</item>
+
+<tag><c>discard</c></tag>
+<item>
+<p>
+Causes an incoming CER to be discarded without CEA being sent.</p>
+</item>
+
+<tag><c>unknown</c></tag>
+<item>
+<p>
+Equivalent to returning <c>3010</c>, DIAMETER_UNKNOWN_PEER.</p>
+</item>
+</taglist>
<p>
-Multiple <c>capabilities_cb</c> options can be specified, in which
+Returning anything but <c>ok</c> or a 2xxx series result
+code causes the transport connection to be broken.
+Multiple &capabilities_cb;
+options can be specified, in which
case the corresponding callbacks are applied until either all return
<c>ok</c> or one does not.</p>
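+
+<p>
+A minimal sketch of such a callback (assuming that the record's
+<c>origin_realm</c> field holds the <c>{Local, Remote}</c> values of
+the connection, and using a hypothetical <c>is_known_realm/1</c>
+helper) might look as follows.</p>
+
+<pre>
+capx_cb(_Ref, #diameter_caps{origin_realm = {_Local, Remote}}) ->
+    case is_known_realm(Remote) of
+        true  -> ok;      %% accept the connection
+        false -> unknown  %% answered with 3010, DIAMETER_UNKNOWN_PEER
+    end.
+</pre>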
+</item>
-<marker id="watchdog_timer"/>
+<marker id="capx_timeout"/>
+<tag><c>{capx_timeout, &dict_Unsigned32;}</c></tag>
+<item>
+<p>
+The number of milliseconds after which a transport process having an
+established transport connection will be terminated if the expected
+capabilities exchange message (CER or CEA) is not received from the peer.
+For a connecting transport, the timing of reconnection attempts is
+governed by &watchdog_timer; or
+&reconnect_timer; expiry.
+For a listening transport, the peer determines the timing.</p>
+
+<p>
+Defaults to 10000.</p>
</item>
-<tag><c>{watchdog_timer, TwInit}</c></tag>
+<marker id="disconnect_cb"/>
+<tag><c>{disconnect_cb, &evaluable;}</c></tag>
+
<item>
-<code>
-TwInit = <seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>
- | {M,F,A}
-</code>
+<p>
+A callback invoked prior to terminating the transport process of a
+transport connection having watchdog state <c>OKAY</c>.
+Applied to <c>Reason=transport|service|application</c> and the
+<c>&transport_ref;</c> and
+<c>&app_peer;</c>
+in question, <c>Reason</c> indicating whether the diameter
+application is being stopped, the service in question is being stopped
+at &stop_service; or
+the transport in question is being removed at &remove_transport;,
+respectively.</p>
<p>
-The RFC 3539 watchdog timer.
-An integer value is interpreted as the RFC's TwInit in milliseconds,
-a jitter of &plusmn; 2 seconds being added at each rearming of the
-timer to compute the RFC's Tw.
-An MFA is expected to return the RFC's Tw directly, with jitter
-applied, allowing the jitter calculation to be performed by
-the callback.</p>
+The return value can have one of the following types.</p>
+<taglist>
+<tag><c>{dpr, [option()]}</c></tag>
+<item>
<p>
-An integer value must be at least 6000 as required by RFC 3539.
-Defaults to 30000 if unspecified.</p>
+Causes Disconnect-Peer-Request to be sent to the peer, the transport
+process being terminated following reception of
+Disconnect-Peer-Answer or timeout.
+An <c>option()</c> can be one of the following.</p>
-<marker id="reconnect_timer"/>
+<taglist>
+<tag><c>{cause, 0|rebooting|1|busy|2|goaway}</c></tag>
+<item>
+<p>
+The Disconnect-Cause to send, <c>REBOOTING</c>, <c>BUSY</c> and
+<c>DO_NOT_WANT_TO_TALK_TO_YOU</c> respectively.
+Defaults to <c>rebooting</c> for <c>Reason=service|application</c> and
+<c>goaway</c> for <c>Reason=transport</c>.</p>
+</item>
+
+<tag><c>{timeout, &dict_Unsigned32;}</c></tag>
+<item>
+<p>
+The number of milliseconds after which the transport process is
+terminated if DPA has not been received.
+Defaults to 1000.</p>
+</item>
+</taglist>
+</item>
+
+<tag><c>dpr</c></tag>
+<item>
+<p>
+Equivalent to <c>{dpr, []}</c>.</p>
+</item>
+
+<tag><c>close</c></tag>
+<item>
+<p>
+Causes the transport process to be terminated without
+Disconnect-Peer-Request being sent to the peer.</p>
+</item>
+
+<tag><c>ignore</c></tag>
+<item>
+<p>
+Equivalent to not having configured the callback.</p>
+</item>
+</taglist>
+
+<p>
+Multiple &disconnect_cb;
+options can be specified, in which
+case the corresponding callbacks are applied until one of them returns
+a value other than <c>ignore</c>.
+If all of the callbacks return <c>ignore</c>, the result is
+equivalent to not having configured them.</p>
+
+<p>
+Defaults to a single callback returning <c>dpr</c>.</p>
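+
+<p>
+As a sketch, a callback configured as
+<c>{disconnect_cb, {Mod, disconnect, []}}</c> (with <c>Mod</c> a
+module of your own) that answers service or application stop with a
+BUSY Disconnect-Cause and otherwise falls back on the default
+behaviour:</p>
+
+<pre>
+disconnect(Reason, _Ref, _Peer)
+  when Reason == service;
+       Reason == application ->
+    {dpr, [{cause, busy}, {timeout, 2000}]};
+disconnect(_Reason, _Ref, _Peer) ->
+    dpr.
+</pre>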
</item>
+<marker id="reconnect_timer"/>
<tag><c>{reconnect_timer, Tc}</c></tag>
<item>
-<code>
-Tc = <seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>
-</code>
+<pre>
+Tc = &dict_Unsigned32;
+</pre>
<p>
For a connecting transport, the RFC 3588 Tc timer, in milliseconds.
Note that this timer determines the frequency with which a transport
will attempt to establish a connection with its peer only <em>before</em>
an initial connection is established: once there is an initial
-connection it's watchdog_timer that determines the frequency of
-reconnection attempts, as required by RFC 3539.</p>
+connection it's &watchdog_timer; that determines the
+frequency of reconnection attempts, as required by RFC 3539.</p>
<p>
For a listening transport, the timer specifies the time after which a
previously connected peer will be forgotten: a connection after this time is
regarded as an initial connection rather than a reestablishment,
-causing the RFC 3539 state machine to pass to state OPEN rather than
+causing the RFC 3539 state machine to pass to state OKAY rather than
REOPEN.
-Note that these semantics are not goverened by the RFC and
-that a listening transport's <c>reconnect_timer</c> should be greater
+Note that these semantics are not governed by the RFC and
+that a listening transport's &reconnect_timer; should be greater
than its peer's Tw plus jitter.</p>
<p>
Defaults to 30000 for a connecting transport and 60000 for a listening
transport.</p>
+</item>
+
+<marker id="transport_config"/>
+<tag><c>{transport_config, term()}</c></tag>
+<tag><c>{transport_config, term(), &dict_Unsigned32;}</c></tag>
+<item>
+<p>
+A term passed as the third argument to the &transport_start; function of
+the relevant &transport_module; in order to
+start a transport process.
+Defaults to the empty list if unspecified.</p>
+
+<p>
+The 3-tuple form additionally specifies an interval, in milliseconds,
+after which a started transport process should be terminated if it has
+not yet established a connection.
+For example, the following options on a connecting transport
+request a connection with one peer over SCTP or another
+(typically the same) over TCP.</p>
+
+<pre>
+{transport_module, diameter_sctp}
+{transport_config, SctpOpts, 5000}
+{transport_module, diameter_tcp}
+{transport_config, TcpOpts}
+</pre>
+
+<p>
+To listen on both SCTP and TCP, define one transport for each.</p>
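+
+<p>
+For example, a listening transport on the standard Diameter port
+might be added roughly as follows, the <c>ip</c> and <c>port</c>
+tuples being options interpreted by &man_tcp;.</p>
+
+<pre>
+diameter:add_transport(SvcName,
+                       {listen, [{transport_module, diameter_tcp},
+                                 {transport_config, [{ip, {127,0,0,1}},
+                                                     {port, 3868}]}]})
+</pre>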
+</item>
+
+<marker id="transport_module"/>
+<tag><c>{transport_module, atom()}</c></tag>
+<item>
+<p>
+A module implementing a transport process as defined in &man_transport;.
+Defaults to <c>diameter_tcp</c> if unspecified.</p>
+
+<p>
+Multiple <c>transport_module</c> and &transport_config;
+options are allowed.
+The order of these is significant in this case (and only in this case),
+a <c>transport_module</c> being paired with the first
+&transport_config;
+following it in the options list, or the default value for trailing
+modules.
+Transport starts will be attempted with each of the
+modules in order until one establishes a connection within the
+corresponding timeout (see above) or all fail.</p>
+</item>
+
+<marker id="watchdog_timer"/>
+<tag><c>{watchdog_timer, TwInit}</c></tag>
+<item>
+<pre>
+TwInit = &dict_Unsigned32;
+ | {M,F,A}
+</pre>
+
+<p>
+The RFC 3539 watchdog timer.
+An integer value is interpreted as the RFC's TwInit in milliseconds,
+a jitter of &plusmn; 2 seconds being added at each rearming of the
+timer to compute the RFC's Tw.
+An MFA is expected to return the RFC's Tw directly, with jitter
+applied, allowing the jitter calculation to be performed by
+the callback.</p>
+<p>
+An integer value must be at least 6000 as required by RFC 3539.
+Defaults to 30000 if unspecified.</p>
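+
+<p>
+For example, a watchdog_timer MFA corresponding to the default TwInit
+of 30 seconds might be configured and implemented as follows, the
+module and function names being purely illustrative.</p>
+
+<pre>
+{watchdog_timer, {my_watchdog, tw, []}}
+
+%% In a (hypothetical) module my_watchdog: return Tw directly,
+%% with a jitter of +/- 2 seconds already applied.
+tw() ->
+    28000 + random:uniform(4001) - 1.
+</pre>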
</item>
</taglist>
<p>
Unrecognized options are silently ignored but are returned unmodified
-by <seealso
-marker="#service_info">service_info/2</seealso> and can be referred to
-in predicate functions passed to <seealso
-marker="#remove_transport">remove_transport/2</seealso>.</p>
+by &service_info; and can be referred to
+in predicate functions passed to &remove_transport;.</p>
<marker id="transport_ref"/>
</item>
@@ -877,8 +1057,7 @@ marker="#remove_transport">remove_transport/2</seealso>.</p>
<tag><c>transport_ref() = reference()</c></tag>
<item>
<p>
-An reference returned by <seealso
-marker="#add_transport">add_transport/2</seealso> that
+A reference returned by &add_transport; that
identifies the configuration.</p>
</item>
@@ -886,7 +1065,6 @@ identifies the configuration.</p>
</section>
-<marker id="add_transport"/>
<funcs>
<!-- ===================================================================== -->
@@ -896,9 +1074,9 @@ identifies the configuration.</p>
-> {ok, Ref} | {error, Reason}</name>
<fsummary>Add transport capability to a service.</fsummary>
<type>
-<v>SvcName = <seealso marker="#service_name">service_name()</seealso></v>
-<v>Opt = <seealso marker="#transport_opt">transport_opt()</seealso></v>
-<v>Ref = <seealso marker="#transport_ref">transport_ref()</seealso></v>
+<v>SvcName = &service_name;</v>
+<v>Opt = &transport_opt;</v>
+<v>Ref = &transport_ref;</v>
<v>Reason = term()</v>
</type>
<desc>
@@ -917,8 +1095,7 @@ one peer, an listening transport potentially with many.</p>
The diameter application takes responsibility for exchanging
CER/CEA with the peer.
Upon successful completion of capabilities exchange the service
-calls each relevant application module's <seealso
-marker="diameter_app#peer_up">peer_up/3</seealso> callback
+calls each relevant application module's &app_peer_up; callback
after which the caller can exchange Diameter messages with the peer over
the transport.
In addition to CER/CEA, the service takes responsibility for the
@@ -937,7 +1114,6 @@ been configured: a service can be started after configuring
its transports.</p>
</note>
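+
+<p>
+For example, a listening TCP transport on the loopback address might
+be added to a service as follows.</p>
+
+<pre>
+{ok, Ref} = diameter:add_transport(SvcName,
+                {listen, [{transport_module, diameter_tcp},
+                          {transport_config, [{ip, {127,0,0,1}},
+                                              {port, 3868}]}]})
+</pre>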
-<marker id="call"/>
</desc>
</func>
@@ -947,11 +1123,11 @@ its transports.</p>
<name>call(SvcName, App, Request, [Opt]) -> Answer | ok | {error, Reason}</name>
<fsummary>Send a Diameter request message.</fsummary>
<type>
-<v>SvcName = <seealso marker="#service_name">service_name()</seealso></v>
-<v>App = <seealso marker="#application_alias">application_alias()</seealso></v>
-<v>Request = <seealso marker="diameter_app#message">diameter_app:message()</seealso></v>
+<v>SvcName = &service_name;</v>
+<v>App = &application_alias;</v>
+<v>Request = &app_message;</v>
<v>Answer = term()</v>
-<v>Opt = <seealso marker="#call_opt">call_opt()</seealso></v>
+<v>Opt = &call_opt;</v>
</type>
<desc>
<p>
@@ -960,37 +1136,29 @@ Send a Diameter request message.</p>
<p>
<c>App</c> specifies the Diameter application in which the request is
defined and callbacks to the corresponding callback module
-will follow as described below and in <seealso
-marker="diameter_app">diameter_app(3)</seealso>.
+will follow as described below and in &man_app;.
Unless the <c>detach</c> option is specified, the call returns either
when an answer message is received from the peer or an error occurs.
In the answer case, the return value is as returned by a
-<seealso
-marker="diameter_app#handle_answer">handle_answer/4</seealso>
-callback.
+&app_handle_answer; callback.
In the error case, whether or not the error is returned directly
-by diameter or from a <seealso
-marker="diameter_app#handle_error">handle_error/4</seealso>
+by diameter or from a &app_handle_error;
callback depends on whether or not the outgoing request is
successfully encoded for transmission to the peer, the cases being
documented below.</p>
<p>
If there are no suitable peers, or if
-<seealso marker="diameter_app#pick_peer">pick_peer/4</seealso>
+&app_pick_peer;
rejects them by returning <c>false</c>, then <c>{error,no_connection}</c>
is returned.
-Otherwise <seealso marker="diameter_app#pick_peer">pick_peer/4</seealso>
-is followed by a
-<seealso
-marker="diameter_app#prepare_request">prepare_request/3</seealso>
-callback, the message is encoded and then sent.</p>
+Otherwise &app_pick_peer; is followed by a
+&app_prepare_request; callback, the message is encoded and then sent.</p>
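+
+<p>
+For example, a call on an application configured with alias
+<c>common</c> might look like this, the result being either a
+&app_handle_answer; or &app_handle_error; return value, or an
+<c>{error, Reason}</c> tuple from diameter itself.</p>
+
+<pre>
+%% Wait at most 5 seconds for the answer.
+Result = diameter:call(SvcName, common, Request, [{timeout, 5000}])
+</pre>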
<p>
There are several error cases which may prevent an
answer from being received and passed to a
-<seealso marker="diameter_app#handle_answer">handle_answer/4</seealso>
-callback:</p>
+&app_handle_answer; callback:</p>
<list>
@@ -1005,16 +1173,14 @@ is returned.</p>
<p>
If the request is successfully encoded and sent but
the answer times out then a
-<seealso marker="diameter_app#handle_error">handle_error/4</seealso>
-callback takes place with <c>Reason = timeout</c>.</p>
+&app_handle_error; callback takes place with <c>Reason = timeout</c>.</p>
</item>
<item>
<p>
If the request is successfully encoded and sent but the service in
question is stopped before an answer is received then a
-<seealso marker="diameter_app#handle_error">handle_error/4</seealso>
-callback takes place with <c>Reason = cancel</c>.</p>
+&app_handle_error; callback takes place with <c>Reason = cancel</c>.</p>
</item>
<item>
@@ -1023,18 +1189,11 @@ If the transport connection with the peer goes down after the request
has been sent but before an answer has been received then an attempt
is made to resend the request to an alternate peer.
If no such peer is available, or if the subsequent
-<seealso marker="diameter_app#pick_peer">pick_peer/4</seealso>
-callback rejects the candidates, then a
-<seealso marker="diameter_app#handle_error">handle_error/4</seealso>
-callback takes place with <c>Reason = failover</c>.
-If a peer is selected then a
-<seealso
-marker="diameter_app#prepare_retransmit">prepare_retransmit/3</seealso>
+&app_pick_peer; callback rejects the candidates, then a
+&app_handle_error; callback takes place with <c>Reason = failover</c>.
+If a peer is selected then a &app_prepare_retransmit;
callback takes place, after which the semantics are the same as
-following an initial
-<seealso marker="diameter_app#prepare_request">
-prepare_request/3</seealso>
-callback.</p>
+following an initial &app_prepare_request; callback.</p>
</item>
<item>
@@ -1061,14 +1220,13 @@ Note that <c>{error,encode}</c> is the only return value which
guarantees that the request has <em>not</em> been sent over the
transport connection.</p>
-<marker id="origin_state_id"/>
</desc>
</func>
<!-- ===================================================================== -->
<func>
-<name>origin_state_id() -> <seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso></name>
+<name>origin_state_id() -> &dict_Unsigned32;</name>
<fsummary>Returns a reasonable Origin-State-Id.</fsummary>
<desc>
<p>
@@ -1077,26 +1235,26 @@ outgoing messages.</p>
<p>
The value returned is the number of seconds since 19680120T031408Z,
-the first value that can be encoded as a Diameter <c><seealso marker="diameter_dict#DATA_TYPES">Time()</seealso></c>,
+the first value that can be encoded as a Diameter <c>&dict_Time;</c>,
at the time the diameter application was started.</p>
-<marker id="remove_transport"/>
</desc>
</func>
<!-- ===================================================================== -->
<func>
-<name>remove_transport(SvcName, Pred) -> ok</name>
+<name>remove_transport(SvcName, Pred) -> ok | {error, Reason}</name>
<fsummary>Remove previously added transports.</fsummary>
<type>
-<v>SvcName = <seealso marker="#service_name">service_name()</seealso></v>
-<v>Pred = Fun | MFA | <seealso marker="#transport_ref">transport_ref()</seealso> | list() | true | false</v>
+<v>SvcName = &service_name;</v>
+<v>Pred = Fun | MFA | &transport_ref; | list() | true | false</v>
<v></v>
-<v>Fun = fun((<seealso marker="#transport_ref">transport_ref()</seealso>, connect|listen, list()) -> boolean())</v>
-<v>&nbsp;&nbsp;&nbsp; | fun((<seealso marker="#transport_ref">transport_ref()</seealso>, list()) -> boolean())</v>
+<v>Fun = fun((&transport_ref;, connect|listen, list()) -> boolean())</v>
+<v>&nbsp;&nbsp;&nbsp; | fun((&transport_ref;, list()) -> boolean())</v>
<v>&nbsp;&nbsp;&nbsp; | fun((list()) -> boolean())</v>
<v>MFA = {atom(), atom(), list()}</v>
+<v>Reason = term()</v>
</type>
<desc>
<p>
@@ -1106,12 +1264,11 @@ Remove previously added transports.</p>
<c>Pred</c> determines which transports to remove.
An arity-3-valued <c>Pred</c> removes all transports for which
<c>Pred(Ref, Type, Opts)</c> returns <c>true</c>, where <c>Type</c> and
-<c>Opts</c> are as passed to <seealso
-marker="#add_transport">add_transport/2</seealso> and <c>Ref</c> is
+<c>Opts</c> are as passed to &add_transport; and <c>Ref</c> is
as returned by it.
The remaining forms are equivalent to an arity-3 fun as follows.</p>
-<code>
+<pre>
Pred = fun(transport_ref(), list()): fun(Ref, _, Opts) -> Pred(Ref, Opts) end
Pred = fun(list()): fun(_, _, Opts) -> Pred(Opts) end
Pred = transport_ref(): fun(Ref, _, _) -> Pred == Ref end
@@ -1119,20 +1276,15 @@ Pred = list(): fun(_, _, Opts) -> [] == Pred -- Opts end
Pred = true: fun(_, _, _) -> true end
Pred = false: fun(_, _, _) -> false end
Pred = {M,F,A}: fun(Ref, Type, Opts) -> apply(M, F, [Ref, Type, Opts | A]) end
-</code>
+</pre>
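+
+<p>
+For example, the following removes all connecting transports from a
+service.</p>
+
+<pre>
+diameter:remove_transport(SvcName, fun(_Ref, Type, _Opts) -> Type == connect end)
+</pre>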
<p>
-Removing a transport causes all associated transport connections to
-be broken.
-A DPR message with
-Disconnect-Cause <c>DO_NOT_WANT_TO_TALK_TO_YOU</c> will be sent
-to each connected peer before disassociating the transport configuration
-from the service and terminating the transport upon reception of
-DPA or timeout.</p>
-
-<!-- TODO: document the timeout value, possibly make configurable. -->
+Removing a transport causes the corresponding transport processes to
+be terminated.
+Whether or not a DPR message is sent to a peer is
+controlled by the value of &disconnect_cb;
+configured on the transport.</p>
-<marker id="service_info"/>
</desc>
</func>
@@ -1142,13 +1294,19 @@ DPA or timeout.</p>
<name>service_info(SvcName, Info) -> term()</name>
<fsummary>Return information about a started service.</fsummary>
<type>
-<v>SvcName = <seealso marker="#service_name">service_name()</seealso></v>
+<v>SvcName = &service_name;</v>
<v>Info = Item | [Info]</v>
<v>Item = atom()</v>
</type>
<desc>
<p>
Return information about a started service.
+Requesting info for an unknown service causes <c>undefined</c> to be
+returned.
+Requesting a list of items causes a tagged list to be
+returned.</p>
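+
+<p>
+For example, two capabilities values might be retrieved as
+follows.</p>
+
+<pre>
+Vals = diameter:service_info(SvcName, ['Origin-Host', 'Origin-Realm']),
+{'Origin-Host', OH} = lists:keyfind('Origin-Host', 1, Vals)
+</pre>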
+
+<p>
<c>Item</c> can be one of the following.</p>
<taglist>
@@ -1167,15 +1325,13 @@ Return information about a started service.
<tag><c>'Firmware-Revision'</c></tag>
<item>
<p>
-Return a capability value as configured with <seealso
-marker="#start_service">start_service/2</seealso>.</p>
+Return a capability value as configured with &start_service;.</p>
</item>
<tag><c>applications</c></tag>
<item>
<p>
-Return the list of applications as configured with <seealso
-marker="#start_service">start_service/2</seealso>.
+Return the list of applications as configured with &start_service;.
</p>
</item>
@@ -1183,23 +1339,21 @@ marker="#start_service">start_service/2</seealso>.
<item>
<p>
Return a tagged list of all capabilities values as configured with
-<seealso
-marker="#start_service">start_service/2</seealso>.</p>
+&start_service;.</p>
</item>
<tag><c>transport</c></tag>
<item>
<p>
Return a list containing one entry for each of the service's transport
-as configured with <seealso
-marker="#add_transport">add_transport/2</seealso>.
+as configured with &add_transport;.
Each entry is a tagged list containing both configuration and
information about established peer connections.
An example return value for a client service with Origin-Host
"client.example.com" configured with a single transport connected to
"server.example.com" might look as follows.</p>
-<code>
+<pre>
[[{ref,#Ref&lt;0.0.0.93>},
{type,connect},
{options,[{transport_module,diameter_tcp},
@@ -1244,23 +1398,18 @@ An example return value with for a client service with Origin-Host
{{{0,258,0},recv,{'Result-Code',2001}},3},
{{{0,280,1},recv},2},
{{{0,280,0},send},2}]}]]
-</code>
+</pre>
<p>
-Here <c>ref</c> is a <c><seealso
-marker="#transport_ref">transport_ref()</seealso></c> and <c>options</c>
-the corresponding <c><seealso
-marker="#transport_opt">transport_opt()</seealso></c> list passed to <seealso
-marker="#add_transport">add_transport/2</seealso>.
+Here <c>ref</c> is a <c>&transport_ref;</c> and <c>options</c>
+the corresponding <c>&transport_opt;</c> list passed to
+&add_transport;.
The <c>watchdog</c> entry shows the state of a connection's RFC 3539 watchdog
state machine.
-The <c>peer</c> entry identifies the <c><seealso
-marker="diameter_app#peer_ref">diameter_app:peer_ref()</seealso></c> for
-which there will have been <seealso
-marker="diameter_app#peer_up">peer_up</seealso> callbacks for the
+The <c>peer</c> entry identifies the <c>&app_peer_ref;</c> for
+which there will have been &app_peer_up; callbacks for the
Diameter applications identified by the <c>apps</c> entry,
-<c>common</c> being the <c><seealso
-marker="#application_alias">application_alias()</seealso></c>.
+<c>common</c> being the <c>&application_alias;</c>.
The <c>caps</c> entry identifies the capabilities sent by the local
node and received from the peer during capabilities exchange.
The <c>port</c> entry displays socket-level information about the
@@ -1279,12 +1428,12 @@ during the lifetime of the transport configuration.</p>
<p>
A listening transport presents its information slightly differently
-since there may be multiple accepted connections for the same <c><seealso
-marker="#transport_ref">transport_ref()</seealso></c>.
+since there may be multiple accepted connections for the same
+<c>&transport_ref;</c>.
The <c>transport</c> info returned by a server with a single client
connection might look as follows.</p>
-<code>
+<pre>
[[{ref,#Ref&lt;0.0.0.61>},
{type,listen},
{options,[{transport_module,diameter_tcp},
@@ -1331,7 +1480,7 @@ connection might look as follows.</p>
{{{0,280,0},send},5},
{{{0,257,1},recv},1},
{{{0,257,0},send},1}]}]]
-</code>
+</pre>
<p>
The information presented here is as in the <c>connect</c> case except
@@ -1350,7 +1499,7 @@ connections and for which Diameter-level statistics are accumulated
only for the lifetime of the transport connection.
A return value for the server above might look as follows.</p>
-<code>
+<pre>
[[{ref,#Ref&lt;0.0.0.61>},
{type,accept},
{options,[{transport_module,diameter_tcp},
@@ -1396,7 +1545,7 @@ A return value for the server above might look as follows.</p>
{{{0,280,0},send},66},
{{{0,257,1},recv},1},
{{{0,257,0},send},1}]}]]
-</code>
+</pre>
<p>
Note that there may be multiple entries with the same <c>ref</c>, in
@@ -1407,27 +1556,37 @@ contrast to <c>transport</c> info.</p>
<item>
<p>
Return a <c>{{Counter, Ref}, non_neg_integer()}</c> list of counter values.
-<c>Ref</c> can be either a <c><seealso
-marker="#transport_ref">transport_ref()</seealso></c>
-or a <c><seealso
-marker="diameter_app#peer_ref">diameter_app:peer_ref()</seealso></c>.
+<c>Ref</c> can be either a <c>&transport_ref;</c>
+or a <c>&app_peer_ref;</c>.
Entries for the latter are folded into corresponding entries for the
former as peer connections go down.
-Entries for both are removed at <seealso
-marker="#remove_transport">remove_transport/2</seealso>.
+Entries for both are removed at &remove_transport;.
The Diameter-level statistics returned by <c>transport</c> and
<c>connections</c> info are based upon these entries.</p>
</item>
-</taglist>
-
+<tag><c>&app_peer_ref;</c></tag>
+<item>
<p>
-Requesting info for an unknown service causes <c>undefined</c> to be
-returned.
-Requesting a list of items causes a tagged list to be
-returned.</p>
+Return transport configuration associated with a single peer, as
+passed to &add_transport;.
+The returned list is empty if the peer is unknown.
+Otherwise it contains the <c>ref</c>, <c>type</c> and <c>options</c>
+tuples as in <c>transport</c> and <c>connections</c> info above.
+For example:</p>
+
+<pre>
+[{ref,#Ref&lt;0.0.0.61>},
+ {type,accept},
+ {options,[{transport_module,diameter_tcp},
+ {transport_config,[{reuseaddr,true},
+ {ip,{127,0,0,1}},
+ {port,3868}]}]}]
+</pre>
+</item>
+
+</taglist>
-<marker id="services"/>
</desc>
</func>
@@ -1437,23 +1596,22 @@ returned.</p>
<name>services() -> [SvcName]</name>
<fsummary>Return the list of started services.</fsummary>
<type>
-<v>SvcName = <seealso marker="#service_name">service_name()</seealso></v>
+<v>SvcName = &service_name;</v>
</type>
<desc>
<p>
Return the list of started services.</p>
-<marker id="session_id"/>
</desc>
</func>
<!-- ===================================================================== -->
<func>
-<name>session_id(Ident) -> <seealso marker="diameter_dict#DATA_TYPES">OctetString()</seealso></name>
+<name>session_id(Ident) -> &dict_OctetString;</name>
<fsummary>Return a value for a Session-Id AVP.</fsummary>
<type>
-<v>Ident = <seealso marker="diameter_dict#DATA_TYPES">DiameterIdentity()</seealso></v>
+<v>Ident = &dict_DiameterIdentity;</v>
</type>
<desc>
<p>
@@ -1464,7 +1622,6 @@ The value has the form required by section 8.8 of RFC 3588.
Ident should be the Origin-Host of the peer from which
the message containing the returned value will be sent.</p>
-<marker id="start"/>
</desc>
</func>
@@ -1481,7 +1638,6 @@ The diameter application must be started before starting a service.
In a production system this is typically accomplished by a boot
file, not by calling <c>start/0</c> explicitly.</p>
-<marker id="start_service"/>
</desc>
</func>
@@ -1490,8 +1646,8 @@ file, not by calling <c>start/0</c> explicitly.</p>
<name>start_service(SvcName, Options) -> ok | {error, Reason}</name>
<fsummary>Start a Diameter service.</fsummary>
<type>
-<v>SvcName = <seealso marker="#service_name">service_name()</seealso></v>
-<v>Options = [<seealso marker="#service_opt">service_opt()</seealso>]</v>
+<v>SvcName = &service_name;</v>
+<v>Options = [&service_opt;]</v>
<v>Reason = term()</v>
</type>
<desc>
@@ -1501,8 +1657,7 @@ Start a diameter service.</p>
<p>
A service defines a locally-implemented Diameter node, specifying the
capabilities to be advertised during capabilities exchange.
-Transports are added to a service using <seealso
-marker="#add_transport">add_transport/2</seealso>.
+Transports are added to a service using &add_transport;.
</p>
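+
+<p>
+A minimal configuration for a node supporting only the common
+application might look like this, <c>my_client_cb</c> being a
+hypothetical callback module as documented in &man_app;.</p>
+
+<pre>
+ok = diameter:start_service(SvcName,
+         [{'Origin-Host', "client.example.com"},
+          {'Origin-Realm', "example.com"},
+          {'Vendor-Id', 0},
+          {'Product-Name', "Client"},
+          {'Auth-Application-Id', [0]},
+          {application, [{alias, common},
+                         {dictionary, diameter_gen_base_rfc3588},
+                         {module, my_client_cb}]}])  %% my_client_cb: hypothetical callback module
+</pre>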
<note>
@@ -1513,7 +1668,6 @@ capabilities and restrict its supported Diameter applications so
necessarily the case.</p>
</note>
-<marker id="stop_service"/>
</desc>
</func>
@@ -1528,7 +1682,6 @@ Stop the diameter application.</p>
<p>
</p>
-<marker id="stop_service"/>
</desc>
</func>
@@ -1537,7 +1690,7 @@ Stop the diameter application.</p>
<name>stop_service(SvcName) -> ok | {error, Reason}</name>
<fsummary>Stop a Diameter service.</fsummary>
<type>
-<v>SvcName = <seealso marker="#service_name">service_name()</seealso></v>
+<v>SvcName = &service_name;</v>
<v>Reason = term()</v>
</type>
<desc>
@@ -1547,17 +1700,15 @@ Stop a diameter service.</p>
<p>
Stopping a service causes all associated transport connections to be
broken.
-A DPR message with be sent as in the case of <seealso
-marker="#remove_transport">remove_transport/2</seealso>.</p>
+A DPR message will be sent as in the case of &remove_transport;.</p>
<note>
<p>
-Stopping a transport does not remove any associated transports:
-<seealso marker="#remove_transport">remove_transport/2</seealso> must
+Stopping a service does not remove any associated transports:
+&remove_transport; must
be called to remove transport configuration.</p>
</note>
-<marker id="subscribe"/>
</desc>
</func>
@@ -1567,12 +1718,11 @@ be called to remove transport configuration.</p>
<name>subscribe(SvcName) -> true</name>
<fsummary>Subscribe to event messages.</fsummary>
<type>
-<v>SvcName = <seealso marker="#service_name">service_name()</seealso></v>
+<v>SvcName = &service_name;</v>
</type>
<desc>
<p>
-Subscribe to <c><seealso
-marker="#service_event">service_event()</seealso></c> messages from
+Subscribe to <c>&service_event;</c> messages from
a service.</p>
<p>
@@ -1581,7 +1731,6 @@ that does not yet exist.
Doing so before adding transports is required to guarantee the
reception of all related events.</p>
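+
+<p>
+For example, assuming <c>diameter.hrl</c> has been included for the
+event record, a subscriber might collect events as follows.</p>
+
+<pre>
+true = diameter:subscribe(SvcName),
+receive
+    #diameter_event{service = SvcName, info = Info} ->
+        Info
+end
+</pre>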
-<marker id="unsubscribe"/>
</desc>
</func>
@@ -1591,7 +1740,7 @@ reception of all related events.</p>
<name>unsubscribe(SvcName) -> true</name>
<fsummary>Unsubscribe to event messages.</fsummary>
<type>
-<v>SvcName = <seealso marker="#service_name">service_name()</seealso></v>
+<v>SvcName = &service_name;</v>
</type>
<desc>
<p>
@@ -1608,9 +1757,7 @@ Unsubscribe to event messages from a service.</p>
<title>SEE ALSO</title>
<p>
-<seealso marker="diameter_app">diameter_app(3)</seealso>,
-<seealso marker="diameter_transport">diameter_transport(3)</seealso>,
-<seealso marker="diameter_dict">diameter_dict(4)</seealso></p>
+&man_app;, &man_transport;, &man_dict;</p>
</section>
diff --git a/lib/diameter/doc/src/diameter_app.xml b/lib/diameter/doc/src/diameter_app.xml
index 4a4b212787..304c69ebda 100644
--- a/lib/diameter/doc/src/diameter_app.xml
+++ b/lib/diameter/doc/src/diameter_app.xml
@@ -1,5 +1,10 @@
<?xml version="1.0" encoding="latin1" ?>
-<!DOCTYPE erlref SYSTEM "erlref.dtd">
+<!DOCTYPE erlref SYSTEM "erlref.dtd" [
+ <!ENTITY % also SYSTEM "seealso.ent" >
+ <!ENTITY % here SYSTEM "seehere.ent" >
+ %also;
+ %here;
+]>
<erlref>
<header>
@@ -41,15 +46,13 @@ Callback module of a Diameter application.</modulesummary>
<description>
<p>
-A diameter service as started by <seealso
-marker="diameter#start_service">diameter:start_service/2</seealso>
+A diameter service as started by &mod_start_service;
configures one or more Diameter applications, each of whose
configuration specifies a callback that handles messages specific to
the application.
The messages and AVPs of the application are defined in a
dictionary file whose format is documented in
-<seealso marker="diameter_dict">diameter_dict(4)</seealso>
-while the callback module is documented here.
+&man_dict; while the callback module is documented here.
The callback module implements the Diameter application-specific
functionality of a service.</p>
@@ -60,26 +63,24 @@ The functions themselves are of three distinct flavours:</p>
<list>
<item>
<p>
-<seealso marker="#peer_up">peer_up/3</seealso> and
-<seealso marker="#peer_down">peer_down/3</seealso> signal the
+&peer_up; and &peer_down; signal the
attainment or loss of connectivity with a Diameter peer.</p>
</item>
<item>
<p>
-<seealso marker="#pick_peer">pick_peer/4</seealso>,
-<seealso marker="#prepare_request">prepare_request/3</seealso>,
-<seealso marker="#prepare_retransmit">prepare_retransmit/3</seealso>,
-<seealso marker="#handle_answer">handle_answer/4</seealso>
-and <seealso marker="#handle_error">handle_error/4</seealso> are (or may
-be) called as a consequence of a call to <seealso
-marker="diameter#call">diameter:call/4</seealso> to send an outgoing
+&pick_peer;,
+&prepare_request;,
+&prepare_retransmit;,
+&handle_answer;
+and &handle_error; are (or may be) called as a consequence of a call
+to &mod_call; to send an outgoing
Diameter request message.</p>
</item>
<item>
<p>
-<seealso marker="#handle_request">handle_request/3</seealso>
+&handle_request;
is called in response to an incoming Diameter request message.</p>
</item>
@@ -92,10 +93,9 @@ is called in response to an incoming Diameter request message.</p>
The arities given for the callback functions here assume no extra
arguments.
All functions will also be passed any extra arguments configured with
-the callback module itself when calling <seealso
-marker="diameter#start_service">diameter:start_service/2</seealso>
+the callback module itself when calling &mod_start_service;
and, for the call-specific callbacks, any extra arguments passed to
-<seealso marker="diameter#call">diameter:call/4</seealso>.</p>
+&mod_call;.</p>
</note>
<!-- ===================================================================== -->
@@ -112,7 +112,8 @@ and, for the call-specific callbacks, any extra arguments passed to
<item>
<p>
A record containing the identities of
-the local Diameter node and the remote Diameter peer having an established transport
+the local Diameter node and the remote Diameter peer having an
+established transport
connection, as well as the capabilities as
determined by capabilities exchange.
Each field of the record is a 2-tuple consisting of
@@ -127,7 +128,7 @@ mandatory values as the bare value.</p>
<item>
<p>
The representation of a Diameter message as passed to
-<seealso marker="diameter#call">diameter:call/4</seealso>.
+&mod_call;.
The record representation is as outlined in
<seealso
marker="diameter_dict#MESSAGE_RECORDS">diameter_dict(4)</seealso>:
@@ -144,7 +145,7 @@ whose head is a <c>#diameter_header{}</c> record and whose tail is a list
of <c>#diameter_avp{}</c> records.
This representation is used by diameter itself when relaying requests
as directed by the return value of a
-<seealso marker="#handle_request">handle_request/3</seealso>
+&handle_request;
callback.
It differs from the other two in that it bypasses the checks for
messages that do not agree with their definitions in the dictionary in
@@ -168,13 +169,12 @@ Fields should not be set in return values except as documented.</p>
<tag><c>peer_ref() = term()</c></tag>
<item>
<p>
-A term identifying a transport connection with a Diameter peer.
-Should be treated opaquely.</p>
+A term identifying a transport connection with a Diameter peer.</p>
</item>
<marker id="peer"/>
-<tag><c>peer() = {<seealso marker="#peer_ref">peer_ref()</seealso>, <seealso marker="#capabilities">capabilities()</seealso>}</c></tag>
+<tag><c>peer() = {&peer_ref;, &capabilities;}</c></tag>
<item>
<p>
A tuple representing a Diameter peer connection.</p>
@@ -186,13 +186,9 @@ A tuple representing a Diameter peer connection.</p>
<item>
<p>
The state maintained by the application callback functions
-<seealso marker="#peer_up">peer_up/3</seealso>,
-<seealso marker="#peer_down">peer_down/3</seealso> and (optionally)
-<seealso marker="#pick_peer">pick_peer/4</seealso>.
+&peer_up;, &peer_down; and (optionally) &pick_peer;.
The initial state is configured in the call to
-<seealso
-marker="diameter#start_service">diameter:start_service/2</seealso>
-that configures the application on a service.
+&mod_start_service; that configures the application on a service.
Callback functions returning a state are evaluated in a common
service-specific process while
those not returning state are evaluated in a request-specific
@@ -213,18 +209,33 @@ process.</p>
<name>Mod:peer_up(SvcName, Peer, State) -> NewState</name>
<fsummary>Invoked when a transport connection has been established</fsummary>
<type>
-<v>SvcName = <seealso marker="diameter#service_name">diameter:service_name()</seealso></v>
-<v>Peer = <seealso marker="#peer">peer()</seealso></v>
-<v>State = NewState = <seealso marker="#state">state()</seealso></v>
+<v>SvcName = &mod_service_name;</v>
+<v>Peer = &peer;</v>
+<v>State = NewState = &state;</v>
</type>
<desc>
<p>
-Invoked when a transport connection has been established
-and a successful capabilities exchange has indicated that the peer
-supports the Diameter application of the application on which
-the callback module in question has been configured.</p>
+Invoked to signal the availability of a peer connection.
+In particular, capabilities exchange with the peer has indicated
+support for the application in question, the RFC 3539 watchdog state
+machine for the connection has reached state <c>OKAY</c> and Diameter
+messages can be both sent and received.</p>
+
+<note>
+<p>
+A watchdog state machine can reach state <c>OKAY</c> from state
+<c>SUSPECT</c> without a new capabilities exchange taking place.
+A new transport connection (and capabilities exchange) results in a
+new peer_ref().</p>
+</note>
+
+<note>
+<p>
+There is no requirement that a callback return before incoming
+requests are received: &handle_request; callbacks must be
+handled independently of &peer_up; and &peer_down;.</p>
+</note>
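+
+<p>
+A minimal pair of callbacks maintaining a list of available peers as
+state might look like this.</p>
+
+<pre>
+peer_up(_SvcName, {PeerRef, _Caps} = Peer, Peers) ->
+    [Peer | lists:keydelete(PeerRef, 1, Peers)].
+
+peer_down(_SvcName, {PeerRef, _Caps}, Peers) ->
+    lists:keydelete(PeerRef, 1, Peers).
+</pre>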
-<marker id="peer_down"/>
</desc>
</func>
@@ -232,88 +243,87 @@ the callback module in question has been configured.</p>
<name>Mod:peer_down(SvcName, Peer, State) -> NewState</name>
<fsummary>Invoked when a transport connection has been lost.</fsummary>
<type>
-<v>SvcName = <seealso marker="diameter#service_name">diameter:service_name()</seealso></v>
-<v>Peer = <seealso marker="#peer">peer()</seealso></v>
-<v>State = NewState = <seealso marker="#state">state()</seealso></v>
+<v>SvcName = &mod_service_name;</v>
+<v>Peer = &peer;</v>
+<v>State = NewState = &state;</v>
</type>
<desc>
<p>
-Invoked when a transport connection has been lost following a previous
-call to <seealso marker="#peer_up">peer_up/3</seealso>.</p>
+Invoked to signal that a peer connection is no longer available
+following a previous call to &peer_up;.
+In particular, that the RFC 3539 watchdog state machine for the
+connection has left state <c>OKAY</c> and the peer will no longer be a
+candidate in &pick_peer; callbacks.</p>
-<marker id="pick_peer"/>
</desc>
</func>
<func>
-<name>Mod:pick_peer(Candidates, Reserved, SvcName, State)
- -> {ok, Peer} | {Peer, NewState} | false</name>
+<name>Mod:pick_peer(Candidates, _Reserved, SvcName, State)
+ -> Selection | false</name>
<fsummary>Select a target peer for an outgoing request.</fsummary>
<type>
-<v>Candidates = [<seealso marker="#peer">peer()</seealso>]</v>
-<v>Peer = <seealso marker="#peer">peer()</seealso> | false</v>
-<v>SvcName = <seealso marker="diameter#service_name">diameter:service_name()</seealso></v>
-<v>State = NewState = <seealso marker="#state">state()</seealso></v>
+<v>Candidates = [&peer;]</v>
+<v>SvcName = &mod_service_name;</v>
+<v>State = NewState = &state;</v>
+<v>Selection = {ok, Peer} | {Peer, NewState}</v>
+<v>Peer = &peer; | false</v>
</type>
<desc>
<p>
-Invoked as a consequence of a call to <seealso
-marker="diameter#call">diameter:call/4</seealso> to select a destination
-peer for an outgoing request, the return value indicating the selected
-peer.</p>
+Invoked as a consequence of a call to &mod_call; to select a destination
+peer for an outgoing request.
+The return value indicates the selected peer.</p>
<p>
-The candidate peers list will only include those
-which are selected by any <c>filter</c> option specified in the call to
-<seealso marker="diameter#call">diameter:call/4</seealso>, and only
-those which have indicated support for the Diameter application in
-question.
+The candidate list contains only those peers that have advertised
+support for the Diameter application in question during capabilities
+exchange, that have not been excluded by a <c>filter</c> option in
+the call to &mod_call;
+and whose watchdog state machine is in the <c>OKAY</c> state.
The order of the elements is unspecified except that any
peers whose Origin-Host and Origin-Realm matches that of the
outgoing request (in the sense of a <c>{filter, {all, [host, realm]}}</c>
-option to <seealso marker="diameter#call">diameter:call/4</seealso>)
+option to &mod_call;)
will be placed at the head of the list.</p>
<p>
-The return values <c>false</c> and <c>{false, State}</c> are
-equivalent when callback state is mutable, as are
-<c>{ok, Peer}</c> and <c>{Peer, State}</c>.
-Returning a peer as <c>false</c> causes <c>{error, no_connection}</c>
-to be returned from <seealso marker="diameter#call">diameter:call/4</seealso>.
-Returning a <seealso marker="#peer">peer()</seealso> from an initial
-pick_peer/4 callback will result in a
-<seealso marker="#prepare_request">prepare_request/3</seealso> callback
-followed by either <seealso
-marker="#handle_answer">handle_answer/4</seealso>
-or <seealso marker="#handle_error">handle_error/4</seealso> depending
+A callback that returns a peer() will be followed by a
+&prepare_request;
+callback and, if the latter indicates that the request should be sent,
+by either &handle_answer;
+or &handle_error; depending
on whether or not an answer message is received from the peer.
-If transport with the peer is lost before this then a new <seealso
-marker="#pick_peer">pick_peer/4</seealso> callback takes place to
-select an alternate peer.</p>
-
-<p>
-Note that there is no guarantee that a <seealso
-marker="#pick_peer">pick_peer/4</seealso> callback to select
-an alternate peer will be followed by any additional callbacks, only
-that the initial <seealso
-marker="#pick_peer">pick_peer/4</seealso> will be, since a
+If the transport becomes unavailable after &prepare_request; then a
+new &pick_peer; callback may take place to
+failover to an alternate peer, after which &prepare_retransmit; takes the
+place of &prepare_request; in resending the
+request.
+There is no guarantee that a &pick_peer; callback to select
+an alternate peer will be followed by any additional callbacks since a
retransmission to an alternate peer is abandoned if an answer is
received from a previously selected peer.</p>
+<p>
+Returning <c>false</c> or <c>{false, NewState}</c> causes <c>{error,
+no_connection}</c> to be returned from &mod_call;.</p>
+
+<p>
+The return values <c>false</c> and <c>{false, State}</c> (that is,
+<c>NewState = State</c>) are equivalent, as are <c>{ok, Peer}</c> and
+<c>{Peer, State}</c>.</p>
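+
+<p>
+A minimal callback that selects the first candidate without mutating
+state might look like this.</p>
+
+<pre>
+pick_peer([Peer | _], _, _SvcName, _State) ->
+    {ok, Peer};
+pick_peer([], _, _SvcName, _State) ->
+    false.
+</pre>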
+
<note>
<p>
-<c>{Peer, NewState}</c> and its equivalents can only be returned if
-the Diameter application in question was
-configured with the <seealso
-marker="diameter#application_opt">diameter:application_opt()</seealso>
-<c>{call_mutates_state, true}</c>.
+The return value <c>{Peer, NewState}</c> is only allowed if
+the Diameter application in question was configured with the
+&mod_application_opt; <c>{call_mutates_state, true}</c>.
Otherwise, the <c>State</c> argument is always
the initial value as configured on the application, not any subsequent
-value returned by a <seealso marker="#peer_up">peer_up/3</seealso>
-or <seealso marker="#peer_down">peer_down/3</seealso> callback.</p>
+value returned by a &peer_up;
+or &peer_down; callback.</p>
</note>
-<marker id="prepare_request"/>
</desc>
</func>
@@ -322,74 +332,81 @@ or <seealso marker="#peer_down">peer_down/3</seealso> callback.</p>
<name>Mod:prepare_request(Packet, SvcName, Peer) -> Action</name>
<fsummary>Return a request for encoding and transport.</fsummary>
<type>
-<v>Packet = <seealso marker="#packet">packet()</seealso></v>
-<v>SvcName = <seealso marker="diameter#service_name">diameter:service_name()</seealso></v>
-<v>Peer = <seealso marker="#peer">peer()</seealso></v>
-<v>Action = {send, <seealso marker="#packet">packet()</seealso> | <seealso marker="#message">message()</seealso>} | {discard, Reason} | discard</v>
+<v>Packet = &packet;</v>
+<v>SvcName = &mod_service_name;</v>
+<v>Peer = &peer;</v>
+<v>Action = Send | Discard | {eval_packet, Action, PostF}</v>
+<v>Send = {send, &packet; | &message;}</v>
+<v>Discard = {discard, Reason} | discard</v>
+<v>PostF = &mod_evaluable;</v>
</type>
<desc>
<p>
Invoked to return a request for encoding and transport.
-Allows the sender to access the selected peer's capabilities
-in order to set (for example) <c>Destination-Host</c> and/or
-<c>Destination-Realm</c> in the outgoing request, although the
-callback need not be limited to this usage.
+Allows the sender to use the selected peer's capabilities
+to modify the outgoing request.
Many implementations may simply want to return <c>{send, Packet}</c>.</p>
<p>
-A returned <seealso marker="#packet">packet()</seealso> should set the request to be encoded in its
+A returned &packet; should set the
+request to be encoded in its
<c>msg</c> field and can set the <c>transport_data</c> field in order
-to pass information to the transport module.
-Extra arguments passed to <seealso
-marker="diameter#call">diameter:call/4</seealso> can be used to
-communicate transport data to the callback.
-A returned <seealso marker="#packet">packet()</seealso> can also set the <c>header</c> field to a
-<c>#diameter_header{}</c> record in order to specify values that should
-be preserved in the outgoing request, although this should typically
-not be necessary and allows the callback to set header values
-inappropriately.
+to pass information to the transport process.
+Extra arguments passed to &mod_call; can be used to
+communicate transport (or any other) data to the callback.</p>
+
+<p>
+A returned &packet; can set
+the <c>header</c> field to a
+<c>#diameter_header{}</c> to specify values that should
+be preserved in the outgoing request, values otherwise being those in
+the header record contained in <c>Packet</c>.
A returned <c>length</c>, <c>cmd_code</c> or <c>application_id</c> is
ignored.</p>
<p>
+A returned <c>PostF</c> will be evaluated on any encoded
+<c>#diameter_packet{}</c> prior to transmission, the <c>bin</c> field
+containing the encoded binary.
+The return value is ignored.</p>
+
+<p>
Returning <c>{discard, Reason}</c> causes the request to be aborted
-and the <seealso
-marker="diameter#call">diameter:call/4</seealso> for which the
+and the &mod_call; for which the
callback has taken place to return <c>{error, Reason}</c>.
Returning <c>discard</c> is equivalent to returning <c>{discard,
discarded}</c>.</p>
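+
+<p>
+A callback that sets Destination-Host and Destination-Realm from the
+selected peer's capabilities might look like this,
+<c>#my_request{}</c> being a hypothetical record from the
+application's dictionary.</p>
+
+<pre>
+prepare_request(#diameter_packet{msg = Req}, _SvcName, {_PeerRef, Caps}) ->
+    #diameter_caps{origin_host = {_, DH},
+                   origin_realm = {_, DR}}
+        = Caps,
+    %% #my_request{} is hypothetical: use the request record of the
+    %% dictionary in question.
+    {send, Req#my_request{'Destination-Host' = DH,
+                          'Destination-Realm' = DR}}.
+</pre>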
-<marker id="prepare_retransmit"/>
</desc>
</func>
<func>
-<name>Mod:prepare_retransmit(Packet, SvcName, Peer) -> Result</name>
+<name>Mod:prepare_retransmit(Packet, SvcName, Peer) -> Action</name>
<fsummary>Return a request for encoding and retransmission.</fsummary>
<type>
-<v>Packet = <seealso marker="#packet">packet()</seealso></v>
-<v>SvcName = <seealso marker="diameter#service_name">diameter:service_name()</seealso></v>
-<v>Peer = <seealso marker="#peer">peer()</seealso></v>
-<v>Result = {send, <seealso marker="#packet">packet()</seealso> | <seealso marker="#message">message()</seealso>} | {discard, Reason} | discard</v>
+<v>Packet = &packet;</v>
+<v>SvcName = &mod_service_name;</v>
+<v>Peer = &peer;</v>
+<v>Action = Send | Discard | {eval_packet, Action, PostF}</v>
+<v>Send = {send, &packet; | &message;}</v>
+<v>Discard = {discard, Reason} | discard</v>
+<v>PostF = &mod_evaluable;</v>
</type>
<desc>
<p>
Invoked to return a request for encoding and retransmission.
-Has the same role as <seealso
-marker="#prepare_request">prepare_request/3</seealso> in the case that
+Has the same role as &prepare_request; in the case that
a peer connection is lost and an alternate peer selected but the
-argument <seealso marker="#packet">packet()</seealso> is as returned by the initial
-<c>prepare_request/3</c>.</p>
+argument &packet; is as returned
+by the initial &prepare_request;.</p>
<p>
Returning <c>{discard, Reason}</c> causes the request to be aborted
-and a <seealso
-marker="#handle_error">handle_error/4</seealso> callback to
+and a &handle_error; callback to
take place with <c>Reason</c> as initial argument.
Returning <c>discard</c> is equivalent to returning <c>{discard,
discarded}</c>.</p>
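+
+<p>
+Implementations requiring no retransmission-specific handling might
+simply reuse their &prepare_request; callback.</p>
+
+<pre>
+prepare_retransmit(Packet, SvcName, Peer) ->
+    prepare_request(Packet, SvcName, Peer).
+</pre>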
-<marker id="handle_answer"/>
</desc>
</func>
@@ -397,51 +414,43 @@ discarded}</c>.</p>
<name>Mod:handle_answer(Packet, Request, SvcName, Peer) -> Result</name>
<fsummary>Receive an answer message from a peer.</fsummary>
<type>
-<v>Packet = <seealso marker="#packet">packet()</seealso></v>
-<v>Request = <seealso marker="#message">message()</seealso></v>
-<v>SvcName = <seealso marker="diameter#service_name">diameter:service_name()</seealso></v>
-<v>Peer = <seealso marker="#peer">peer()</seealso></v>
+<v>Packet = &packet;</v>
+<v>Request = &message;</v>
+<v>SvcName = &mod_service_name;</v>
+<v>Peer = &peer;</v>
<v>Result = term()</v>
</type>
<desc>
<p>
Invoked when an answer message is received from a peer.
-The return value is returned from the call to <seealso
-marker="diameter#call">diameter:call/4</seealso> for which the
-callback takes place unless the <c>detach</c> option was
-specified.</p>
+The return value is returned from &mod_call; unless the
+<c>detach</c> option was specified.</p>
<p>
-The decoded answer record is in the <c>msg</c> field of the argument
-<seealso marker="#packet">packet()</seealso>,
-the undecoded binary in the <c>packet</c> field.
+The decoded answer record and undecoded binary are in the <c>msg</c>
+and <c>bin</c> fields of the argument
+&packet; respectively.
<c>Request</c> is the outgoing request message as was returned from
-<seealso marker="#prepare_request">prepare_request/3</seealso> or
-<seealso marker="#prepare_retransmit">prepare_retransmit/3</seealso>
-before the request was passed to the transport.</p>
+&prepare_request; or &prepare_retransmit;.</p>
<p>
-For any given call to <seealso
-marker="diameter#call">diameter:call/4</seealso> there is at most one
-call to the handle_answer callback of the application in question: any
+For any given call to &mod_call; there is at most one
+&handle_answer; callback: any
duplicate answer (due to retransmission or otherwise) is discarded.
-Similarly, only one of <c>handle_answer/4</c> or <c>handle_error/4</c> is
-called for any given request.</p>
+Similarly, only one of &handle_answer; or
+&handle_error; is called.</p>
<p>
By default, an incoming answer message that cannot be successfully
-decoded causes the request process in question to fail, causing the
-relevant call to <seealso
-marker="diameter#call">diameter:call/4</seealso>
-to return <c>{error, failure} (unless the <c>detach</c> option was
-specified)</c>.
-In particular, there is no <c>handle_error/4</c> callback in this
+decoded causes the request process to fail, causing
+&mod_call;
+to return <c>{error, failure}</c> unless the <c>detach</c> option was
+specified.
+In particular, there is no &handle_error; callback in this
case.
-Application configuration may change this behaviour as described for
-<seealso
-marker="diameter#start_service">diameter:start_service/2</seealso>.</p>
+The &mod_application_opt;
+<c>answer_errors</c> can be set to change this behaviour.</p>
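+
+<p>
+A callback that simply hands the decoded answer back to the caller of
+&mod_call; might look like this.</p>
+
+<pre>
+handle_answer(#diameter_packet{msg = Ans}, _Request, _SvcName, _Peer) ->
+    {ok, Ans}.
+</pre>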
-<marker id="handle_error"/>
</desc>
</func>
@@ -450,30 +459,26 @@ marker="diameter#start_service">diameter:start_service/2</seealso>.</p>
<fsummary>Return an error from a outgoing request.</fsummary>
<type>
<v>Reason = timeout | failover | term()</v>
-<v>Request = <seealso marker="#message">message()</seealso></v>
-<v>SvcName = <seealso marker="diameter#service_name">diameter:service_name()</seealso></v>
-<v>Peer = <seealso marker="#peer">peer()</seealso></v>
+<v>Request = &message;</v>
+<v>SvcName = &mod_service_name;</v>
+<v>Peer = &peer;</v>
<v>Result = term()</v>
</type>
<desc>
<p>
-Invoked when an error occurs before an answer message is received from
-a peer in response to an outgoing request.
-The return value is returned from the call to <seealso
-marker="diameter#call">diameter:call/4</seealso> for which the
-callback takes place (unless the <c>detach</c> option was
-specified).</p>
+Invoked when an error occurs before an answer message is received
+in response to an outgoing request.
+The return value is returned from &mod_call; unless the
+<c>detach</c> option was specified.</p>
<p>
Reason <c>timeout</c> indicates that an answer message has not been
-received within the required time.
+received within the time specified with the corresponding &mod_call_opt;.
Reason <c>failover</c> indicates
that the transport connection to the peer to which the request has
-been sent has been lost but that not alternate node was available,
-possibly because a <seealso marker="#pick_peer">pick_peer/4</seealso>
-callback returned false.</p>
+been sent has become unavailable and that an alternate peer was
+not selected.</p>
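+
+<p>
+A callback that simply propagates the error to the caller of
+&mod_call; might look like this.</p>
+
+<pre>
+handle_error(Reason, _Request, _SvcName, _Peer) ->
+    {error, Reason}.
+</pre>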
-<marker id="handle_request"/>
</desc>
</func>
@@ -481,21 +486,23 @@ callback returned false.</p>
<name>Mod:handle_request(Packet, SvcName, Peer) -> Action</name>
<fsummary>Receive an incoming request.</fsummary>
<type>
-<v>Packet = <seealso marker="#packet">packet()</seealso></v>
+<v>Packet = &packet;</v>
<v>SvcName = term()</v>
-<v>Peer = <seealso marker="#peer">peer()</seealso></v>
-<v>Action = Reply | {relay, [Opt]} | discard | {eval, Action, PostF}</v>
-<v>Reply = {reply, <seealso marker="#message">message()</seealso>}
+<v>Peer = &peer;</v>
+<v>Action = Reply
+ | {relay, [Opt]}
+ | discard
+ | {eval|eval_packet, Action, PostF}</v>
+<v>Reply = {reply, &packet; | &message;}
| {protocol_error, 3000..3999}</v>
-<v>Opt = <seealso marker="diameter#call_opt">diameter:call_opt()</seealso></v>
-<v>PostF = <seealso marker="diameter#evaluable">diameter:evaluable()</seealso></v>
+<v>Opt = &mod_call_opt;</v>
+<v>PostF = &mod_evaluable;</v>
</type>
<desc>
<p>
Invoked when a request message is received from a peer.
The application in which the callback takes place (that is, the
-callback module as configured with <seealso
-marker="diameter#start_service">diameter:start_service/2</seealso>)
+callback module as configured with &mod_start_service;)
is determined by the Application Identifier in the header of the
incoming request message, the selected module being the one
whose corresponding <seealso
@@ -504,19 +511,19 @@ itself as defining either the application in question or the Relay
application.</p>
<p>
-The argument <seealso marker="#packet">packet()</seealso> has the following signature.</p>
+The argument &packet; has the following signature.</p>
-<code>
+<pre>
#diameter_packet{header = #diameter_header{},
avps = [#diameter_avp{}],
msg = record() | undefined,
- errors = [<seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso> | {<seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>, #diameter_avp{}}],
+ errors = [&dict_Unsigned32; | {&dict_Unsigned32;, #diameter_avp{}}],
bin = binary(),
transport_data = term()}
-</code>
+</pre>
<p>
-The <c>msg</c> field will be <c>undefined</c> only in case the request has
+The <c>msg</c> field will be <c>undefined</c> if the request has
been received in the relay application.
Otherwise it contains the record representing the request as outlined
in <seealso
@@ -527,8 +534,8 @@ The <c>errors</c> field specifies any Result-Code's identifying errors
that were encountered in decoding the request.
In this case diameter will set both Result-Code and
Failed-AVP AVP's in a returned
-answer <seealso marker="#message">message()</seealso> before sending it to the peer:
-the returned <seealso marker="#message">message()</seealso> need only set any other required AVP's.
+answer &message; before sending it to the peer:
+the returned &message; need only set any other required AVP's.
Note that the errors detected by diameter are all of the 5xxx series
(Permanent Failures).
The <c>errors</c> list is empty if the request has been received in
@@ -538,19 +545,24 @@ the relay application.</p>
The <c>transport_data</c> field contains an arbitrary term passed into
diameter from the transport module in question, or the atom
<c>undefined</c> if the transport specified no data.
-The term is preserved in the <seealso marker="#packet">packet()</seealso> containing any answer message
-sent back to the transport process unless another value is explicitly
-specified.</p>
+The term is preserved if a &message; is returned but must be set
+explicitly in a returned &packet;.</p>
<p>
The semantics of each of the possible return values are as follows.</p>
<taglist>
-<tag><c>{reply, <seealso marker="#message">message()</seealso>}</c></tag>
+<tag><c>{reply, &packet; | &message;}</c></tag>
<item>
<p>
-Send the specified answer message to the peer.</p>
+Send the specified answer message to the peer.
+In the case of a &packet;, the
+message to be sent must be set in the
+<c>msg</c> field and the <c>header</c> field can be set to a
+<c>#diameter_header{}</c> to specify values that should be
+preserved in the outgoing answer, appropriate values otherwise
+being set by diameter.</p>
</item>
<tag><c>{protocol_error, 3000..3999}</c></tag>
@@ -559,9 +571,9 @@ Send the specified answer message to the peer.</p>
Send an answer message to the peer containing the specified
protocol error.
Equivalent to</p>
-<code>
+<pre>
{reply, ['answer-message' | Avps]}
-</code>
+</pre>
<p>
where <c>Avps</c> sets the Origin-Host, Origin-Realm, the specified
Result-Code and (if the request sent one) Session-Id AVP's.</p>
@@ -580,37 +592,47 @@ Relay a request to another peer in the role of a Diameter relay agent.
If a routing loop is detected then the request is answered with
3005 (DIAMETER_LOOP_DETECTED).
Otherwise a Route-Record AVP (containing the sending peer's Origin-Host) is
-added to the request and <seealso marker="#pick_peer">pick_peer/4</seealso>
-and subsequent callbacks take place just as if <seealso
-marker="diameter#call">diameter:call/4</seealso> had been called
+added to the request and &pick_peer;
+and subsequent callbacks take place just as if &mod_call; had been called
explicitly.
The End-to-End Identifier of the incoming request is preserved in the
header of the relayed request.</p>
<p>
The returned <c>Opts</c> should not specify <c>detach</c>.
-A subsequent <seealso marker="#handle_answer">handle_answer/4</seealso>
+A subsequent &handle_answer;
callback for the relayed request must return its first
argument, the <c>#diameter_packet{}</c> record containing the answer
message.
Note that the <c>extra</c> option can be specified to supply arguments
that can distinguish the relay case from others if so desired.
Any other return value (for example, from a
-<seealso marker="#handle_error">handle_error/4</seealso> callback)
+&handle_error; callback)
causes the request to be answered with 3002 (DIAMETER_UNABLE_TO_DELIVER).</p>
</item>
<tag><c>discard</c></tag>
<item>
<p>
-Discard the request.</p>
+Discard the request.
+No answer message is sent to the peer.</p>
</item>
<tag><c>{eval, Action, PostF}</c></tag>
<item>
<p>
Handle the request as if <c>Action</c> has been returned and then
-evaluate <c>PostF</c> in the request process.</p>
+evaluate <c>PostF</c> in the request process.
+The return value is ignored.</p>
+</item>
+
+<tag><c>{eval_packet, Action, PostF}</c></tag>
+<item>
+<p>
+Like <c>eval</c> but evaluate <c>PostF</c> on any encoded
+<c>#diameter_packet{}</c> prior to transmission, the <c>bin</c> field
+containing the encoded binary.
+The return value is ignored.</p>
</item>
</taglist>
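+
+<p>
+A callback that relays in the relay application and otherwise answers
+the decoded request might look like this, <c>answer/1</c> being a
+hypothetical function constructing an answer &message;.</p>
+
+<pre>
+%% The msg field is undefined only in the relay application.
+handle_request(#diameter_packet{msg = undefined}, _SvcName, _Peer) ->
+    {relay, []};
+handle_request(#diameter_packet{msg = Req}, _SvcName, _Peer) ->
+    {reply, answer(Req)}.  %% answer/1: hypothetical answer construction
+</pre>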
diff --git a/lib/diameter/doc/src/diameter_compile.xml b/lib/diameter/doc/src/diameter_compile.xml
index 7a6ca48798..eb6de80c11 100644
--- a/lib/diameter/doc/src/diameter_compile.xml
+++ b/lib/diameter/doc/src/diameter_compile.xml
@@ -1,5 +1,10 @@
<?xml version="1.0" encoding="iso-8859-1" ?>
-<!DOCTYPE comref SYSTEM "comref.dtd">
+<!DOCTYPE comref SYSTEM "comref.dtd" [
+ <!ENTITY % also SYSTEM "seealso.ent" >
+ <!ENTITY % here SYSTEM "seehere.ent" >
+ %also;
+ %here;
+]>
<comref>
<header>
@@ -122,7 +127,7 @@ Returns 0 on success, non-zero on failure.</p>
<title>SEE ALSO</title>
<p>
-<seealso marker="diameter_dict">diameter_dict(4)</seealso></p>
+&man_dict;</p>
</section>
diff --git a/lib/diameter/doc/src/diameter_dict.xml b/lib/diameter/doc/src/diameter_dict.xml
index 98adebf145..4a6cccc276 100644
--- a/lib/diameter/doc/src/diameter_dict.xml
+++ b/lib/diameter/doc/src/diameter_dict.xml
@@ -1,5 +1,10 @@
<?xml version="1.0" encoding="latin1" ?>
-<!DOCTYPE erlref SYSTEM "fileref.dtd">
+<!DOCTYPE erlref SYSTEM "fileref.dtd" [
+ <!ENTITY % also SYSTEM "seealso.ent" >
+ <!ENTITY % here SYSTEM "seehere.ent" >
+ %also;
+ %here;
+]>
<fileref>
<header>
@@ -40,8 +45,7 @@ under the License.
<description>
<p>
-A diameter service as configured with <seealso
-marker="diameter#start_service">diameter:start_service/2</seealso>
+A diameter service as configured with &mod_start_service;
specifies one or more supported Diameter applications.
Each Diameter application specifies a dictionary module that knows how
to encode and decode its messages and AVPs.
@@ -58,8 +62,7 @@ resulting dictionaries modules on a service.</p>
The codec generation also results in a hrl file that defines records
for the messages and grouped AVPs defined for the application, these
records being what a user of the diameter application sends and receives.
-(Modulo other available formats as discussed in <seealso
-marker="diameter_app">diameter_app(3)</seealso>.)
+(Modulo other available formats as discussed in &man_app;.)
These records and the underlying Erlang data types corresponding to
Diameter data formats are discussed in <seealso
marker="#MESSAGE_RECORDS">MESSAGE RECORDS</seealso> and <seealso
@@ -125,9 +128,9 @@ is used to identify the relevant dictionary module.</p>
<p>
Example:</p>
-<code>
+<pre>
@id 16777231
-</code>
+</pre>
</item>
@@ -146,9 +149,9 @@ with existing modules in the system.</p>
<p>
Example:</p>
-<code>
+<pre>
@name etsi_e2
-</code>
+</pre>
</item>
@@ -169,9 +172,9 @@ different Diameter applications.</p>
<p>
Example:</p>
-<code>
+<pre>
@prefix etsi_e2
-</code>
+</pre>
</item>
@@ -189,9 +192,9 @@ The section has empty content.</p>
<p>
Example:</p>
-<code>
+<pre>
@vendor 13019 ETSI
-</code>
+</pre>
</item>
@@ -205,13 +208,13 @@ The section content consists of AVP names.</p>
<p>
Example:</p>
-<code>
+<pre>
@avp_vendor_id 2937
WWW-Auth
Domain-Index
Region-Set
-</code>
+</pre>
</item>
@@ -244,9 +247,9 @@ All dictionaries should typically inherit RFC3588 AVPs from
<p>
Example:</p>
-<code>
+<pre>
@inherits diameter_gen_base_rfc3588
-</code>
+</pre>
</item>
@@ -268,12 +271,12 @@ none are to be set.</p>
<p>
Example:</p>
-<code>
+<pre>
@avp_types
Location-Information 350 Grouped MV
Requested-Information 353 Enumerated V
-</code>
+</pre>
<warning>
<p>
@@ -298,11 +301,11 @@ encode/decode.</p>
<p>
Example:</p>
-<code>
+<pre>
@custom_types rfc4005_avps
Framed-IP-Address
-</code>
+</pre>
</item>
<tag><c>@codecs Mod</c></tag>
@@ -315,11 +318,11 @@ Like <c>@custom_types</c> but requires the specified module to export
<p>
Example:</p>
-<code>
+<pre>
@codecs rfc4005_avps
Framed-IP-Address
-</code>
+</pre>
</item>
<tag><c>@messages</c></tag>
@@ -330,7 +333,7 @@ The section content consists of definitions of the form specified in
section 3.2 of RFC 3588, "Command Code ABNF specification".</p>
<!-- RFC 4740 RTR/RTA -->
-<code>
+<pre>
@messages
RTR ::= &lt; Diameter Header: 287, REQ, PXY >
@@ -363,7 +366,7 @@ RTA ::= &lt; Diameter Header: 287, PXY >
* [ Proxy-Info ]
* [ Route-Record ]
* [ AVP ]
-</code>
+</pre>
</item>
@@ -378,14 +381,14 @@ section 4.4 of RFC 3588, "Grouped AVP Values".</p>
<p>
Example:</p>
-<code>
+<pre>
@grouped
SIP-Deregistration-Reason ::= &lt; AVP Header: 383 >
{ SIP-Reason-Code }
[ SIP-Reason-Info ]
* [ AVP ]
-</code>
+</pre>
<p>
Specifying a Vendor-Id in the definition of a grouped AVP is
@@ -408,14 +411,14 @@ otherwise defined in another dictionary.</p>
<p>
Example:</p>
-<code>
+<pre>
@enum SIP-Reason-Code
PERMANENT_TERMINATION 0
NEW_SIP_SERVER_ASSIGNED 1
SIP_SERVER_CHANGE 2
REMOVE_SIP_SERVER 3
-</code>
+</pre>
</item>
<tag><c>@end</c></tag>
@@ -450,22 +453,22 @@ contained in the message or grouped AVP in the order specified in the
definition in question.
For example, the grouped AVP</p>
-<code>
+<pre>
SIP-Deregistration-Reason ::= &lt; AVP Header: 383 >
{ SIP-Reason-Code }
[ SIP-Reason-Info ]
* [ AVP ]
-</code>
+</pre>
<p>
will result in the following record definition given an empty
prefix.</p>
-<code>
+<pre>
-record('SIP-Deregistration-Reason' {'SIP-Reason-Code',
'SIP-Reason-Info',
'AVP'}).
-</code>
+</pre>
<p>
The values encoded in the fields of generated records depend on the
@@ -488,11 +491,9 @@ types being described below.</p>
The data formats defined in sections 4.2 ("Basic AVP Data
Formats") and 4.3 ("Derived AVP Data Formats") of RFC 3588 are encoded
as values of the types defined here.
-Values are passed to <seealso
-marker="diameter#call">diameter:call/4</seealso>
+Values are passed to &mod_call;
in a request record when sending a request, returned in a resulting
-answer record and passed to a <seealso
-marker="diameter_app#handle_request">handle_request</seealso>
+answer record and passed to a &app_handle_request;
callback upon reception of an incoming request.</p>
<p>
@@ -507,7 +508,7 @@ callback upon reception of an incoming request.</p>
<marker id="Float64"/>
<marker id="Grouped"/>
-<code>
+<pre>
OctetString() = [0..255]
Integer32() = -2147483647..2147483647
Integer64() = -9223372036854775807..9223372036854775807
@@ -516,7 +517,7 @@ Unsigned64() = 0..18446744073709551615
Float32() = '-infinity' | float() | infinity
Float64() = '-infinity' | float() | infinity
Grouped() = record()
-</code>
+</pre>
<p>
On encode, an OctetString() can be specified as an iolist(),
@@ -530,10 +531,10 @@ section.</p>
<em>Derived AVP Data Formats</em></p>
<marker id="Address"/>
-<code>
+<pre>
Address() = OctetString()
| tuple()
-</code>
+</pre>
<p>
On encode, an OctetString() IPv4 address is parsed in the usual
@@ -545,7 +546,7 @@ An IPv6 tuple() has length 8 and contains values of type 0..65535.
The tuple representation is used on decode.</p>
<marker id="Time"/>
-<code>
+<pre>
Time() = {date(), time()}
where
@@ -559,7 +560,7 @@ where
Hour = 0..23
Minute = 0..59
Second = 0..59
-</code>
+</pre>
<p>
Additionally, values that can be encoded are
@@ -569,9 +570,9 @@ In particular, only values between <c>{{1968,1,20},{3,14,8}}</c>
and <c>{{2104,2,26},{9,42,23}}</c> (both inclusive) can be encoded.</p>
<marker id="UTF8String"/>
-<code>
+<pre>
UTF8String() = [integer()]
-</code>
+</pre>
<p>
List elements are the UTF-8 encodings of the individual characters
@@ -579,15 +580,15 @@ in the string.
Invalid codepoints will result in encode/decode failure.</p>
<marker id="DiameterIdentity"/>
-<code>
+<pre>
DiameterIdentity() = OctetString()
-</code>
+</pre>
<p>
A value must have length at least 1.</p>
<marker id="DiameterURI"/>
-<code>
+<pre>
DiameterURI() = OctetString()
| #diameter_URI{type = Type,
fqdn = FQDN,
@@ -602,7 +603,7 @@ where
Port = integer()
Transport = sctp | tcp
Protocol = diameter | radius | 'tacacs+'
-</code>
+</pre>
<p>
On encode, fields port, transport and protocol default to 3868, sctp
@@ -612,9 +613,9 @@ section 4.3 of RFC 3588.
The record representation is used on decode.</p>
<marker id="Enumerated"/>
-<code>
+<pre>
Enumerated() = Integer32()
-</code>
+</pre>
<p>
On encode, values can be specified using the macros defined in a
@@ -622,10 +623,10 @@ dictionary's hrl file.</p>
<marker id="IPFilterRule"/>
<marker id="QoSFilterRule"/>
-<code>
+<pre>
IPFilterRule() = OctetString()
QoSFilterRule() = OctetString()
-</code>
+</pre>
<p>
Values of these types are not currently parsed by diameter.</p>
@@ -639,9 +640,7 @@ Values of these types are not currently parsed by diameter.</p>
<title>SEE ALSO</title>
<p>
-<seealso marker="diameterc">diameterc(1)</seealso>,
-<seealso marker="diameter">diameter(3)</seealso>,
-<seealso marker="diameter_app">diameter_app(3)</seealso></p>
+&man_compile;, &man_main;, &man_app;</p>
</section>
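
To make the representations above concrete, the following is a small hedged sketch of Erlang terms with the shapes these types take on; the AVP names and the module are invented for illustration and do not belong to any particular dictionary.

-module(dict_type_sketch).
-export([values/0]).

%% Hypothetical AVP/value pairs illustrating the type representations
%% documented on this page.
values() ->
    [{'Origin-Host', "peer.example.com"},         %% DiameterIdentity(), an OctetString()
     {'Host-IP-Address', [{127,0,0,1}]},          %% Address() as an IPv4 tuple()
     {'Event-Timestamp', {{2012,1,1},{12,0,0}}},  %% Time() = {date(), time()}
     {'User-Name', "bob"},                        %% UTF8String() = [integer()]
     {'Origin-State-Id', 1325419200}].            %% Unsigned32()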
diff --git a/lib/diameter/doc/src/diameter_sctp.xml b/lib/diameter/doc/src/diameter_sctp.xml
index 955169349c..a023a9bc08 100644
--- a/lib/diameter/doc/src/diameter_sctp.xml
+++ b/lib/diameter/doc/src/diameter_sctp.xml
@@ -1,5 +1,10 @@
<?xml version="1.0" encoding="latin1" ?>
-<!DOCTYPE erlref SYSTEM "erlref.dtd">
+<!DOCTYPE erlref SYSTEM "erlref.dtd" [
+ <!ENTITY % also SYSTEM "seealso.ent" >
+ <!ENTITY % here SYSTEM "seehere.ent" >
+ %also;
+ %here;
+]>
<erlref>
<header>
@@ -38,12 +43,12 @@ under the License.
<description>
<p>
-This module implements diameter transport over SCTP using gen_sctp.
+This module implements diameter transport over SCTP using <seealso
+marker="kernel:gen_sctp">gen_sctp</seealso>.
It can be specified as the value of a transport_module option to
-<seealso
-marker="diameter#add_transport">diameter:add_transport/2</seealso>
+&mod_add_transport;
and implements the behaviour documented in
-<seealso marker="diameter_transport">diameter_transport(3)</seealso>.</p>
+&man_transport;.</p>
<marker id="start"/>
</description>
@@ -58,7 +63,7 @@ and implements the behaviour documented in
<fsummary>Start a transport process.</fsummary>
<type>
<v>Type = connect | accept</v>
-<v>Ref = <seealso marker="diameter#transport_ref">diameter:transport_ref()</seealso></v>
+<v>Ref = &mod_transport_ref;</v>
<v>Svc = #diameter_service{}</v>
<v>Opt = {raddr, <seealso marker="kernel:inet#type-ip_address">inet:ip_address()</seealso>} | {rport, integer()} | term()</v>
<v>Pid = pid()</v>
@@ -68,8 +73,7 @@ and implements the behaviour documented in
<desc>
<p>
-The start function required by <seealso
-marker="diameter_transport#start">diameter_transport(3)</seealso>.</p>
+The start function required by &man_transport;.</p>
<p>
The only diameter_sctp-specific argument is the options list.
@@ -114,16 +118,13 @@ diameter_sctp uses the <c>transport_data</c> field of
the <c>#diameter_packet{}</c> record to communicate the stream on which an
inbound message has been received, or on which an outbound message
should be sent: the value will be of the form <c>{stream, Id}</c>
-on an inbound message passed to a <seealso
-marker="diameter_app#handle_request">handle_request</seealso> or <seealso
-marker="diameter_app#handle_answer">handle_answer</seealso> callback.
+on an inbound message passed to a &app_handle_request; or
+&app_handle_answer; callback.
For an outbound message, either <c>undefined</c> (explicitly or
by receiving the outbound message as a <c>binary()</c>) or a tuple
-should be set in the return value of <seealso
-marker="diameter_app#handle_request">handle_request</seealso>
+should be set in the return value of &app_handle_request;
(typically by retaining the value passed into this function)
-or <seealso
-marker="diameter_app#prepare_request">prepare_request</seealso>.
+or &app_prepare_request;.
The value <c>undefined</c> uses a "next outbound stream" id and
increments this modulo the total number outbound streams.
That is, successive values of <c>undefined</c> cycle through all
@@ -144,7 +145,8 @@ outbound streams.</p>
<title>SEE ALSO</title>
<p>
-<seealso marker="diameter_transport">diameter_transport(3)</seealso>,
+&man_main;,
+&man_transport;,
<seealso marker="kernel:gen_sctp">gen_sctp(3)</seealso>,
<seealso marker="kernel:inet">inet(3)</seealso></p>
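
As a hedged illustration of the transport_data usage described above, a prepare_request/3 callback (see diameter_app(3)) could pin outgoing requests to a fixed stream as below; the module name and stream id are placeholders.

-module(example_cb).
-include_lib("diameter/include/diameter.hrl").
-export([prepare_request/3]).

%% Set {stream, Id} in transport_data so that diameter_sctp sends the
%% outgoing request on outbound stream 0.
prepare_request(#diameter_packet{} = Pkt, _SvcName, _Peer) ->
    {send, Pkt#diameter_packet{transport_data = {stream, 0}}}.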
diff --git a/lib/diameter/doc/src/diameter_tcp.xml b/lib/diameter/doc/src/diameter_tcp.xml
index 3ffcebfd90..be8a938115 100644
--- a/lib/diameter/doc/src/diameter_tcp.xml
+++ b/lib/diameter/doc/src/diameter_tcp.xml
@@ -1,5 +1,10 @@
<?xml version="1.0" encoding="latin1" ?>
-<!DOCTYPE erlref SYSTEM "erlref.dtd">
+<!DOCTYPE erlref SYSTEM "erlref.dtd" [
+ <!ENTITY % also SYSTEM "seealso.ent" >
+ <!ENTITY % here SYSTEM "seehere.ent" >
+ %also;
+ %here;
+]>
<erlref>
<header>
@@ -41,10 +46,9 @@ under the License.
This module implements diameter transport over TCP using <seealso
marker="kernel:gen_tcp">gen_tcp</seealso>.
It can be specified as the value of a <c>transport_module</c> option to
-<seealso
-marker="diameter#add_transport">diameter:add_transport/2</seealso>
+&mod_add_transport;
and implements the behaviour documented in
-<seealso marker="diameter_transport">diameter_transport(3)</seealso>.
+&man_transport;.
TLS security is supported, both as an upgrade following
capabilities exchange as specified by RFC 3588 and
at connection establishment as in the current draft standard.</p>
@@ -66,7 +70,7 @@ before configuring TLS capability on diameter transports.</p>
<fsummary>Start a transport process.</fsummary>
<type>
<v>Type = connect | accept</v>
-<v>Ref = <seealso marker="diameter#transport_ref">diameter:transport_ref()</seealso></v>
+<v>Ref = &mod_transport_ref;</v>
<v>Svc = #diameter_service{}</v>
<v>Opt = OwnOpt | SslOpt | TcpOpt</v>
<v>Pid = pid()</v>
@@ -81,8 +85,7 @@ before configuring TLS capability on diameter transports.</p>
<desc>
<p>
-The start function required by <seealso
-marker="diameter_transport#start">diameter_transport(3)</seealso>.</p>
+The start function required by &man_transport;.</p>
<p>
The only diameter_tcp-specific argument is the options list.
@@ -115,10 +118,8 @@ Note that the option <c>ip</c> specifies the local address.</p>
An <c>ssl_options</c> list must be specified if and only if
the transport in question has set <c>Inband-Security-Id</c> to
1 (<c>TLS</c>), as
-specified to either <seealso
-marker="diameter#start_service">start_service/2</seealso> or
-<seealso
-marker="diameter#add_transport">add_transport/2</seealso>,
+specified to either &mod_start_service; or
+&mod_add_transport;,
so that the transport process will receive notification of
whether or not to commence with a TLS handshake following capabilities
exchange.
@@ -149,8 +150,8 @@ The returned local address list has length one.</p>
<title>SEE ALSO</title>
<p>
-<seealso marker="diameter">diameter(3)</seealso>,
-<seealso marker="diameter_transport">diameter_transport(3)</seealso>,
+&man_main;,
+&man_transport;,
<seealso marker="kernel:gen_tcp">gen_tcp(3)</seealso>,
<seealso marker="kernel:inet">inet(3)</seealso>,
<seealso marker="ssl:ssl">ssl(3)</seealso></p>
diff --git a/lib/diameter/doc/src/diameter_transport.xml b/lib/diameter/doc/src/diameter_transport.xml
index 9ab750e560..0507af63a8 100644
--- a/lib/diameter/doc/src/diameter_transport.xml
+++ b/lib/diameter/doc/src/diameter_transport.xml
@@ -1,5 +1,10 @@
<?xml version="1.0" encoding="latin1" ?>
-<!DOCTYPE erlref SYSTEM "erlref.dtd">
+<!DOCTYPE erlref SYSTEM "erlref.dtd" [
+ <!ENTITY % also SYSTEM "seealso.ent" >
+ <!ENTITY % here SYSTEM "seehere.ent" >
+ %also;
+ %here;
+]>
<erlref>
<header>
@@ -38,8 +43,7 @@ under the License.
<description>
<p>
-A module specified as a <c>transport_module</c> to <seealso
-marker="diameter#add_transport">diameter:add_transport/2</seealso>
+A module specified as a <c>transport_module</c> to &mod_add_transport;
must implement the interface documented here.
The interface consists of a function with which
diameter starts a transport process and a message interface with which
@@ -54,14 +58,14 @@ parent).</p>
<funcs>
<func>
-<name>Mod:start({Type, Ref}, Svc, Opts)
+<name>Mod:start({Type, Ref}, Svc, Config)
-> {ok, Pid} | {ok, Pid, LAddrs} | {error, Reason}</name>
<fsummary>Start a transport process.</fsummary>
<type>
<v>Type = connect | accept</v>
-<v>Ref = <seealso marker="diameter#transport_ref">diameter:transport_ref()</seealso></v>
+<v>Ref = &mod_transport_ref;</v>
<v>Svc = #diameter_service{}</v>
-<v>Opts = term()</v>
+<v>Config = term()</v>
<v>Pid = pid()</v>
<v>LAddrs = [<seealso marker="kernel:inet#type-ip_address">inet:ip_address()</seealso>]</v>
<v>Reason = term()</v>
@@ -69,44 +73,47 @@ parent).</p>
<desc>
<p>
Start a transport process.
-Called by diameter as a consequence of a call to <seealso
-marker="diameter#add_transport">diameter:add_transport/2</seealso> in
+Called by diameter as a consequence of a call to &mod_add_transport; in
order to establish or accept a transport connection respectively.
A transport process maintains a connection with a single remote peer.</p>
<p>
-The first argument indicates whether the transport process in question
+<c>Type</c> indicates whether the transport process in question
is being started for a connecting (<c>connect</c>) or listening
(<c>accept</c>) transport.
In the latter case, transport processes are started as required to
-accept connections from multiple peers.
-Ref is in each case the same value that was returned from the
-call to <seealso
-marker="diameter#add_transport">diameter:add_transport/2</seealso>
+accept connections from multiple peers.</p>
+
+<p>
+<c>Ref</c> is the value that was returned from the call to &mod_add_transport;
+that has led to the start of a transport process.</p>
<p>
-A transport process must implement the message interface documented below.
-It should retain the pid of its parent, monitor the parent and terminate if
-it dies.
-It should not link to the parent.
-It should exit if its transport connection with its peer is lost.</p>
+<c>Svc</c> contains the capabilities passed to &mod_start_service; and
+&mod_add_transport;,
+values passed to the latter overriding those passed to the former.</p>
+
+<p>
+<c>Config</c> is as passed in the <c>transport_config</c> tuple in the
+&mod_transport_opt;
+list passed to &mod_add_transport;.</p>
<p>
-The capabilities in the <c>#diameter_service{}</c> record are as
-passed to <seealso
-marker="diameter#start_service">diameter:start_service/2</seealso> and
-<seealso
-marker="diameter#add_transport">diameter:add_transport/2</seealso>,
-values passed to the latter overriding those passed to the former.
The start function should use the <c>Host-IP-Address</c> list and/or
-<c>Opts</c> to select an appropriate list of local IP addresses,
+<c>Config</c> to select an appropriate list of local IP addresses,
and should return this list if different from the
<c>#diameter_service{}</c> addresses.
The returned list is used to populate <c>Host-IP-Address</c> AVPs in
outgoing capabilities exchange messages, the
<c>#diameter_service{}</c> addresses being used otherwise.</p>
+<p>
+A transport process must implement the message interface documented below.
+It should retain the pid of its parent, monitor the parent and terminate if
+it dies.
+It should not link to the parent.
+It should exit if its transport connection with its peer is lost.</p>
+
<marker id="MESSAGES"/>
</desc>
</func>
@@ -141,9 +148,9 @@ contains the binary to send.</p>
<tag><c>{diameter, {close, Pid}}</c></tag>
<item>
<p>
-A request to close the transport connection.
-The transport process should terminate after closing the
-connection.
+A request to terminate the transport process after having received DPA
+in response to DPR.
+The transport process should exit.
<c>Pid</c> is the pid() of the parent process.</p>
</item>
@@ -237,8 +244,7 @@ A transport must exit if a handshake is not successful.</p>
<title>SEE ALSO</title>
<p>
-<seealso marker="diameter_tcp">diameter_tcp(3)</seealso>,
-<seealso marker="diameter_sctp">diameter_sctp(3)</seealso></p>
+&man_tcp;, &man_sctp;</p>
</section>
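
The following is a minimal, hypothetical transport_module skeleton matching the start/3 contract above; connection establishment and most of the message interface are elided, and only the parent monitoring and termination behaviour described on this page is sketched.

-module(example_transport).
-export([start/3]).

%% Mod:start({Type, Ref}, Svc, Config): spawn the transport process and
%% return its pid, leaving Host-IP-Address selection to the defaults.
start({_Type, _Ref}, _Svc, _Config) ->
    Parent = self(),
    {ok, spawn(fun() -> init(Parent) end)}.

init(Parent) ->
    erlang:monitor(process, Parent),   %% monitor the parent, don't link
    loop().

loop() ->
    receive
        {'DOWN', _, process, _, _} ->    %% parent died: terminate
            exit(shutdown);
        {diameter, {close, _Parent}} ->  %% parent requests termination
            exit(shutdown);
        _Other ->                        %% remaining message interface elided
            loop()
    end.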
diff --git a/lib/diameter/doc/src/files.mk b/lib/diameter/doc/src/files.mk
index 79d53abceb..89ec1031e6 100644
--- a/lib/diameter/doc/src/files.mk
+++ b/lib/diameter/doc/src/files.mk
@@ -2,7 +2,7 @@
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2010-2011. All Rights Reserved.
+# Copyright Ericsson AB 2010-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
diff --git a/lib/diameter/doc/src/notes.xml b/lib/diameter/doc/src/notes.xml
index e57958ac09..b89d84a4f6 100644
--- a/lib/diameter/doc/src/notes.xml
+++ b/lib/diameter/doc/src/notes.xml
@@ -1,11 +1,17 @@
<?xml version="1.0" encoding="latin1" ?>
-<!DOCTYPE chapter SYSTEM "chapter.dtd">
+<!DOCTYPE chapter SYSTEM "chapter.dtd" [
+ <!ENTITY % also SYSTEM "seealso.ent" >
+ <!ENTITY % here SYSTEM "seehere.ent" >
+ %also;
+ %here;
+]>
<chapter>
<header>
<copyright>
<year>2011</year>
+<year>2012</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -136,6 +142,33 @@ first.</p>
</section>
+<section><title>Diameter 1.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix fault in sending of 'closed' events.</p>
+ <p>
+ The fault made it possible for the 'closed' event not to
+ be sent following a failed capabilities exchange.</p>
+ <p>
+ Own Id: OTP-9824</p>
+ </item>
+ <item>
+ <p>
+ Fix faulty diameterc -name/-prefix.</p>
+ <p>
+ A minor blunder when introducing the new dictionary
+ parser in diameter-1.0 broke these options.</p>
+ <p>
+ Own Id: OTP-9826</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Diameter 1.0</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -395,8 +428,7 @@ Known issues or limitations:</p>
Some agent-related functionality is not entirely complete.
In particular, support for proxy agents, that advertise specific
Diameter applications but otherwise relay messages in much the same
-way as relay agents (for which a <seealso
-marker="diameter_app#handle_request">handle_request/3</seealso>
+way as relay agents (for which a &app_handle_request;
callback can return a <c>relay</c> tuple), will be completed in an
upcoming release.
There may also be more explicit support for redirect agents, although
@@ -428,8 +460,7 @@ could likely be expanded upon.</p>
<item>
<p>
-The function <seealso
-marker="diameter#service_info">diameter:service_info/2</seealso>
+The function &mod_service_info;
can be used to retrieve information about a started service
(statistics, information about connected peers, etc) but
this is not yet documented and both the input and output may change
diff --git a/lib/diameter/doc/src/seealso.ent b/lib/diameter/doc/src/seealso.ent
new file mode 100644
index 0000000000..6f67630220
--- /dev/null
+++ b/lib/diameter/doc/src/seealso.ent
@@ -0,0 +1,112 @@
+<?xml version="1.0" encoding="iso-8859-1" ?>
+
+<!--
+
+%CopyrightBegin%
+
+Copyright Ericsson AB 2012. All Rights Reserved.
+
+The contents of this file are subject to the Erlang Public License,
+Version 1.1, (the "License"); you may not use this file except in
+compliance with the License. You should have received a copy of the
+Erlang Public License along with this software. If not, it can be
+retrieved online at http://www.erlang.org/.
+
+Software distributed under the License is distributed on an "AS IS"
+basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+the License for the specific language governing rights and limitations
+under the License.
+
+%CopyrightEnd%
+
+-->
+
+<!--
+
+Entities for cross references: less to type, easier to read and
+less error prone.
+
+Additional intra-document entities are generated by seehere.sed.
+That each definition is on a single (hideously long) line is
+significant.
+
+-->
+
+<!-- diameter -->
+
+<!ENTITY mod_add_transport '<seealso marker="diameter#add_transport-2">diameter:add_transport/2</seealso>'>
+<!ENTITY mod_call '<seealso marker="diameter#call-4">diameter:call/4</seealso>'>
+<!ENTITY mod_origin_state_id '<seealso marker="diameter#origin_state_id-0">diameter:origin_state_id/0</seealso>'>
+<!ENTITY mod_remove_transport '<seealso marker="diameter#remove_transport-2">diameter:remove_transport/2</seealso>'>
+<!ENTITY mod_service_info '<seealso marker="diameter#service_info-2">diameter:service_info/2</seealso>'>
+<!ENTITY mod_services '<seealso marker="diameter#services-0">diameter:services/0</seealso>'>
+<!ENTITY mod_start_service '<seealso marker="diameter#start_service-2">diameter:start_service/2</seealso>'>
+<!ENTITY mod_stop_service '<seealso marker="diameter#stop_service-1">diameter:stop_service/1</seealso>'>
+<!ENTITY mod_subscribe '<seealso marker="diameter#subscribe-1">diameter:subscribe/1</seealso>'>
+
+<!ENTITY mod_application_alias '<seealso marker="diameter#application_alias">diameter:application_alias()</seealso>'>
+<!ENTITY mod_application_module '<seealso marker="diameter#application_module">diameter:application_module()</seealso>'>
+<!ENTITY mod_application_opt '<seealso marker="diameter#application_opt">diameter:application_opt()</seealso>'>
+<!ENTITY mod_call_opt '<seealso marker="diameter#call_opt">diameter:call_opt()</seealso>'>
+<!ENTITY mod_capability '<seealso marker="diameter#capability">diameter:capability()</seealso>'>
+<!ENTITY mod_evaluable '<seealso marker="diameter#evaluable">diameter:evaluable()</seealso>'>
+<!ENTITY mod_peer_filter '<seealso marker="diameter#peer_filter">diameter:peer_filter()</seealso>'>
+<!ENTITY mod_service_event '<seealso marker="diameter#service_event">diameter:service_event()</seealso>'>
+<!ENTITY mod_service_name '<seealso marker="diameter#service_name">diameter:service_name()</seealso>'>
+<!ENTITY mod_service_opt '<seealso marker="diameter#service_opt">diameter:service_opt()</seealso>'>
+<!ENTITY mod_transport_opt '<seealso marker="diameter#transport_opt">diameter:transport_opt()</seealso>'>
+<!ENTITY mod_transport_ref '<seealso marker="diameter#transport_ref">diameter:transport_ref()</seealso>'>
+
+<!ENTITY capabilities_cb '<seealso marker="#capabilities_cb">capabilities_cb</seealso>'>
+<!ENTITY capx_timeout '<seealso marker="#capx_timeout">capx_timeout</seealso>'>
+<!ENTITY disconnect_cb '<seealso marker="#disconnect_cb">disconnect_cb</seealso>'>
+<!ENTITY transport_config '<seealso marker="#transport_config">transport_config</seealso>'>
+<!ENTITY transport_module '<seealso marker="#transport_module">transport_module</seealso>'>
+<!ENTITY reconnect_timer '<seealso marker="#reconnect_timer">reconnect_timer</seealso>'>
+<!ENTITY watchdog_timer '<seealso marker="#watchdog_timer">watchdog_timer</seealso>'>
+
+<!-- diameter_app -->
+
+<!ENTITY app_handle_answer '<seealso marker="diameter_app#Mod:handle_answer-4">handle_answer/4</seealso>'>
+<!ENTITY app_handle_request '<seealso marker="diameter_app#Mod:handle_request-3">handle_request/3</seealso>'>
+<!ENTITY app_handle_error '<seealso marker="diameter_app#Mod:handle_error-4">handle_error/4</seealso>'>
+<!ENTITY app_peer_down '<seealso marker="diameter_app#Mod:peer_down-3">peer_down/3</seealso>'>
+<!ENTITY app_peer_up '<seealso marker="diameter_app#Mod:peer_up-3">peer_up/3</seealso>'>
+<!ENTITY app_pick_peer '<seealso marker="diameter_app#Mod:pick_peer-4">pick_peer/4</seealso>'>
+<!ENTITY app_prepare_retransmit '<seealso marker="diameter_app#Mod:prepare_retransmit-3">prepare_retransmit/3</seealso>'>
+<!ENTITY app_prepare_request '<seealso marker="diameter_app#Mod:prepare_request-3">prepare_request/3</seealso>'>
+
+<!ENTITY app_capabilities '<seealso marker="diameter_app#capabilities">diameter_app:capabilities()</seealso>'>
+<!ENTITY app_message '<seealso marker="diameter_app#message">diameter_app:message()</seealso>'>
+<!ENTITY app_packet '<seealso marker="diameter_app#packet">diameter_app:packet()</seealso>'>
+<!ENTITY app_peer '<seealso marker="diameter_app#peer">diameter_app:peer()</seealso>'>
+<!ENTITY app_peer_ref '<seealso marker="diameter_app#peer_ref">diameter_app:peer_ref()</seealso>'>
+<!ENTITY app_state '<seealso marker="diameter_app#state">diameter_app:state()</seealso>'>
+
+<!-- diameter_dict -->
+
+<!ENTITY dict_data_types '<seealso marker="diameter_dict#DATA_TYPES">diameter_dict(4)</seealso>'>
+
+<!ENTITY dict_Address '<seealso marker="diameter_dict#DATA_TYPES">Address()</seealso>'>
+<!ENTITY dict_DiameterIdentity '<seealso marker="diameter_dict#DATA_TYPES">DiameterIdentity()</seealso>'>
+<!ENTITY dict_Grouped '<seealso marker="diameter_dict#DATA_TYPES">Grouped()</seealso>'>
+<!ENTITY dict_OctetString '<seealso marker="diameter_dict#DATA_TYPES">OctetString()</seealso>'>
+<!ENTITY dict_Time '<seealso marker="diameter_dict#DATA_TYPES">Time()</seealso>'>
+<!ENTITY dict_UTF8String '<seealso marker="diameter_dict#DATA_TYPES">UTF8String()</seealso>'>
+<!ENTITY dict_Unsigned32 '<seealso marker="diameter_dict#DATA_TYPES">Unsigned32()</seealso>'>
+
+<!-- diameter_transport -->
+
+<!ENTITY transport_start
+ '<seealso marker="diameter_transport#Mod:start-3">start/3</seealso>'>
+
+<!-- reference pages -->
+
+<!ENTITY man_compile '<seealso marker="diameterc">diameterc(1)</seealso>'>
+<!ENTITY man_main '<seealso marker="diameter">diameter(3)</seealso>'>
+<!ENTITY man_app '<seealso marker="diameter_app">diameter_app(3)</seealso>'>
+<!ENTITY man_dict '<seealso marker="diameter_dict">diameter_dict(4)</seealso>'>
+<!ENTITY man_transport
+ '<seealso marker="diameter_transport">diameter_transport(3)</seealso>'>
+<!ENTITY man_sctp '<seealso marker="diameter_sctp">diameter_sctp(3)</seealso>'>
+<!ENTITY man_tcp '<seealso marker="diameter_tcp">diameter_tcp(3)</seealso>'>
diff --git a/lib/diameter/doc/src/seehere.sed b/lib/diameter/doc/src/seehere.sed
new file mode 100644
index 0000000000..c62a783d40
--- /dev/null
+++ b/lib/diameter/doc/src/seehere.sed
@@ -0,0 +1,35 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2012. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+
+#
+# Map entities for inter-document references to ones for
+# intra-document references like this:
+#
+# <!ENTITY aaa_xxx '<seealso marker="bbb#yyy">ccc:zzz</seealso>'>
+#
+# ===>
+#
+# <!ENTITY xxx '<seealso marker="#yyy">zzz</seealso>'>
+#
+
+/<!ENTITY/!d
+/#/!d
+/"#/d
+s@ [^_]*_@ @
+s@"[^#]*#@"#@
+s@>[^:]*:@>@
diff --git a/lib/diameter/make/release_targets.mk b/lib/diameter/make/release_targets.mk
deleted file mode 100644
index 5a3b585cbc..0000000000
--- a/lib/diameter/make/release_targets.mk
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1997-2011. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-#
-
-ifeq ($(TOPDOC),)
-$(HTMLDIR)/index.html: $(XML_FILES)
- date=`date +"%B %e %Y"`; \
- $(XSLTPROC) --noout --stringparam outdir $(HTMLDIR) --stringparam docgen "$(DOCGEN)" --stringparam topdocdir "$(TOPDOCDIR)" \
- --stringparam pdfdir "$(PDFDIR)" \
- --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude \
- -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_html_entities $(DOCGEN)/priv/xsl/db_html.xsl book.xml
-endif
-
-$(HTMLDIR)/users_guide.html: $(XML_FILES)
- date=`date +"%B %e %Y"`; \
- $(XSLTPROC) --noout --stringparam outdir $(HTMLDIR) --stringparam docgen "$(DOCGEN)" --stringparam topdocdir "$(TOPDOCDIR)" \
- --stringparam pdfdir "$(PDFDIR)" \
- --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude \
- -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_html_entities $(DOCGEN)/priv/xsl/db_html.xsl book.xml
-
-
-%.fo: $(XML_FILES)
- date=`date +"%B %e %Y"`; \
- $(XSLTPROC) --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" \
- --stringparam appver "$(VSN)" --xinclude \
- -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_html_entities $(DOCGEN)/priv/xsl/db_pdf.xsl book.xml > $@
-
-
-
-# ------------------------------------------------------------------------
-# The following targets just exist in the documentation directory
-# ------------------------------------------------------------------------
-ifneq ($(XML_FILES),)
-
-# ----------------------------------------------------
-# Generation of application index data
-# ----------------------------------------------------
-$(HTMLDIR)/$(APPLICATION).eix: $(XML_FILES)
- date=`date +"%B %e %Y"`; \
- $(XSLTPROC) --stringparam docgen "$(DOCGEN)" \
- --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude \
- -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_html_entities $(DOCGEN)/priv/xsl/db_eix.xsl book.xml > $@
-
-docs:
-#docs: $(HTMLDIR)/$(APPLICATION).eix
-
-# ----------------------------------------------------
-# Local documentation target for testing
-# ----------------------------------------------------
-local_docs: TOPDOCDIR=.
-local_docs: local_copy_of_topdefs docs
-
-local_html: TOPDOCDIR=.
-local_html: local_copy_of_topdefs html
-
-local_copy_of_topdefs:
- $(INSTALL) $(DOCGEN)/priv/css/otp_doc.css $(HTMLDIR)
- $(INSTALL) $(DOCGEN)/priv/images/erlang-logo.png $(HTMLDIR)
- $(INSTALL) $(DOCGEN)/priv/images/erlang-logo.gif $(HTMLDIR)
- $(INSTALL_DIR) $(HTMLDIR)/js/flipmenu
- $(INSTALL) $(DOCGEN)/priv/js/flipmenu/flip_closed.gif \
- $(DOCGEN)/priv/js/flipmenu/flip_open.gif \
- $(DOCGEN)/priv/js/flipmenu/flip_static.gif \
- $(DOCGEN)/priv/js/flipmenu/flipmenu.js $(HTMLDIR)/js/flipmenu
-
-endif
-
-# ----------------------------------------------------
-# Standard release target
-# ----------------------------------------------------
-
-ifneq ($(PREFIX),)
-
-release release_docs release_tests release_html:
- $(MAKE) $(MFLAGS) RELEASE_PATH=$(PREFIX) $(TARGET_MAKEFILE) $@_spec
-
-endif
diff --git a/lib/diameter/make/rules.mk.in b/lib/diameter/make/rules.mk.in
deleted file mode 100644
index cd3c297d75..0000000000
--- a/lib/diameter/make/rules.mk.in
+++ /dev/null
@@ -1,193 +0,0 @@
-#-*-makefile-*- ; force emacs to enter makefile-mode
-# ----------------------------------------------------
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2009-2011. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-
-.SUFFIXES: .erl .beam .yrl .hrl .xml .xmlsrc .html \
- .3 .1 .pdf .fo .el .elc
-
-# ----------------------------------------------------
-# Common macros
-# ----------------------------------------------------
-DEFAULT_TARGETS = opt debug release release_docs clean docs
-
-
-# Slash separated list of return values from $(origin VAR)
-# that are untrusted - set default in this file instead.
-# The list is not space separated since some return values
-# contain space, and we want to use $(findstring ...) to
-# search the list.
-DUBIOUS_ORIGINS = /undefined/environment/
-
-
-# # ----------------------------------------------------
-# # TARGET definition
-# # ----------------------------------------------------
-# # TARGET = @TARGET@
-# ifneq ($(OVERRIDE_TARGET),)
-# ifneq ($(TARGET), $(OVERRIDE_TARGET))
-# $(warning overriding $$(TARGET) = \
-# "$(TARGET)" \
-# with \
-# $$(OVERRIDE_TARGET) = \
-# "$(OVERRIDE_TARGET)")
-# override TARGET := $(OVERRIDE_TARGET)
-# endif
-# endif
-#
-
-# ----------------------------------------------------
-# Command macros
-# ----------------------------------------------------
-PREFIX = @prefix@
-INSTALL = @INSTALL@
-INSTALL_DIR = @INSTALL_DIR@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_DATA = @INSTALL_DATA@
-
-
-# ----------------------------------------------------
-# Erlang language section
-# ----------------------------------------------------
-ERL_ROOT_DIR = @ERLANG_ROOT_DIR@
-ERL_LIB_DIR = @ERLANG_LIB_DIR@
-DOCGEN_DIR = @ERLANG_LIB_DIR_erl_docgen@
-TEST_SERVER_DIR = @ERLANG_LIB_VER_test_server@
-EMULATOR = beam
-ERL_COMPILE_FLAGS += +debug_info
-ERLC_WFLAGS = -W
-ERLC = $(ERL_ROOT_DIR)/bin/erlc $(ERLC_WFLAGS) $(ERLC_FLAGS)
-ERL = $(ERL_ROOT_DIR)/bin/erl -boot start_clean
-#ERLC = @ERLC@ $(ERLC_WFLAGS) $(ERLC_FLAGS)
-#ERL = @ERL@ -boot start_clean
-
-ifneq (,$(findstring $(origin EBIN),$(DUBIOUS_ORIGINS)))
-EBIN = ../../ebin
-endif
-
-# Generated (non ebin) files...
-ifneq (,$(findstring $(origin EGEN),$(DUBIOUS_ORIGINS)))
-EGEN = .
-endif
-
-ifneq (,$(findstring $(origin ESRC),$(DUBIOUS_ORIGINS)))
-ESRC = .
-endif
-
-$(EBIN)/%.beam: $(EGEN)/%.erl
- $(ERLC) $(ERL_COMPILE_FLAGS) -o$(EBIN) $<
-
-$(EBIN)/%.beam: $(ESRC)/%.erl
- $(ERLC) $(ERL_COMPILE_FLAGS) -o$(EBIN) $<
-
-.erl.beam:
- $(ERLC) $(ERL_COMPILE_FLAGS) -o$(dir $@) $<
-
-
-#
-# When .erl files are automatically created GNU make removes them if
-# they were the result of a chain of implicit rules. To prevent this
-# we say that all .erl files are "precious".
-#
-.PRECIOUS: %.erl %.fo
-
-
-# ----------------------------------------------------
-# Documentation section
-# ----------------------------------------------------
-# export VSN
-
-# TOPDOCDIR=../../../../doc
-
-DOCDIR = ..
-
-PDFDIR=$(DOCDIR)/pdf
-
-HTMLDIR = $(DOCDIR)/html
-
-MAN1DIR = $(DOCDIR)/man1
-MAN2DIR = $(DOCDIR)/man2
-MAN3DIR = $(DOCDIR)/man3
-MAN4DIR = $(DOCDIR)/man4
-MAN6DIR = $(DOCDIR)/man6
-MAN9DIR = $(DOCDIR)/man9
-
-# HTML & GIF files that always are generated and must be delivered
-XML_COLL_FILES = $(XML_APPLICATION_FILES) $(XML_PART_FILES)
-DEFAULT_HTML_FILES = \
- $(XML_COLL_FILES:%.xml=$(HTMLDIR)/%_frame.html) \
- $(XML_COLL_FILES:%.xml=$(HTMLDIR)/%_first.html) \
- $(XML_COLL_FILES:%.xml=$(HTMLDIR)/%_term.html) \
- $(XML_COLL_FILES:%.xml=$(HTMLDIR)/%_cite.html) \
- $(XML_APPLICATION_FILES:%.xml=$(HTMLDIR)/%_index.html) \
- $(XML_APPLICATION_FILES:%.xml=$(HTMLDIR)/%.kwc) \
- $(HTMLDIR)/index.html
-
-DEFAULT_GIF_FILES = $(HTMLDIR)/min_head.gif
-
-#
-# Flags & Commands
-#
-XSLTPROC = @XSLTPROC@
-FOP = @FOP@
-
-DOCGEN=$(DOCGEN_DIR)
-
-$(MAN1DIR)/%.1:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-
-$(MAN2DIR)/%.2:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-
-$(MAN3DIR)/%.3:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-# left for compatibility
-$(MAN4DIR)/%.4:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-$(MAN4DIR)/%.5:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-# left for compatibility
-$(MAN6DIR)/%.6:: %_app.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-$(MAN6DIR)/%.7:: %_app.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-$(MAN9DIR)/%.9:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-
-.xmlsrc.xml:
- escript $(DOCGEN)/priv/bin/codeline_preprocessing.escript $< $@
-
-.fo.pdf:
- $(FOP) -fo $< -pdf $@
-
diff --git a/lib/diameter/make/subdir.mk b/lib/diameter/make/subdir.mk
deleted file mode 100644
index 24b08080ae..0000000000
--- a/lib/diameter/make/subdir.mk
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1997-2011. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-#
-# Make include file for otp
-
-.PHONY: debug opt release docs release_docs tests release_tests \
- clean depend valgrind
-
-#
-# Targets that don't affect documentation directories
-#
-opt debug release docs release_docs tests release_tests clean depend valgrind:
- @set -e ; \
- app_pwd=`pwd` ; \
- if test -f vsn.mk; then \
- echo "=== Entering application" `basename $$app_pwd` ; \
- fi ; \
- case "$(MAKE)" in *clearmake*) tflag="-T";; *) tflag="";; esac; \
- for d in $(SUB_DIRS); do \
- if test -f $$d/SKIP ; then \
- echo "=== Skipping subdir $$d, reason:" ; \
- cat $$d/SKIP ; \
- echo "===" ; \
- else \
- if test ! -d $$d ; then \
- echo "=== Skipping subdir $$d, it is missing" ; \
- else \
- xflag="" ; \
- if test -f $$d/ignore_config_record.inf; then \
- xflag=$$tflag ; \
- fi ; \
- (cd $$d && $(MAKE) $$xflag $@) || exit $$? ; \
- fi ; \
- fi ; \
- done ; \
- if test -f vsn.mk; then \
- echo "=== Leaving application" `basename $$app_pwd` ; \
- fi
diff --git a/lib/diameter/make/target.mk b/lib/diameter/make/target.mk
deleted file mode 100644
index 4ae470b9e2..0000000000
--- a/lib/diameter/make/target.mk
+++ /dev/null
@@ -1,33 +0,0 @@
-ifeq ($(OVERRIDE_TARGET),)
-
-ifeq ($(TARGET),)
-
-TARGET := $(shell $(DIAMETER_TOP)/autoconf/config.guess)
-
-else
-
-endif
-
-else
-
-ifneq ($(TARGET),)
-
-ifneq ($(TARGET), $(OVERRIDE_TARGET))
-$(warning overriding $$(TARGET) = \
- "$(TARGET)" \
- with \
- $$(OVERRIDE_TARGET) = \
- "$(OVERRIDE_TARGET)")
-else
-endif
-
-override TARGET := $(OVERRIDE_TARGET)
-
-else
-
-TARGET := $(OVERRIDE_TARGET)
-
-endif
-
-endif
-
diff --git a/lib/diameter/src/Makefile b/lib/diameter/src/Makefile
index 99c343275b..060659bce9 100644
--- a/lib/diameter/src/Makefile
+++ b/lib/diameter/src/Makefile
@@ -16,13 +16,8 @@
#
# %CopyrightEnd%
-ifeq ($(ERL_TOP),)
-include $(DIAMETER_TOP)/make/target.mk
-include $(DIAMETER_TOP)/make/$(TARGET)/rules.mk
-else
include $(ERL_TOP)/make/target.mk
include $(ERL_TOP)/make/$(TARGET)/otp.mk
-endif
# ----------------------------------------------------
# Application version
@@ -210,11 +205,7 @@ dialyze: opt $(PLT)
# Release targets
# ----------------------------------------------------
-ifeq ($(ERL_TOP),)
-include $(DIAMETER_TOP)/make/release_targets.mk
-else
include $(ERL_TOP)/make/otp_release_targets.mk
-endif
# Can't $(INSTALL_DIR) more than one directory at a time on Solaris.
diff --git a/lib/diameter/src/base/diameter.appup.src b/lib/diameter/src/base/diameter.appup.src
index 9b2a7d18ab..5655f98c1b 100644
--- a/lib/diameter/src/base/diameter.appup.src
+++ b/lib/diameter/src/base/diameter.appup.src
@@ -20,30 +20,38 @@
{"%VSN%",
[
- {"0.9", [{restart_application, diameter}]},
- {"0.10", [{restart_application, diameter}]},
- {"1.0", [{restart_application, diameter}]},
- {"1.1", [%% new code
- {add_module, diameter_transport},
- %% modified code
- {load, diameter_sctp},
- {load, diameter_stats},
- {load, diameter_service},
- {load, diameter_config},
- {load, diameter_codec},
- {load, diameter_watchdog},
- {load, diameter_peer},
- {load, diameter_peer_fsm},
- {load, diameter},
- %% unmodified but including modified diameter.hrl
- {load, diameter_callback},
- {load, diameter_capx},
- {load, diameter_types}]}
+ {"0.9", [{restart_application, diameter}]},
+ {"0.10", [{restart_application, diameter}]},
+ {"1.0", [{restart_application, diameter}]},
+ {"1.1", [{restart_application, diameter}]},
+ {"1.2", [{load, diameter},
+ {load, diameter_capx},
+ {load, diameter_codec},
+ {load, diameter_peer},
+ {load, diameter_reg},
+ %% order significant from here
+ {load, diameter_session},
+ {load, diameter_peer_fsm},
+ {load, diameter_service},
+ {load, diameter_watchdog},
+ {load, diameter_config}]},
+ {"1.2.1", [{load, diameter},
+ {load, diameter_capx},
+ {load, diameter_peer},
+ {load, diameter_reg},
+ %% order significant from here
+ {load, diameter_session},
+ {load, diameter_peer_fsm},
+ {load, diameter_service},
+ {load, diameter_watchdog},
+ {load, diameter_config}]}
],
[
- {"0.9", [{restart_application, diameter}]},
- {"0.10", [{restart_application, diameter}]},
- {"1.0", [{restart_application, diameter}]},
- {"1.1", [{restart_application, diameter}]}
+ {"0.9", [{restart_application, diameter}]},
+ {"0.10", [{restart_application, diameter}]},
+ {"1.0", [{restart_application, diameter}]},
+ {"1.1", [{restart_application, diameter}]},
+ {"1.2", [{restart_application, diameter}]},
+ {"1.2.1", [{restart_application, diameter}]}
]
}.
diff --git a/lib/diameter/src/base/diameter.erl b/lib/diameter/src/base/diameter.erl
index 4f90b741ae..8f9901907a 100644
--- a/lib/diameter/src/base/diameter.erl
+++ b/lib/diameter/src/base/diameter.erl
@@ -44,6 +44,8 @@
stop/0]).
-export_type([evaluable/0,
+ restriction/0,
+ sequence/0,
app_alias/0,
service_name/0,
capability/0,
@@ -280,11 +282,23 @@ call(SvcName, App, Message) ->
| fun()
| maybe_improper_list(evaluable(), list()).
+-type sequence()
+ :: {'Unsigned32'(), 0..32}.
+
+-type restriction()
+ :: false
+ | node
+ | nodes
+ | [node()]
+ | evaluable().
+
%% Options passed to start_service/2
-type service_opt()
:: capability()
- | {application, [application_opt()]}.
+ | {application, [application_opt()]}
+ | {restrict_connections, restriction()}
+ | {sequence, sequence() | evaluable()}.
-type application_opt()
:: {alias, app_alias()}
@@ -316,6 +330,8 @@ call(SvcName, App, Message) ->
| {applications, [app_alias()]}
| {capabilities, [capability()]}
| {capabilities_cb, evaluable()}
+ | {capx_timeout, 'Unsigned32'()}
+ | {disconnect_cb, evaluable()}
| {watchdog_timer, 'Unsigned32'() | {module(), atom(), list()}}
| {reconnect_timer, 'Unsigned32'()}
| {private, any()}.
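
As a hedged example of the service options added above (the service name, capability values and callback module are invented), a service might now be configured with a sequence mask and a connection restriction as follows.

%% Sketch only: capability values and the callback module are placeholders.
ok = diameter:start_service(example_service,
         [{'Origin-Host', "node1.example.com"},
          {'Origin-Realm', "example.com"},
          {'Vendor-Id', 0},
          {'Product-Name', "Example"},
          {'Auth-Application-Id', [0]},
          {application, [{alias, example_app},
                         {dictionary, diameter_gen_base_rfc3588},
                         {module, example_cb}]},
          {restrict_connections, nodes},   %% restriction()
          {sequence, {1,4}}]).             %% sequence(): an {H,N} pair, default {0,32}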
diff --git a/lib/diameter/src/base/diameter_capx.erl b/lib/diameter/src/base/diameter_capx.erl
index 6c4d60ee9b..190d37262b 100644
--- a/lib/diameter/src/base/diameter_capx.erl
+++ b/lib/diameter/src/base/diameter_capx.erl
@@ -141,7 +141,9 @@ cap('Host-IP-Address', Vs)
when is_list(Vs) ->
lists:map(fun ipaddr/1, Vs);
-cap('Firmware-Revision', V) ->
+cap(K, V)
+ when K == 'Firmware-Revision';
+ K == 'Origin-State-Id' ->
[V];
cap(_, Vs)
@@ -149,7 +151,7 @@ cap(_, Vs)
Vs;
cap(K, V) ->
- ?THROW({invalid, K, V}).
+ ?THROW({invalid, {K,V}}).
ipaddr(A) ->
try
diff --git a/lib/diameter/src/base/diameter_codec.erl b/lib/diameter/src/base/diameter_codec.erl
index 421e280422..a94d37f7a8 100644
--- a/lib/diameter/src/base/diameter_codec.erl
+++ b/lib/diameter/src/base/diameter_codec.erl
@@ -333,6 +333,9 @@ decode_header(_) ->
%% wraparound counter. The 8-bit counter is incremented each time the
%% system is restarted.
+sequence_numbers({_,_} = T) ->
+ T;
+
sequence_numbers(#diameter_packet{bin = Bin})
when is_binary(Bin) ->
sequence_numbers(Bin);
diff --git a/lib/diameter/src/base/diameter_config.erl b/lib/diameter/src/base/diameter_config.erl
index e47f63f814..63d28f25a2 100644
--- a/lib/diameter/src/base/diameter_config.erl
+++ b/lib/diameter/src/base/diameter_config.erl
@@ -97,6 +97,9 @@
-record(monitor, {mref = make_ref() :: reference(),
service}). %% name
+%% The default sequence mask.
+-define(NOMASK, {0,32}).
+
%% Time to lay low before restarting a dead service.
-define(RESTART_SLEEP, 2000).
@@ -549,9 +552,11 @@ make_config(SvcName, Opts) ->
ok = encode_CER(COpts),
- Os = split(Opts, [{[fun erlang:is_boolean/1], false, share_peers},
- {[fun erlang:is_boolean/1], false, use_shared_peers},
- {[fun erlang:is_pid/1, false], false, monitor}]),
+ Os = split(Opts, fun opt/2, [{false, share_peers},
+ {false, use_shared_peers},
+ {false, monitor},
+ {?NOMASK, sequence},
+ {nodes, restrict_connections}]),
%% share_peers and use_shared_peers are currently undocumented.
#service{name = SvcName,
@@ -559,11 +564,66 @@ make_config(SvcName, Opts) ->
capabilities = Caps},
options = Os}.
+split(Opts, F, Defs) ->
+ [{K, F(K, get_opt(K, Opts, D))} || {D,K} <- Defs].
+
+opt(K, false = B)
+ when K /= sequence ->
+ B;
+
+opt(K, true = B)
+  when K == share_peers;
+ K == use_shared_peers ->
+ B;
+
+opt(monitor, P)
+ when is_pid(P) ->
+ P;
+
+opt(restrict_connections, T)
+ when T == node;
+ T == nodes;
+ T == [];
+ is_atom(hd(T)) ->
+ T;
+
+opt(restrict_connections = K, F) ->
+ try diameter_lib:eval(F) of %% no guarantee that it won't fail later
+ Nodes when is_list(Nodes) ->
+ F;
+ V ->
+ ?THROW({value, {K,V}})
+ catch
+ E:R ->
+ ?THROW({value, {K, E, R, ?STACK}})
+ end;
+
+opt(sequence, {_,_} = T) ->
+ sequence(T);
+
+opt(sequence = K, F) ->
+ try diameter_lib:eval(F) of
+ T -> sequence(T)
+ catch
+ E:R ->
+ ?THROW({value, {K, E, R, ?STACK}})
+ end;
+
+opt(K, _) ->
+ ?THROW({value, K}).
+
+sequence({H,N} = T)
+ when 0 =< N, N =< 32, 0 =< H, 0 == H bsr N ->
+ T;
+
+sequence(_) ->
+ ?THROW({value, sequence}).
+
make_caps(Caps, Opts) ->
case diameter_capx:make_caps(Caps, Opts) of
{ok, T} ->
T;
- {error, {Reason, _}} ->
+ {error, Reason} ->
?THROW(Reason)
end.
@@ -663,21 +723,6 @@ get_opt(Key, List, Def) ->
_ -> ?THROW({arity, Key})
end.
-split(Opts, Defs) ->
- [{K, value(D, Opts)} || {_,_,K} = D <- Defs].
-
-value({Preds, Def, Key}, Opts) ->
- V = get_opt(Key, Opts, Def),
- lists:any(fun(P) -> pred(P,V) end, Preds)
- orelse ?THROW({value, Key}),
- V.
-
-pred(F, V)
- when is_function(F) ->
- F(V);
-pred(T, V) ->
- T == V.
-
cb(M,F) ->
try M:F() of
V -> V
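
Per opt(sequence, F) above, the sequence option can also be given as an evaluable(), evaluated at configuration time and required to return a valid {H,N} pair. A hedged sketch, assuming Caps is a capabilities/application list like the one in the earlier start_service/2 example:

-module(example_sequence).
-export([start/1]).

%% Hypothetical per-node mask: derive H from the node name so that
%% identifiers generated on different nodes are unlikely to collide.
start(Caps) ->
    SequenceF = fun() -> {erlang:phash2(node(), 16), 4} end,
    diameter:start_service(example_service, [{sequence, SequenceF} | Caps]).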
diff --git a/lib/diameter/src/base/diameter_peer.erl b/lib/diameter/src/base/diameter_peer.erl
index 46b2ba9465..1b2f32ddff 100644
--- a/lib/diameter/src/base/diameter_peer.erl
+++ b/lib/diameter/src/base/diameter_peer.erl
@@ -76,7 +76,7 @@ notify(SvcName, T) ->
%%% # start/3
%%% ---------------------------------------------------------------------------
-%% From old code: make is restart.
+%% From old code: make it restart.
start(_T, _Opts, #diameter_service{}) ->
{error, restart}.
diff --git a/lib/diameter/src/base/diameter_peer_fsm.erl b/lib/diameter/src/base/diameter_peer_fsm.erl
index 302540e76b..c4320fcb99 100644
--- a/lib/diameter/src/base/diameter_peer_fsm.erl
+++ b/lib/diameter/src/base/diameter_peer_fsm.erl
@@ -48,43 +48,64 @@
-include("diameter_internal.hrl").
-include("diameter_gen_base_rfc3588.hrl").
+%% Values of Disconnect-Cause in DPR.
-define(GOAWAY, ?'DIAMETER_BASE_DISCONNECT-CAUSE_DO_NOT_WANT_TO_TALK_TO_YOU').
-define(REBOOT, ?'DIAMETER_BASE_DISCONNECT-CAUSE_REBOOTING').
+-define(BUSY, ?'DIAMETER_BASE_DISCONNECT-CAUSE_BUSY').
-define(NO_INBAND_SECURITY, 0).
-define(TLS, 1).
%% Keys in process dictionary.
--define(CB_KEY, cb). %% capabilities callback
--define(DWA_KEY, dwa). %% outgoing DWA
--define(Q_KEY, q). %% transport start queue
--define(START_KEY, start). %% start of connected transport
+-define(CB_KEY, cb). %% capabilities callback
+-define(DPR_KEY, dpr). %% disconnect callback
+-define(DWA_KEY, dwa). %% outgoing DWA
+-define(REF_KEY, ref). %% transport_ref()
+-define(Q_KEY, q). %% transport start queue
+-define(START_KEY, start). %% start of connected transport
+-define(SEQUENCE_KEY, mask). %% mask for sequence numbers
+-define(RESTRICT_KEY, restrict). %% nodes for connection check
+
+%% The default sequence mask.
+-define(NOMASK, {0,32}).
%% A 2xxx series Result-Code. Not necessarily 2001.
-define(IS_SUCCESS(N), 2 == (N) div 1000).
+%% Guards.
+-define(IS_UINT32(N), (is_integer(N) andalso 0 =< N andalso 0 == N bsr 32)).
+-define(IS_TIMEOUT(N), ?IS_UINT32(N)).
+-define(IS_CAUSE(N), N == ?REBOOT; N == rebooting;
+ N == ?GOAWAY; N == goaway;
+ N == ?BUSY; N == busy).
+
%% RFC 3588:
%%
%% Timeout An application-defined timer has expired while waiting
%% for some event.
%%
-define(EVENT_TIMEOUT, 10000).
+%% Default timeout for reception of CER/CEA.
-%% How long to wait for a DPA in response to DPR before simply
-%% aborting. Used to distinguish between shutdown and not but there's
-%% not really any need. Stopping a service will require a timeout if
-%% the peer doesn't answer DPR so the value should be short-ish.
+%% Default timeout for DPA in response to DPR. A bit short but the
+%% timeout used to be hardcoded. (So it could be worse.)
-define(DPA_TIMEOUT, 1000).
+-type uint32() :: diameter:'Unsigned32'().
+
-record(state,
- {state = 'Wait-Conn-Ack' %% state of RFC 3588 Peer State Machine
- :: 'Wait-Conn-Ack' | recv_CER | 'Wait-CEA' | 'Open',
+ {state %% of RFC 3588 Peer State Machine
+ :: 'Wait-Conn-Ack' %% old code
+ | {'Wait-Conn-Ack', uint32()}
+ | recv_CER
+ | 'Wait-CEA' %% old code
+ | {'Wait-CEA', uint32(), uint32()}
+ | 'Open',
mode :: accept | connect | {connect, reference()},
- parent :: pid(),
- transport :: pid(),
+ parent :: pid(), %% watchdog process
+ transport :: pid(), %% transport process
service :: #diameter_service{},
- dpr = false :: false | {diameter:'Unsigned32'(),
- diameter:'Unsigned32'()}}).
+ dpr = false :: false | {uint32(), uint32()}}).
%% | hop by hop and end to end identifiers
%% There are non-3588 states possible as a consequence of 5.6.1 of the
@@ -121,7 +142,10 @@
%%% Output: Pid
%%% ---------------------------------------------------------------------------
--spec start(T, [Opt], #diameter_service{})
+-spec start(T, [Opt], #diameter_service{} %% from old code
+ | {diameter:sequence(),
+ diameter:restriction(),
+ #diameter_service{}})
-> pid()
when T :: {connect|accept, diameter:transport_ref()},
Opt :: diameter:transport_opt().
@@ -131,10 +155,8 @@
%% specified on the transport in question. Check here that the list is
%% still non-empty.
-start({_,_} = Type, Opts, #diameter_service{applications = Apps} = Svc) ->
- [] /= Apps orelse ?ERROR({no_apps, Type, Opts}),
- T = {self(), Type, Opts, Svc},
- {ok, Pid} = diameter_peer_fsm_sup:start_child(T),
+start({_,_} = Type, Opts, MS) ->
+ {ok, Pid} = diameter_peer_fsm_sup:start_child({self(), Type, Opts, MS}),
Pid.
start_link(T) ->
@@ -153,15 +175,28 @@ init(T) ->
proc_lib:init_ack({ok, self()}),
gen_server:enter_loop(?MODULE, [], i(T)).
-i({WPid, T, Opts, #diameter_service{capabilities = Caps} = Svc}) ->
- putr(?DWA_KEY, dwa(Caps)),
+i({WPid, Type, Opts, #diameter_service{} = Svc}) -> %% from old code
+ i({WPid, Type, Opts, {?NOMASK, [node() | nodes()], Svc}});
+
+i({WPid, T, Opts, {Mask, Nodes, #diameter_service{applications = Apps,
+ capabilities = LCaps}
+ = Svc}}) ->
+ [] /= Apps orelse ?ERROR({no_apps, T, Opts}),
+ putr(?DWA_KEY, dwa(LCaps)),
{M, Ref} = T,
diameter_stats:reg(Ref),
- {[Ts], Rest} = proplists:split(Opts, [capabilities_cb]),
- putr(?CB_KEY, {Ref, [F || {_,F} <- Ts]}),
+ {[Cs,Ds], Rest} = proplists:split(Opts, [capabilities_cb, disconnect_cb]),
+ putr(?CB_KEY, {Ref, [F || {_,F} <- Cs]}),
+ putr(?DPR_KEY, [F || {_, F} <- Ds]),
+ putr(?REF_KEY, Ref),
+ putr(?SEQUENCE_KEY, Mask),
+ putr(?RESTRICT_KEY, Nodes),
erlang:monitor(process, WPid),
{TPid, Addrs} = start_transport(T, Rest, Svc),
- #state{parent = WPid,
+ Tmo = proplists:get_value(capx_timeout, Opts, ?EVENT_TIMEOUT),
+ ?IS_TIMEOUT(Tmo) orelse ?ERROR({invalid, {capx_timeout, Tmo}}),
+ #state{state = {'Wait-Conn-Ack', Tmo},
+ parent = WPid,
transport = TPid,
mode = M,
service = svc(Svc, Addrs)}.
@@ -174,8 +209,8 @@ i({WPid, T, Opts, #diameter_service{capabilities = Caps} = Svc}) ->
%% watchdog start (start/2) succeeds regardless so as not to crash the
%% service.
-start_transport(T, Opts, #diameter_service{capabilities = Caps} = Svc) ->
- Addrs0 = Caps#diameter_caps.host_ip_address,
+start_transport(T, Opts, #diameter_service{capabilities = LCaps} = Svc) ->
+ Addrs0 = LCaps#diameter_caps.host_ip_address,
start_transport(Addrs0, {T, Opts, Svc}).
start_transport(Addrs0, T) ->
@@ -198,9 +233,9 @@ svc(Svc, []) ->
svc(Svc, Addrs) ->
readdr(Svc, Addrs).
-readdr(#diameter_service{capabilities = Caps0} = Svc, Addrs) ->
- Caps = Caps0#diameter_caps{host_ip_address = Addrs},
- Svc#diameter_service{capabilities = Caps}.
+readdr(#diameter_service{capabilities = LCaps0} = Svc, Addrs) ->
+ LCaps = LCaps0#diameter_caps{host_ip_address = Addrs},
+ Svc#diameter_service{capabilities = LCaps}.
%% The 4-tuple Data returned from diameter_peer:start/1 identifies the
%% transport module/config use to start the transport process in
@@ -299,13 +334,17 @@ eraser(Key) ->
%% transition/2
+%% Started in old code.
+transition(T, #state{state = 'Wait-Conn-Ack' = PS} = S) ->
+ transition(T, S#state{state = {PS, ?EVENT_TIMEOUT}});
+
%% Connection to peer.
transition({diameter, {TPid, connected, Remote}},
#state{transport = TPid,
state = PS,
mode = M}
= S) ->
- 'Wait-Conn-Ack' = PS, %% assert
+ {'Wait-Conn-Ack', _} = PS, %% assert
connect = M, %%
keep_transport(TPid),
send_CER(S#state{mode = {M, Remote}});
@@ -317,11 +356,11 @@ transition({diameter, {TPid, connected}},
mode = M,
parent = Pid}
= S) ->
- 'Wait-Conn-Ack' = PS, %% assert
+ {'Wait-Conn-Ack', Tmo} = PS, %% assert
accept = M, %%
keep_transport(TPid),
Pid ! {accepted, self()},
- start_timer(S#state{state = recv_CER});
+ start_timer(Tmo, S#state{state = recv_CER});
%% Connection established after receiving a connection_timeout
%% message. This may be followed by an incoming message which arrived
@@ -335,7 +374,7 @@ transition({diameter, {_, connected, _}}, _) ->
%% Connection has timed out: start an alternate.
transition({connection_timeout = T, TPid},
#state{transport = TPid,
- state = 'Wait-Conn-Ack'}
+ state = {'Wait-Conn-Ack', _}}
= S) ->
exit(TPid, {shutdown, T}),
start_next(S);
@@ -350,7 +389,7 @@ transition({diameter, {recv, Pkt}}, S) ->
%% Timeout when still in the same state ...
transition({timeout, PS}, #state{state = PS}) ->
- stop;
+ {stop, {capx(PS), timeout}};
%% ... or not.
transition({timeout, _}, _) ->
@@ -361,25 +400,19 @@ transition({send, Msg}, #state{transport = TPid}) ->
send(TPid, Msg),
ok;
-%% Request for graceful shutdown.
-transition({shutdown, Pid}, #state{parent = Pid, dpr = false} = S) ->
- dpr(?GOAWAY, S);
-transition({shutdown, Pid}, #state{parent = Pid}) ->
- ok;
-
-%% Application shutdown.
-transition(shutdown, #state{dpr = false} = S) ->
- dpr(?REBOOT, S);
-transition(shutdown, _) -> %% DPR already send: ensure expected timeout
- dpa_timer(),
+%% Messages from old (diameter_service) code.
+transition(shutdown = T, #state{parent = Pid} = S) ->
+ transition({T, Pid, service}, S); %% Reason irrelevant: old code has no cb
+
+%% Request for graceful shutdown at remove_transport, stop_service or
+%% application shutdown.
+transition({shutdown = T, Pid}, S) ->
+ transition({T, Pid, transport}, S);
+transition({shutdown, Pid, Reason}, #state{parent = Pid, dpr = false} = S) ->
+ dpr(Reason, S);
+transition({shutdown, Pid, _}, #state{parent = Pid}) ->
ok;
-%% Request to close the transport connection.
-transition({close = T, Pid}, #state{parent = Pid,
- transport = TPid}) ->
- diameter_peer:close(TPid),
- {stop, T};
-
%% DPA reception has timed out.
transition(dpa_timeout, _) ->
stop;
@@ -411,6 +444,11 @@ transition({state, Pid}, #state{state = S, transport = TPid}) ->
%% Crash on anything unexpected.
+capx(recv_CER) ->
+ 'CER';
+capx({'Wait-CEA', _, _}) ->
+ 'CEA'.
+
%% start_next/1
start_next(#state{service = Svc0} = S) ->
@@ -426,18 +464,23 @@ start_next(#state{service = Svc0} = S) ->
%% send_CER/1
-send_CER(#state{mode = {connect, Remote},
- service = #diameter_service{capabilities = Caps},
+send_CER(#state{state = {'Wait-Conn-Ack', Tmo},
+ mode = {connect, Remote},
+ service = #diameter_service{capabilities = LCaps},
transport = TPid}
= S) ->
- OH = Caps#diameter_caps.origin_host,
+ OH = LCaps#diameter_caps.origin_host,
req_send_CER(OH, Remote)
orelse
- close({already_connected, Remote, Caps}, S),
+ close({already_connected, Remote, LCaps}, S),
CER = build_CER(S),
?LOG(send, 'CER'),
- send(TPid, encode(CER)),
- start_timer(S#state{state = 'Wait-CEA'}).
+ #diameter_packet{header = #diameter_header{end_to_end_id = Eid,
+ hop_by_hop_id = Hid}}
+ = Pkt
+ = encode(CER),
+ send(TPid, Pkt),
+ start_timer(Tmo, S#state{state = {'Wait-CEA', Hid, Eid}}).
%% Register ourselves as connecting to the remote endpoint in
%% question. This isn't strictly necessary since a peer implementing
@@ -449,23 +492,36 @@ send_CER(#state{mode = {connect, Remote},
req_send_CER(OriginHost, Remote) ->
register_everywhere({?MODULE, connection, OriginHost, {remote, Remote}}).
-%% start_timer/1
+%% start_timer/2
-start_timer(#state{state = PS} = S) ->
- erlang:send_after(?EVENT_TIMEOUT, self(), {timeout, PS}),
+start_timer(Tmo, #state{state = PS} = S) ->
+ erlang:send_after(Tmo, self(), {timeout, PS}),
S.
%% build_CER/1
-build_CER(#state{service = #diameter_service{capabilities = Caps}}) ->
- {ok, CER} = diameter_capx:build_CER(Caps),
+build_CER(#state{service = #diameter_service{capabilities = LCaps}}) ->
+ {ok, CER} = diameter_capx:build_CER(LCaps),
CER.
%% encode/1
encode(Rec) ->
- #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Rec),
- Bin.
+ Seq = diameter_session:sequence(sequence()),
+ Hdr = #diameter_header{version = ?DIAMETER_VERSION,
+ end_to_end_id = Seq,
+ hop_by_hop_id = Seq},
+ diameter_codec:encode(?BASE, #diameter_packet{header = Hdr,
+ msg = Rec}).
+
+sequence() ->
+ case getr(?SEQUENCE_KEY) of
+ {_,_} = Mask ->
+ Mask;
+ undefined -> %% started in old code
+ putr(?SEQUENCE_KEY, ?NOMASK),
+ ?NOMASK
+ end.
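For orientation: the mask fetched by sequence/0 is what diameter_session:sequence/1 consumes when generating the End-to-End and Hop-by-Hop identifiers above. A rough sketch only, assuming a {H, N} mask fixes the topmost 32-N bits to H and draws the low N bits from a counter; the helper below is hypothetical, not diameter_session's implementation.

%% Hypothetical illustration of a sequence mask: with {H, N}, H occupies
%% the high 32-N bits and a counter supplies the low N bits. ?NOMASK =
%% {0,32} leaves the whole 32-bit identifier to the counter.
masked_id({H, N}, Counter) ->
    (H bsl N) bor (Counter band ((1 bsl N) - 1)).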
%% recv/2
@@ -524,7 +580,14 @@ discard(Reason, F, A) ->
%% rcv/3
%% Incoming CEA.
-rcv('CEA', Pkt, #state{state = 'Wait-CEA'} = S) ->
+rcv('CEA',
+ #diameter_packet{header = #diameter_header{end_to_end_id = Eid,
+ hop_by_hop_id = Hid}}
+ = Pkt,
+ #state{state = {'Wait-CEA' = T, Hid, Eid}}
+ = S) ->
+ handle_CEA(Pkt, S#state{state = T});
+rcv('CEA', Pkt, #state{state = 'Wait-CEA'} = S) -> %% old code
handle_CEA(Pkt, S);
%% Incoming CER
@@ -544,16 +607,16 @@ rcv(N, Pkt, S)
N == 'DPR' ->
handle_request(N, Pkt, S);
-%% DPA even though we haven't sent DPR: ignore.
-rcv('DPA', _Pkt, #state{dpr = false}) ->
- ok;
-
-%% DPA in response to DPR. We could check the sequence numbers but
-%% don't bother, just close.
-rcv('DPA' = N, _Pkt, #state{transport = TPid}) ->
+%% DPA in response to DPR and with the expected identifiers.
+rcv('DPA' = N,
+ #diameter_packet{header = #diameter_header{end_to_end_id = Eid,
+ hop_by_hop_id = Hid}},
+ #state{transport = TPid,
+ dpr = {Hid, Eid}}) ->
diameter_peer:close(TPid),
{stop, N};
+%% Ignore anything else, an unsolicited DPA in particular.
rcv(_, _, _) ->
ok.
@@ -771,8 +834,8 @@ a('CER', #diameter_caps{vendor_id = Vid,
{'Product-Name', Name},
{'Origin-State-Id', OSI}];
-a('DPR', #diameter_caps{origin_host = Host,
- origin_realm = Realm}) ->
+a('DPR', #diameter_caps{origin_host = {Host, _},
+ origin_realm = {Realm, _}}) ->
['DPA', {'Origin-Host', Host},
{'Origin-Realm', Realm}].
@@ -880,7 +943,9 @@ rejected(N)
%% open/5
-open(Pkt, SupportedApps, Caps, {Type, IS}, #state{parent = Pid} = S) ->
+open(Pkt, SupportedApps, Caps, {Type, IS}, #state{parent = Pid,
+ service = Svc}
+ = S) ->
#diameter_caps{origin_host = {_,_} = H,
inband_security_id = {LS,_}}
= Caps,
@@ -888,7 +953,9 @@ open(Pkt, SupportedApps, Caps, {Type, IS}, #state{parent = Pid} = S) ->
tls_ack(lists:member(?TLS, LS), Caps, Type, IS, S),
Pid ! {open, self(), H, {Caps, SupportedApps, Pkt}},
- S#state{state = 'Open'}.
+ %% Replace capabilities record with local/remote pairs.
+ S#state{state = 'Open',
+ service = Svc#diameter_service{capabilities = Caps}}.
%% We've advertised TLS support: tell the transport the result
%% and expect a reply when the handshake is complete.
@@ -941,38 +1008,148 @@ dwa(#diameter_caps{origin_host = OH,
{'Origin-State-Id', OSI}].
%% dpr/2
+%%
+%% The RFC isn't clear on whether DPR should be sent in a non-Open
+%% state. The Peer State Machine transitions it documents aren't
+%% exhaustive (no Stop in Wait-I-CEA for example) so assume it's up to
+%% the implementation and transition to Closed (ie. die) if we haven't
+%% yet reached Open.
+
+%% Connection is open, DPR has not been sent.
+dpr(Reason, #state{state = 'Open',
+ dpr = false,
+ service = #diameter_service{capabilities = Caps}}
+ = S) ->
+ case getr(?DPR_KEY) of
+ CBs when is_list(CBs) ->
+ Ref = getr(?REF_KEY),
+ Peer = {self(), Caps},
+ dpr(CBs, [Reason, Ref, Peer], S);
+ undefined -> %% started in old code
+ send_dpr(Reason, [], S)
+ end;
-dpr(Cause, #state{transport = TPid,
- service = #diameter_service{capabilities = Caps}}
- = S) ->
- #diameter_caps{origin_host = OH,
- origin_realm = OR}
+%% Connection is open, DPR already sent.
+dpr(_, #state{state = 'Open'}) ->
+ ok;
+
+%% Connection not open.
+dpr(_Reason, _S) ->
+ stop.
+
+%% dpr/3
+%%
+%% Note that an implementation that wants to do something
+%% transport_module-specific can lookup the pid of the transport
+%% process and contact it. (eg. diameter:service_info/2)
+
+dpr([CB|Rest], [Reason | _] = Args, S) ->
+ try diameter_lib:eval([CB | Args]) of
+ {dpr, Opts} when is_list(Opts) ->
+ send_dpr(Reason, Opts, S);
+ dpr ->
+ send_dpr(Reason, [], S);
+ close = T ->
+ {stop, {disconnect_cb, T}};
+ ignore ->
+ dpr(Rest, Args, S);
+ T ->
+ No = {disconnect_cb, T},
+ diameter_lib:error_report(invalid, No),
+ {stop, No}
+ catch
+ E:R ->
+ No = {disconnect_cb, E, R, ?STACK},
+ diameter_lib:error_report(failure, No),
+ {stop, No}
+ end;
+
+dpr([], [Reason | _], S) ->
+ send_dpr(Reason, [], S).
+
+-record(opts, {cause, timeout = ?DPA_TIMEOUT}).
+
+send_dpr(Reason, Opts, #state{transport = TPid,
+ service = #diameter_service{capabilities = Caps}}
+ = S) ->
+ #opts{cause = Cause, timeout = Tmo}
+ = lists:foldl(fun opt/2,
+ #opts{cause = case Reason of
+ transport -> ?GOAWAY;
+ _ -> ?REBOOT
+ end,
+ timeout = ?DPA_TIMEOUT},
+ Opts),
+ #diameter_caps{origin_host = {OH, _},
+ origin_realm = {OR, _}}
= Caps,
- Bin = encode(['DPR', {'Origin-Host', OH},
+ #diameter_packet{header = #diameter_header{end_to_end_id = Eid,
+ hop_by_hop_id = Hid}}
+ = Pkt
+ = encode(['DPR', {'Origin-Host', OH},
{'Origin-Realm', OR},
{'Disconnect-Cause', Cause}]),
- send(TPid, Bin),
- dpa_timer(),
+ send(TPid, Pkt),
+ dpa_timer(Tmo),
?LOG(send, 'DPR'),
- S#state{dpr = diameter_codec:sequence_numbers(Bin)}.
-
-dpa_timer() ->
- erlang:send_after(?DPA_TIMEOUT, self(), dpa_timeout).
+ S#state{dpr = {Hid, Eid}}.
+
+opt({timeout, Tmo}, Rec)
+ when ?IS_TIMEOUT(Tmo) ->
+ Rec#opts{timeout = Tmo};
+opt({cause, Cause}, Rec)
+ when ?IS_CAUSE(Cause) ->
+ Rec#opts{cause = cause(Cause)};
+opt(T, _) ->
+ ?ERROR({invalid_option, T}).
+
+cause(rebooting) -> ?REBOOT;
+cause(goaway) -> ?GOAWAY;
+cause(busy) -> ?BUSY;
+cause(N)
+ when ?IS_CAUSE(N) ->
+ N;
+cause(N) ->
+ ?ERROR({invalid_cause, N}).
+
+dpa_timer(Tmo) ->
+ erlang:send_after(Tmo, self(), dpa_timeout).
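For illustration, a disconnect callback compatible with dpr/3 and opt/2 above could look like the sketch below; the Reason values (transport | service | application) are those carried by the shutdown messages handled earlier, the cause/timeout options are the ones opt/2 accepts, and the function name is hypothetical.

%% Hypothetical disconnect callback, evaluated by dpr/3 with the
%% [Reason, Ref, Peer] argument list built in dpr/2.
disconnect(transport, _Ref, _Peer) ->    %% remove_transport
    {dpr, [{cause, goaway}, {timeout, 5000}]};
disconnect(service, _Ref, _Peer) ->      %% stop_service
    dpr;                                 %% defaults: ?REBOOT and ?DPA_TIMEOUT
disconnect(application, _Ref, _Peer) ->  %% application shutdown
    close.                               %% skip DPR, stop immediately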
%% register_everywhere/1
%%
%% Register a term and ensure it's not registered elsewhere. Note that
%% two processes that simultaneously register the same term may well
%% both fail to do so, so this isn't foolproof.
+%%
+%% Everywhere is no longer everywhere, it's wherever a
+%% restrict_connections service_opt() specifies.
register_everywhere(T) ->
- diameter_reg:add_new(T)
- andalso unregistered(T).
+ reg(getr(?RESTRICT_KEY), T).
+
+reg(Nodes, T) ->
+ add(lists:member(node(), Nodes), T) andalso unregistered(Nodes, T).
+
+add(true, T) ->
+ diameter_reg:add_new(T);
+add(false, T) ->
+ diameter_reg:add(T).
+
+%% unregistered
+%%
+%% Ensure that the term in question isn't registered on other nodes.
+
+unregistered(Nodes, T) ->
+ {ResL, _} = rpc:multicall(Nodes, ?MODULE, match, [{node(), T}]),
+ lists:all(fun nomatch/1, ResL).
+
+nomatch({badrpc, {'EXIT', {undef, _}}}) -> %% no diameter on remote node
+ true;
+nomatch(L) ->
+ [] == L.
-unregistered(T) ->
- {ResL, _} = rpc:multicall(?MODULE, match, [{node(), T}]),
- lists:all(fun(L) -> [] == L end, ResL).
+%% match/1
match({Node, _})
when Node == node() ->
diff --git a/lib/diameter/src/base/diameter_reg.erl b/lib/diameter/src/base/diameter_reg.erl
index 882b9da238..619b12ecad 100644
--- a/lib/diameter/src/base/diameter_reg.erl
+++ b/lib/diameter/src/base/diameter_reg.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -30,7 +30,8 @@
add_new/1,
del/1,
repl/2,
- match/1]).
+ match/1,
+ wait/1]).
-export([start_link/0]).
@@ -65,27 +66,22 @@
%% Table entry containing the Term -> Pid mapping.
-define(MAPPING(Term, Pid), {Term, Pid}).
--record(state, {id = now()}).
-
-%%% ----------------------------------------------------------
-%%% # add(T)
-%%%
-%%% Input: Term = term()
-%%%
-%%% Output: true
-%%%
-%%% Description: Associate the specified term with self(). The list of pids
-%%% having this or other assocations can be retrieved using
-%%% match/1.
-%%%
-%%% An association is removed when the calling process dies
-%%% or as a result of calling del/1. Adding the same term
-%%% more than once is equivalent to adding it exactly once.
-%%%
-%%% Note that since match/1 takes a pattern as argument,
-%%% specifying a term that contains match variables is
-%%% probably not a good idea
-%%% ----------------------------------------------------------
+-record(state, {id = now(),
+ q = []}). %% [{From, Pat}]
+
+%% ===========================================================================
+%% # add(T)
+%%
+%% Associate the specified term with self(). The list of pids having
+%% this or other associations can be retrieved using match/1.
+%%
+%% An association is removed when the calling process dies or as a
+%% result of calling del/1. Adding the same term more than once is
+%% equivalent to adding it exactly once.
+%%
+%% Note that since match/1 takes a pattern as argument, specifying a
+%% term that contains match variables is probably not a good idea
+%% ===========================================================================
-spec add(any())
-> true.
@@ -93,17 +89,12 @@
add(T) ->
call({add, fun ets:insert/2, T, self()}).
-%%% ----------------------------------------------------------
-%%% # add_new(T)
-%%%
-%%% Input: T = term()
-%%%
-%%% Output: true | false
-%%%
-%%% Description: Like add/1 but only one process is allowed to have the
-%%% the association, false being returned if an association
-%%% already exists.
-%%% ----------------------------------------------------------
+%% ===========================================================================
+%% # add_new(T)
+%%
+%% Like add/1 but only one process is allowed to have the
+%% association, false being returned if an association already exists.
+%% ===========================================================================
-spec add_new(any())
-> boolean().
@@ -111,16 +102,12 @@ add(T) ->
add_new(T) ->
call({add, fun insert_new/2, T, self()}).
-%%% ----------------------------------------------------------
-%%% # repl(T, NewT)
-%%%
-%%% Input: T, NewT = term()
-%%%
-%%% Output: true | false
-%%%
-%%% Description: Like add/1 but only replace an existing association on T,
-%%% false being returned if it doesn't exist.
-%%% ----------------------------------------------------------
+%% ===========================================================================
+%% # repl(T, NewT)
+%%
+%% Like add/1 but only replace an existing association on T, false
+%% being returned if it doesn't exist.
+%% ===========================================================================
-spec repl(any(), any())
-> boolean().
@@ -128,15 +115,11 @@ add_new(T) ->
repl(T, U) ->
call({repl, T, U, self()}).
-%%% ----------------------------------------------------------
-%%% # del(Term)
-%%%
-%%% Input: Term = term()
-%%%
-%%% Output: true
-%%%
-%%% Description: Remove any existing association of Term with self().
-%%% ----------------------------------------------------------
+%% ===========================================================================
+%% # del(Term)
+%%
+%% Remove any existing association of Term with self().
+%% ===========================================================================
-spec del(any())
-> true.
@@ -144,20 +127,16 @@ repl(T, U) ->
del(T) ->
call({del, T, self()}).
-%%% ----------------------------------------------------------
-%%% # match(Pat)
-%%%
-%%% Input: Pat = pattern in the sense of ets:match_object/2.
-%%%
-%%% Output: list of {Term, Pid}
-%%%
-%%% Description: Return the list of associations whose Term, as specified
-%%% to add/1 or add_new/1, matches the specified pattern.
-%%%
-%%% Note that there's no guarantee that the returned processes
-%%% are still alive. (Although one that isn't will soon have
-%%% its associations removed.)
-%%% ----------------------------------------------------------
+%% ===========================================================================
+%% # match(Pat)
+%%
+%% Return the list of associations whose Term, as specified to add/1
+%% or add_new/1, matches the specified pattern.
+%%
+%% Note that there's no guarantee that the returned processes are
+%% still alive. (Although one that isn't will soon have its
+%% associations removed.)
+%% ===========================================================================
-spec match(tuple())
-> [{term(), pid()}].
@@ -165,9 +144,17 @@ del(T) ->
match(Pat) ->
ets:match_object(?TABLE, ?MAPPING(Pat, '_')).
-%% ---------------------------------------------------------
-%% EXPORTED INTERNAL FUNCTIONS
-%% ---------------------------------------------------------
+%% ===========================================================================
+%% # wait(Pat)
+%%
+%% Like match/1 but return only when the result is non-empty or fails.
+%% It's up to the caller to ensure that the wait won't be forever.
+%% ===========================================================================
+
+wait(Pat) ->
+ call({wait, Pat}).
+
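A small usage sketch (names hypothetical): a process blocks in wait/1 until a matching association exists, either because the table already holds one or because the pending queue added further down replies when a matching add/1 arrives.

%% Hypothetical use of wait/1: the spawned process returns as soon as a
%% {demo, _} association exists, whichever of wait/1 and add/1 runs first.
demo() ->
    Pid = spawn(fun() ->
                        [{_, _} | _] = diameter_reg:wait({demo, '_'})
                end),
    true = diameter_reg:add({demo, make_ref()}),
    Pid.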
+%% ===========================================================================
start_link() ->
ServerName = {local, ?SERVER},
@@ -182,7 +169,7 @@ uptime() ->
%% pids/0
%%
-%% Output: list of {Pid, [Term, ...]}
+%% Return: list of {Pid, [Term, ...]}
pids() ->
to_list(fun swap/1).
@@ -202,89 +189,100 @@ id(T) -> T.
%% terms/0
%%
-%% Output: list of {Term, [Pid, ...]}
+%% Return: list of {Term, [Pid, ...]}
terms() ->
to_list(fun id/1).
swap({X,Y}) -> {Y,X}.
-%%% ----------------------------------------------------------
-%%% # init(Role)
-%%%
-%%% Output: {ok, State}
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # init/1
+%% ----------------------------------------------------------
init(_) ->
ets:new(?TABLE, [bag, named_table]),
{ok, #state{}}.
-%%% ----------------------------------------------------------
-%%% # handle_call(Request, From, State)
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # handle_call/3
+%% ----------------------------------------------------------
-handle_call({add, Fun, Key, Pid}, _, State) ->
+handle_call(Req, From, S)
+ when not is_record(S, state) ->
+ handle_call(Req, From, upgrade(S));
+
+handle_call({add, Fun, Key, Pid}, _, S) ->
B = Fun(?TABLE, {Key, Pid}),
monitor(B andalso no_monitor(Pid), Pid),
- {reply, B, State};
+ {reply, B, pending(B, S)};
-handle_call({del, Key, Pid}, _, State) ->
- {reply, ets:delete_object(?TABLE, ?MAPPING(Key, Pid)), State};
+handle_call({del, Key, Pid}, _, S) ->
+ {reply, ets:delete_object(?TABLE, ?MAPPING(Key, Pid)), S};
-handle_call({repl, T, U, Pid}, _, State) ->
+handle_call({repl, T, U, Pid}, _, S) ->
MatchSpec = [{?MAPPING('$1', Pid),
[{'=:=', '$1', {const, T}}],
['$_']}],
- {reply, repl(ets:select(?TABLE, MatchSpec), U, Pid), State};
+ {reply, repl(ets:select(?TABLE, MatchSpec), U, Pid), S};
+
+handle_call({wait, Pat}, From, #state{q = Q} = S) ->
+ case find(Pat) of
+ {ok, L} ->
+ {reply, L, S};
+ false ->
+ {noreply, S#state{q = [{From, Pat} | Q]}}
+ end;
-handle_call(state, _, State) ->
- {reply, State, State};
+handle_call(state, _, S) ->
+ {reply, S, S};
-handle_call(uptime, _, #state{id = Time} = State) ->
- {reply, diameter_lib:now_diff(Time), State};
+handle_call(uptime, _, #state{id = Time} = S) ->
+ {reply, diameter_lib:now_diff(Time), S};
-handle_call(Req, From, State) ->
+handle_call(Req, From, S) ->
?UNEXPECTED([Req, From]),
- {reply, nok, State}.
+ {reply, nok, S}.
-%%% ----------------------------------------------------------
-%%% # handle_cast(Request, State)
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # handle_cast/2
+%% ----------------------------------------------------------
-handle_cast(Msg, State)->
+handle_cast(Msg, S)->
?UNEXPECTED([Msg]),
- {noreply, State}.
+ {noreply, S}.
-%%% ----------------------------------------------------------
-%%% # handle_info(Request, State)
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # handle_info/2
+%% ----------------------------------------------------------
-handle_info({'DOWN', MRef, process, Pid, _}, State) ->
+handle_info({'DOWN', MRef, process, Pid, _}, S) ->
ets:delete_object(?TABLE, ?MONITOR(Pid, MRef)),
ets:match_delete(?TABLE, ?MAPPING('_', Pid)),
- {noreply, State};
+ {noreply, S};
-handle_info(Info, State) ->
+handle_info(Info, S) ->
?UNEXPECTED([Info]),
- {noreply, State}.
+ {noreply, S}.
-%%% ----------------------------------------------------------
-%%% # terminate(Reason, State)
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # terminate/2
+%% ----------------------------------------------------------
terminate(_Reason, _State)->
ok.
-%%% ----------------------------------------------------------
-%%% # code_change(OldVsn, State, Extra)
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # code_change/3
+%% ----------------------------------------------------------
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-%% ---------------------------------------------------------
-%% INTERNAL FUNCTIONS
-%% ---------------------------------------------------------
+%% ===========================================================================
+
+upgrade(S) ->
+ #state{} = list_to_tuple(tuple_to_list(S) ++ [[]]).
monitor(true, Pid) ->
ets:insert(?TABLE, ?MONITOR(Pid, erlang:monitor(process, Pid)));
@@ -321,6 +319,37 @@ repl([?MAPPING(_, Pid) = M], Key, Pid) ->
repl([], _, _) ->
false.
+%% pending/1
+
+pending(true, #state{q = [_|_] = Q} = S) ->
+ S#state{q = q(lists:reverse(Q), [])}; %% retain reply order
+pending(_, S) ->
+ S.
+
+q([], Q) ->
+ Q;
+q([{From, Pat} = T | Rest], Q) ->
+ case find(Pat) of
+ {ok, L} ->
+ gen_server:reply(From, L),
+ q(Rest, Q);
+ false ->
+ q(Rest, [T|Q])
+ end.
+
+%% find/1
+
+find(Pat) ->
+ try match(Pat) of
+ [] ->
+ false;
+ L ->
+ {ok, L}
+ catch
+ _:_ ->
+ {ok, []}
+ end.
+
%% call/1
call(Request) ->
diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl
index 725cccda1e..a4a0b80348 100644
--- a/lib/diameter/src/base/diameter_service.erl
+++ b/lib/diameter/src/base/diameter_service.erl
@@ -64,7 +64,7 @@
-include_lib("diameter/include/diameter.hrl").
-include("diameter_internal.hrl").
-%% The "old" states maintained in this module historically.
+%% The states mirrored by peer_up/peer_down callbacks.
-define(STATE_UP, up).
-define(STATE_DOWN, down).
@@ -107,6 +107,12 @@
%% process.
-define(STATE_TABLE, ?MODULE).
+%% The default sequence mask.
+-define(NOMASK, {0,32}).
+
+%% The default restrict_connections.
+-define(RESTRICT, nodes).
+
%% Workaround for dialyzer's lack of understanding of match specs.
-type match(T)
:: T | '_' | '$1' | '$2' | '$3' | '$4'.
@@ -114,15 +120,18 @@
%% State of service gen_server.
-record(state,
{id = now(),
- service_name, %% as passed to start_service/2, key in ?STATE_TABLE
+ service_name, %% as passed to start_service/2, key in ?STATE_TABLE
service :: #diameter_service{},
- peerT = ets_new(peers) :: ets:tid(), %% #peer{} at start_fsm
- connT = ets_new(conns) :: ets:tid(), %% #conn{} at connection_up
- share_peers = false :: boolean(), %% broadcast peers to remote nodes?
- use_shared_peers = false :: boolean(), %% use broadcasted peers?
+ peerT = ets_new(peers) :: ets:tid(),%% #peer{} at start_fsm
+ connT = ets_new(conns) :: ets:tid(),%% #conn{} at connection_up/reopen
shared_peers = ?Dict:new(), %% Alias -> [{TPid, Caps}, ...]
local_peers = ?Dict:new(), %% Alias -> [{TPid, Caps}, ...]
- monitor = false :: false | pid()}). %% process to die with
+ monitor = false :: false | pid(), %% process to die with
+ options
+ :: [{sequence, diameter:sequence()} %% sequence mask
+ | {restrict_connections, diameter:restriction()}
+ | {share_peers, boolean()} %% broadcast peers to remote nodes?
+ | {use_shared_peers, boolean()}]}).%% use broadcasted peers?
%% shared_peers reflects the peers broadcast from remote nodes. Note
%% that the state term itself doesn't change, which is relevant for
%% the stateless application callbacks since the state is retrieved
@@ -130,7 +139,12 @@
%% service record is used to determine whether or not we need to call
%% the process for a pick_peer callback.
-%% Record representing a watchdog process.
+%% Record representing a watchdog process as implemented by
+%% diameter_watchdog. The term "peer" here is historical, made
+%% especially confusing by the fact that a peer_ref() in the
+%% documentation is the key of a #conn{} record, not a #peer{} record.
+%% The name is also unfortunate given the meaning of peer in the
+%% Diameter sense.
-record(peer,
{pid :: match(pid()),
type :: match(connect | accept),
@@ -140,9 +154,15 @@
:: match(op_state() | {op_state(), wd_state()}),
started = now(), %% at process start
conn = false :: match(boolean() | pid())}).
- %% true at accept, pid() at connection_up (connT key)
-
-%% Record representing a peer_fsm process.
+ %% true at accepted, pid() at connection_up or reopen
+
+%% Record representing a peer process as implemented by
+%% diameter_peer_fsm. The term "conn" is historical. Despite the name
+%% here, comments refer to watchdog and peer processes, that are keys
+%% in #peer{} and #conn{} records respectively. To add to the
+%% confusion, a #request.transport is a peer process = key in a
+%% #conn{} record. The actual transport process (that the peer process
+%% knows about and that has a transport connection) isn't seen here.
-record(conn,
{pid :: pid(),
apps :: [{0..16#FFFFFFFF, diameter:app_alias()}], %% {Id, Alias}
@@ -156,10 +176,9 @@
handler :: match(pid()), %% request process
transport :: match(pid()), %% peer process
caps :: match(#diameter_caps{}),
- app :: match(diameter:app_alias()), %% #diameter_app.alias
- dictionary :: match(module()), %% #diameter_app.dictionary
- module :: match([module() | list()]),
- %% #diameter_app.module
+ app :: match(diameter:app_alias()),%% #diameter_app.alias
+ dictionary :: match(module()), %% #diameter_app.dictionary
+ module :: match([module() | list()]), %% #diameter_app.module
filter :: match(diameter:peer_filter()),
packet :: match(#diameter_packet{})}).
@@ -170,20 +189,6 @@
timeout = ?DEFAULT_TIMEOUT :: 0..16#FFFFFFFF,
detach = false :: boolean()}).
-%% Since RFC 3588 requires that a Diameter agent not modify End-to-End
-%% Identifiers, the possibility of explicitly setting an End-to-End
-%% Identifier would be needed to be able to implement an agent in
-%% which one side of the communication is not implemented on top of
-%% diameter. For example, Diameter being sent or received encapsulated
-%% in some other protocol, or even another Diameter stack in a
-%% non-Erlang environment. (Not that this is likely to be a normal
-%% case.)
-%%
-%% The implemented solution is not an option but to respect any header
-%% values set in a diameter_header record returned from a
-%% prepare_request callback. A call to diameter:call/4 can communicate
-%% values to the callback using the 'extra' option if so desired.
-
%%% ---------------------------------------------------------------------------
%%% # start(SvcName)
%%% ---------------------------------------------------------------------------
@@ -236,20 +241,20 @@ stop_transport(SvcName, [_|_] = Refs) ->
%%% ---------------------------------------------------------------------------
info(SvcName, Item) ->
- info_rc(call_service_by_name(SvcName, {info, Item})).
-
-info_rc({error, _}) ->
- undefined;
-info_rc(Info) ->
- Info.
+ case find_state(SvcName) of
+ #state{} = S ->
+ service_info(Item, S);
+ false ->
+ undefined
+ end.
%%% ---------------------------------------------------------------------------
%%% # receive_message(TPid, Pkt, MessageData)
%%% ---------------------------------------------------------------------------
-%% Handle an incoming message in the watchdog process. This used to
-%% come through the service process but this avoids that becoming a
-%% bottleneck.
+%% Handle an incoming Diameter message in the watchdog process. This
+%% used to come through the service process but this avoids that
+%% becoming a bottleneck.
receive_message(TPid, Pkt, T)
when is_pid(TPid) ->
@@ -329,21 +334,39 @@ call_rc(_, _, Sent) ->
%% In the process spawned for the outgoing request.
call(SvcName, App, Msg, Opts, Caller) ->
- c(ets:lookup(?STATE_TABLE, SvcName), App, Msg, Opts, Caller).
+ c(find_state(SvcName), App, Msg, Opts, Caller).
-c([#state{service_name = SvcName} = S], App, Msg, Opts, Caller) ->
+c(#state{service_name = Svc, options = [{_, Mask} | _]} = S,
+ App,
+ Msg,
+ Opts,
+ Caller) ->
case find_transport(App, Msg, Opts, S) of
{_,_,_} = T ->
- send_request(T, Msg, Opts, Caller, SvcName);
+ send_request(T, Mask, Msg, Opts, Caller, Svc);
false ->
{error, no_connection};
{error, _} = No ->
No
end;
-c([], _, _, _, _) ->
+c(false, _, _, _, _) ->
{error, no_service}.
+%% find_state/1
+
+find_state(SvcName) ->
+ fs(ets:lookup(?STATE_TABLE, SvcName)).
+
+fs([#state{} = S]) ->
+ S;
+
+fs([S]) -> %% inserted from old code
+ upgrade(S);
+
+fs([]) ->
+ false.
+
%% make_options/1
make_options(Options) ->
@@ -439,6 +462,10 @@ i(_, false) ->
%%% # handle_call(Req, From, State)
%%% ---------------------------------------------------------------------------
+handle_call(T, From, S)
+ when not is_record(S, state) ->
+ handle_call(T, From, upgrade(S));
+
handle_call(state, _, S) ->
{reply, S, S};
@@ -462,16 +489,25 @@ handle_call({pick_peer, Local, Remote, App}, _From, S) ->
handle_call({call_module, AppMod, Req}, From, S) ->
call_module(AppMod, Req, From, S);
+%% Call from old code.
handle_call({info, Item}, _From, S) ->
{reply, service_info(Item, S), S};
handle_call(stop, _From, S) ->
- shutdown(S),
+ shutdown(service, S),
{stop, normal, ok, S};
%% The server currently isn't guaranteed to be dead when the caller
%% gets the reply. We deal with this in the call to the server,
%% stating a monitor that waits for DOWN before returning.
+%% Watchdog is asking for the sequence mask.
+handle_call(sequence, _From, #state{options = [{_, Mask} | _]} = S) ->
+ {reply, Mask, S};
+
+%% Watchdog is asking for the nodes restriction.
+handle_call(restriction, _From, #state{options = [_,_,_,{_,R} | _]} = S) ->
+ {reply, R, S};
+
handle_call(Req, From, S) ->
unexpected(handle_call, [Req, From], S),
{reply, nok, S}.
@@ -488,15 +524,16 @@ handle_cast(Req, S) ->
%%% # handle_info(Req, State)
%%% ---------------------------------------------------------------------------
-handle_info(T,S) ->
+handle_info(T, #state{} = S) ->
case transition(T,S) of
ok ->
{noreply, S};
- #state{} = NS ->
- {noreply, NS};
{stop, Reason} ->
{stop, {shutdown, Reason}, S}
- end.
+ end;
+
+handle_info(T, S) ->
+ handle_info(T, upgrade(S)).
%% transition/2
@@ -507,15 +544,26 @@ transition({accepted, Pid, TPid}, S) ->
%% Peer process has a new open connection.
transition({connection_up, Pid, T}, S) ->
- connection_up(Pid, T, S);
+ connection_up(Pid, T, S),
+ ok;
+
+%% Watchdog has a new connection that will be opened after DW[RA]
+%% exchange. This message was added long after connection_up, to
+%% communicate the information as soon as it's available. Leave
+%% connection_up as it is for now, duplicated information and all.
+transition({reopen, Pid, T}, S) ->
+ reopen(Pid, T, S),
+ ok;
-%% Peer process has left state open.
+%% Watchdog has left state OKAY.
transition({connection_down, Pid}, S) ->
- connection_down(Pid, S);
+ connection_down(Pid, S),
+ ok;
-%% Peer process has returned to state open.
+%% Watchdog has returned to state OKAY.
transition({connection_up, Pid}, S) ->
- connection_up(Pid, S);
+ connection_up(Pid, S),
+ ok;
%% Accepting transport has lost connectivity.
transition({close, Pid}, S) ->
@@ -529,7 +577,7 @@ transition({reconnect, Pid}, S) ->
%% Watchdog is sending notification of a state transition. Note that
%% the connection_up/down messages pre-date this message and are
-%% still used. A 'watchdog' message will follow these and communicate
+%% still used. A watchdog message will follow these and communicate
%% the same state as was set in handling connection_up/down.
transition({watchdog, Pid, {TPid, From, To}}, #state{service_name = SvcName,
peerT = PeerT}) ->
@@ -539,21 +587,22 @@ transition({watchdog, Pid, {TPid, From, To}}, #state{service_name = SvcName,
insert(PeerT, P#peer{op_state = {OS, To}}),
send_event(SvcName, {watchdog, Ref, TPid, {From, To}, {T, Opts}}),
ok;
-%% Death of a peer process results in the removal of it's peer and any
-%% associated conn record when 'DOWN' is received (after this) but the
-%% states will be {?STATE_UP, ?WD_DOWN} for a short time. (No real
-%% problem since ?WD_* is only used in service_info.) We set ?WD_OKAY
-%% as a consequence of connection_up since we know a watchdog is
-%% coming. We can't set anything at connection_down since we don't
-%% know if the subsequent watchdog message will be ?WD_DOWN or
-%% ?WD_SUSPECT. We don't (yet) set ?STATE_* as a consequence of a
-%% watchdog message since this requires changing some of the matching
-%% on ?STATE_*.
-%%
-%% Death of a conn process results in connection_down followed by
-%% watchdog ?WD_DOWN. The latter doesn't result in the conn record
-%% being deleted since 'DOWN' from death of its peer doesn't (yet)
-%% deal with the record having been removed.
+%% Death of a watchdog process (#peer.pid) results in the removal of
+%% its peer and any associated conn record when 'DOWN' is received
+%% (after this) but the states will be {?STATE_UP, ?WD_DOWN} for a
+%% short time. (No real problem since ?WD_* is only used in
+%% service_info.) We set ?WD_OKAY as a consequence of connection_up
+%% since we know a watchdog is coming. We can't set anything at
+%% connection_down since we don't know if the subsequent watchdog
+%% message will be ?WD_DOWN or ?WD_SUSPECT. We don't (yet) set
+%% ?STATE_* as a consequence of a watchdog message since this requires
+%% changing some of the matching on ?STATE_*.
+%%
+%% Death of a peer process (#conn.pid, #peer.conn) results in
+%% connection_down followed by watchdog ?WD_DOWN. The latter doesn't
+%% result in the conn record being deleted since 'DOWN' from death of
+%% its watchdog doesn't (yet) deal with the record having been
+%% removed.
%% Monitor process has died. Just die with a reason that tells
%% diameter_config about the happening. If a cleaner shutdown is
@@ -561,23 +610,26 @@ transition({watchdog, Pid, {TPid, From, To}}, #state{service_name = SvcName,
transition({'DOWN', MRef, process, _, Reason}, #state{monitor = MRef}) ->
{stop, {monitor, Reason}};
-%% Local peer process has died.
+%% Local watchdog process has died.
transition({'DOWN', _, process, Pid, Reason}, S)
when node(Pid) == node() ->
- peer_down(Pid, Reason, S);
+ peer_down(Pid, Reason, S),
+ ok;
-%% Remote service wants to know about shared transports.
+%% Remote service wants to know about shared peers.
transition({service, Pid}, S) ->
share_peers(Pid, S),
ok;
%% Remote service is communicating a shared peer.
transition({peer, TPid, Aliases, Caps}, S) ->
- remote_peer_up(TPid, Aliases, Caps, S);
+ remote_peer_up(TPid, Aliases, Caps, S),
+ ok;
%% Remote peer process has died.
transition({'DOWN', _, process, TPid, _}, S) ->
- remote_peer_down(TPid, S);
+ remote_peer_down(TPid, S),
+ ok;
%% Restart after tc expiry.
transition({tc_timeout, T}, S) ->
@@ -591,18 +643,48 @@ transition({failover, TRef, Seqs}, S) ->
failover(TRef, Seqs, S),
ok;
+%% Ensure upgraded state is stored in state table.
+transition(upgrade, _) ->
+ ok;
+
transition(Req, S) ->
unexpected(handle_info, [Req], S),
ok.
+%% upgrade/1
+
+upgrade({state, Id, Name, Svc, PT, CT, SB, UB, SD, LD, MPid}) ->
+ S = #state{id = Id,
+ service_name = Name,
+ service = Svc,
+ peerT = PT,
+ connT = CT,
+ shared_peers = SD,
+ local_peers = LD,
+ monitor = MPid,
+ options = [{sequence, ?NOMASK},
+ {share_peers, SB},
+ {use_shared_peers, UB},
+ {restrict_connections, ?RESTRICT}]},
+ upgrade_insert(S),
+ S.
+
+upgrade_insert(#state{service = #diameter_service{pid = Pid}} = S) ->
+ if Pid == self() ->
+ ets:insert(?STATE_TABLE, S);
+ true ->
+ Pid ! upgrade
+ end.
+
%%% ---------------------------------------------------------------------------
%%% # terminate(Reason, State)
%%% ---------------------------------------------------------------------------
terminate(Reason, #state{service_name = Name} = S) ->
+ send_event(Name, stop),
ets:delete(?STATE_TABLE, Name),
shutdown == Reason %% application shutdown
- andalso shutdown(S).
+ andalso shutdown(application, S).
%%% ---------------------------------------------------------------------------
%%% # code_change(FromVsn, State, Extra)
@@ -685,41 +767,49 @@ mod_state(Alias, ModS) ->
%%% # shutdown/2
%%% ---------------------------------------------------------------------------
-shutdown(Refs, #state{peerT = PeerT}) ->
- ets:foldl(fun(P,ok) -> s(P, Refs), ok end, ok, PeerT).
-
-s(#peer{ref = Ref, pid = Pid}, Refs) ->
- s(lists:member(Ref, Refs), Pid);
-
-s(true, Pid) ->
- Pid ! {shutdown, self()}; %% 'DOWN' will cleanup as usual
-s(false, _) ->
- ok.
-
-%%% ---------------------------------------------------------------------------
-%%% # shutdown/1
-%%% ---------------------------------------------------------------------------
+%% remove_transport: ask watchdogs to terminate their transport.
+shutdown(Refs, #state{peerT = PeerT})
+ when is_list(Refs) ->
+ ets:foldl(fun(P,ok) -> sp(P, Refs), ok end, ok, PeerT);
-shutdown(#state{peerT = PeerT}) ->
+%% application/service shutdown: ask transports to terminate themselves.
+shutdown(Reason, #state{peerT = PeerT}) ->
%% A transport might not be alive to receive the shutdown request
%% but give those that are a chance to shutdown gracefully.
- wait(fun st/2, PeerT),
+ shutdown(conn, Reason, PeerT),
%% Kill the watchdogs explicitly in case there was no transport.
- wait(fun sw/2, PeerT).
+ shutdown(peer, Reason, PeerT).
-wait(Fun, T) ->
- diameter_lib:wait(ets:foldl(Fun, [], T)).
+%% sp/2
-st(#peer{conn = B}, Acc)
- when is_boolean(B) ->
- Acc;
-st(#peer{conn = Pid}, Acc) ->
- Pid ! shutdown,
- [Pid | Acc].
+sp(#peer{ref = Ref, pid = Pid}, Refs) ->
+ lists:member(Ref, Refs)
+ andalso (Pid ! {shutdown, self()}). %% 'DOWN' cleans up
+
+%% shutdown/3
+
+shutdown(Who, Reason, T) ->
+ diameter_lib:wait(ets:foldl(fun(X,A) -> shutdown(Who, X, Reason, A) end,
+ [],
+ T)).
+
+shutdown(conn = Who, #peer{op_state = {OS,_}} = P, Reason, Acc) ->
+ shutdown(Who, P#peer{op_state = OS}, Reason, Acc);
+
+shutdown(conn,
+ #peer{pid = Pid, op_state = ?STATE_UP, conn = TPid},
+ Reason,
+ Acc) ->
+ TPid ! {shutdown, Pid, Reason},
+ [TPid | Acc];
-sw(#peer{pid = Pid}, Acc) ->
+shutdown(peer, #peer{pid = Pid}, _Reason, Acc)
+ when is_pid(Pid) ->
exit(Pid, shutdown),
- [Pid | Acc].
+ [Pid | Acc];
+
+shutdown(_, #peer{}, _, Acc) ->
+ Acc.
%%% ---------------------------------------------------------------------------
%%% # call_service/2
@@ -772,6 +862,7 @@ i(SvcName) ->
lists:foreach(fun(T) -> start_fsm(T,S) end, CL),
init_shared(S),
+ send_event(SvcName, start),
S.
cfg_acc({SvcName, #diameter_service{applications = Apps} = Rec, Opts},
@@ -779,9 +870,8 @@ cfg_acc({SvcName, #diameter_service{applications = Apps} = Rec, Opts},
lists:foreach(fun init_mod/1, Apps),
S = #state{service_name = SvcName,
service = Rec#diameter_service{pid = self()},
- share_peers = get_value(share_peers, Opts),
- use_shared_peers = get_value(use_shared_peers, Opts),
- monitor = mref(get_value(monitor, Opts))},
+ monitor = mref(get_value(monitor, Opts)),
+ options = service_options(Opts)},
{S, Acc};
cfg_acc({_Ref, Type, _Opts} = T, {S, Acc})
@@ -789,15 +879,24 @@ cfg_acc({_Ref, Type, _Opts} = T, {S, Acc})
Type == listen ->
{S, [T | Acc]}.
+service_options(Opts) ->
+ [{sequence, proplists:get_value(sequence, Opts, ?NOMASK)},
+ {share_peers, get_value(share_peers, Opts)},
+ {use_shared_peers, get_value(use_shared_peers, Opts)},
+ {restrict_connections, proplists:get_value(restrict_connections,
+ Opts,
+ ?RESTRICT)}].
+%% The order of options is significant since we match against the list.
+
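Since later clauses match this list positionally, a sketch of its shape may help; the values shown are only examples.

%% Illustrative only: the shape of the list service_options/1 returns.
%% handle_call(sequence, ...) matches [{_, Mask} | _] (position 1),
%% init_shared/1 matches [_, _, {_, UseShared} | _] (position 3) and
%% handle_call(restriction, ...) matches [_,_,_,{_,R} | _] (position 4).
example_options() ->
    [{sequence, {0,32}},
     {share_peers, false},
     {use_shared_peers, false},
     {restrict_connections, nodes}].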
mref(false = No) ->
No;
mref(P) ->
erlang:monitor(process, P).
-init_shared(#state{use_shared_peers = true,
+init_shared(#state{options = [_, _, {_, true} | _],
service_name = Svc}) ->
diameter_peer:notify(Svc, {service, self()});
-init_shared(#state{use_shared_peers = false}) ->
+init_shared(#state{options = [_, _, {_, false} | _]}) ->
ok.
init_mod(#diameter_app{alias = Alias,
@@ -860,9 +959,8 @@ start(Ref, Type, Opts, #state{peerT = PeerT,
Pid.
%% Note that the service record passed into the watchdog is the merged
-%% record so that each watchdog (and peer_fsm) may get a different
-%% record. This record is what is passed back into application
-%% callbacks.
+%% record so that each watchdog may get a different record. This
+%% record is what is passed back into application callbacks.
s(Type, Ref, T) ->
case diameter_watchdog:start({Type, Ref}, T) of
@@ -913,8 +1011,8 @@ accepted(Pid, _TPid, #state{peerT = PeerT} = S) ->
#peer{ref = Ref, type = accept = T, conn = false, options = Opts}
= P
= fetch(PeerT, Pid),
- insert(PeerT, P#peer{conn = true}), %% mark replacement transport started
- start(Ref, T, Opts, S). %% start new peer
+ insert(PeerT, P#peer{conn = true}), %% mark replacement as started
+ start(Ref, T, Opts, S). %% start new watchdog
fetch(Tid, Key) ->
[T] = ets:lookup(Tid, Key),
@@ -929,11 +1027,9 @@ fetch(Tid, Key) ->
%%% ---------------------------------------------------------------------------
%%% # connection_up/3
-%%%
-%%% Output: #state{}
%%% ---------------------------------------------------------------------------
-%% Peer process has reached the open state.
+%% Watchdog process has reached state OKAY.
connection_up(Pid, {TPid, {Caps, SApps, Pkt}}, #state{peerT = PeerT,
connT = ConnT}
@@ -948,9 +1044,29 @@ connection_up(Pid, {TPid, {Caps, SApps, Pkt}}, #state{peerT = PeerT,
connection_up([Pkt], P#peer{conn = TPid}, C, S).
%%% ---------------------------------------------------------------------------
+%%% # reopen/3
+%%% ---------------------------------------------------------------------------
+
+%% Note that this connection_up/3 rewrites the same #conn{} now
+%% written here. Both do so in case reopen has not happened in old
+%% code.
+
+reopen(Pid, {TPid, {Caps, SApps, _Pkt}}, #state{peerT = PeerT,
+ connT = ConnT}) ->
+ P = fetch(PeerT, Pid),
+ C = #conn{pid = TPid,
+ apps = SApps,
+ caps = Caps,
+ peer = Pid},
+
+ insert(ConnT, C),
+ #peer{op_state = {?STATE_DOWN, _}}
+ = P,
+ insert(PeerT, P#peer{op_state = {?STATE_DOWN, ?WD_REOPEN},
+ conn = TPid}).
+
+%%% ---------------------------------------------------------------------------
%%% # connection_up/2
-%%%
-%%% Output: #state{}
%%% ---------------------------------------------------------------------------
%% Peer process has transitioned back into the open state. Note that there
@@ -979,10 +1095,8 @@ connection_up(T, P, C, #state{peerT = PeerT,
insert(PeerT, P#peer{op_state = {?STATE_UP, ?WD_OKAY}}),
request_peer_up(TPid),
- report_status(up, P, C, S, T),
- S#state{local_peers = insert_local_peer(SApps,
- {{TPid, Caps}, {SvcName, Apps}},
- LDict)}.
+ insert_local_peer(SApps, {{TPid, Caps}, {SvcName, Apps}}, LDict),
+ report_status(up, P, C, S, T).
insert_local_peer(SApps, T, LDict) ->
lists:foldl(fun(A,D) -> ilp(A, T, D) end, LDict, SApps).
@@ -1024,11 +1138,9 @@ peer_cb(MFA, Alias) ->
%%% ---------------------------------------------------------------------------
%%% # connection_down/2
-%%%
-%%% Output: #state{}
%%% ---------------------------------------------------------------------------
-%% Peer process has transitioned out of the open state.
+%% Watchdog has transitioned out of state OKAY.
connection_down(Pid, #state{peerT = PeerT,
connT = ConnT}
@@ -1044,8 +1156,8 @@ connection_down(Pid, #state{peerT = PeerT,
%% connection_down/3
-connection_down(#peer{op_state = {?STATE_DOWN, _}}, _, S) ->
- S;
+connection_down(#peer{op_state = {?STATE_DOWN, _}}, _, _) ->
+ ok;
connection_down(#peer{conn = TPid,
op_state = {?STATE_UP, _}}
@@ -1058,12 +1170,8 @@ connection_down(#peer{conn = TPid,
local_peers = LDict}
= S) ->
report_status(down, P, C, S, []),
- NewS = S#state{local_peers
- = remove_local_peer(SApps,
- {{TPid, Caps}, {SvcName, Apps}},
- LDict)},
- request_peer_down(TPid, NewS),
- NewS.
+ remove_local_peer(SApps, {{TPid, Caps}, {SvcName, Apps}}, LDict),
+ request_peer_down(TPid, S).
remove_local_peer(SApps, T, LDict) ->
lists:foldl(fun(A,D) -> rlp(A, T, D) end, LDict, SApps).
@@ -1082,11 +1190,9 @@ down_conn(Id, Alias, TC, {SvcName, Apps}) ->
%%% ---------------------------------------------------------------------------
%%% # peer_down/3
-%%%
-%%% Output: #state{}
%%% ---------------------------------------------------------------------------
-%% Peer process has died.
+%% Watchdog process has died.
peer_down(Pid, Reason, #state{peerT = PeerT} = S) ->
P = fetch(PeerT, Pid),
@@ -1106,12 +1212,12 @@ closed({shutdown, {close, _TPid, Reason}},
closed(_, _, _) ->
ok.
-%% The peer has never come up ...
-peer_down(#peer{conn = B}, S)
+%% The watchdog has never reached OKAY ...
+peer_down(#peer{conn = B}, _)
when is_boolean(B) ->
- S;
+ ok;
-%% ... or it has.
+%% ... or maybe it has.
peer_down(#peer{conn = TPid} = P, #state{connT = ConnT} = S) ->
#conn{} = C = fetch(ConnT, TPid),
ets:delete_object(ConnT, C),
@@ -1139,7 +1245,7 @@ restart(#peer{ref = Ref,
started = Time}) ->
{Time, {Ref, T, Opts}};
-%% ... or it has: a replacement transport has already been spawned.
+%% ... or it has: a replacement has already been spawned.
restart(#peer{type = accept}) ->
false.
@@ -1165,8 +1271,8 @@ default_tc(connect, Opts) ->
default_tc(accept, _) ->
0.
-%% Bound tc below if the peer was restarted recently to avoid
-%% continuous in case of faulty config or other problems.
+%% Bound tc below if the watchdog was restarted recently to avoid
+%% continuous restarts in case of faulty config or other problems.
tc(Time, Tc) ->
choose(Tc > ?RESTART_TC
orelse timer:now_diff(now(), Time) > 1000*?RESTART_TC,
@@ -1288,7 +1394,7 @@ cm([_,_|_], _, _, _) ->
multiple.
%%% ---------------------------------------------------------------------------
-%%% # send_request/5
+%%% # send_request/6
%%% ---------------------------------------------------------------------------
%% Send an outgoing request in its dedicated process.
@@ -1301,71 +1407,89 @@ cm([_,_|_], _, _, _) ->
%% The mod field of the #diameter_app{} here includes any extra
%% arguments passed to diameter:call/2.
-send_request({TPid, Caps, App}, Msg, Opts, Caller, SvcName) ->
+send_request({TPid, Caps, App} = T, Mask, Msg, Opts, Caller, SvcName) ->
#diameter_app{module = ModX}
= App,
- Pkt = make_request_packet(Msg),
-
- case cb(ModX, prepare_request, [Pkt, SvcName, {TPid, Caps}]) of
- {send, P} ->
- send_request(make_request_packet(P, Pkt),
- TPid,
- Caps,
- App,
- Opts,
- Caller,
- SvcName);
- {discard, Reason} ->
- {error, Reason};
- discard ->
- {error, discarded};
- T ->
- ?ERROR({invalid_return, prepare_request, App, T})
- end.
+ Pkt = make_prepare_packet(Mask, Msg),
+
+ send_req(cb(ModX, prepare_request, [Pkt, SvcName, {TPid, Caps}]),
+ Pkt,
+ T,
+ Opts,
+ Caller,
+ SvcName,
+ []).
+
+send_req({send, P}, Pkt, T, Opts, Caller, SvcName, Fs) ->
+ send_req(make_request_packet(P, Pkt), T, Opts, Caller, SvcName, Fs);
+
+send_req({discard, Reason} , _, _, _, _, _, _) ->
+ {error, Reason};
-%% make_request_packet/1
+send_req(discard, _, _, _, _, _, _) ->
+ {error, discarded};
+
+send_req({eval_packet, RC, F}, Pkt, T, Opts, Caller, SvcName, Fs) ->
+ send_req(RC, Pkt, T, Opts, Caller, SvcName, [F|Fs]);
+
+send_req(E, _, {_, _, App}, _, _, _, _) ->
+ ?ERROR({invalid_return, prepare_request, App, E}).
+
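A hedged sketch of the eval_packet return handled by send_req/7 above (callback and helper names hypothetical): the inner value is processed as before, and the fun is applied by encode/3 further down to the encoded #diameter_packet{} before it reaches the transport.

%% Hypothetical prepare_request callback using the eval_packet wrapper.
%% Assumes -include_lib("diameter/include/diameter.hrl") for the records.
prepare_request(Pkt, _SvcName, {_TPid, _Caps}) ->
    {eval_packet, {send, Pkt}, fun on_encode/1}.

on_encode(#diameter_packet{bin = Bin}) ->
    error_logger:info_msg("sending ~p bytes~n", [byte_size(Bin)]).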
+%% make_prepare_packet/2
%%
%% Turn an outgoing request as passed to call/4 into a diameter_packet
%% record in preparation for a prepare_request callback.
-make_request_packet(Bin)
+make_prepare_packet(_, Bin)
when is_binary(Bin) ->
#diameter_packet{header = diameter_codec:decode_header(Bin),
bin = Bin};
-make_request_packet(#diameter_packet{msg = [#diameter_header{} = Hdr | Avps]}
- = Pkt) ->
- Pkt#diameter_packet{msg = [make_request_header(Hdr) | Avps]};
+make_prepare_packet(Mask, #diameter_packet{msg = [#diameter_header{} = Hdr
+ | Avps]}
+ = Pkt) ->
+ Pkt#diameter_packet{msg = [make_prepare_header(Mask, Hdr) | Avps]};
-make_request_packet(#diameter_packet{header = Hdr} = Pkt) ->
- Pkt#diameter_packet{header = make_request_header(Hdr)};
+make_prepare_packet(Mask, #diameter_packet{header = Hdr} = Pkt) ->
+ Pkt#diameter_packet{header = make_prepare_header(Mask, Hdr)};
-make_request_packet(Msg) ->
- make_request_packet(#diameter_packet{msg = Msg}).
+make_prepare_packet(Mask, Msg) ->
+ make_prepare_packet(Mask, #diameter_packet{msg = Msg}).
-%% make_request_header/1
+%% make_prepare_header/1
-make_request_header(undefined) ->
- Seq = diameter_session:sequence(),
- make_request_header(#diameter_header{end_to_end_id = Seq,
+make_prepare_header(Mask, undefined) ->
+ Seq = diameter_session:sequence(Mask),
+ make_prepare_header(#diameter_header{end_to_end_id = Seq,
hop_by_hop_id = Seq});
-make_request_header(#diameter_header{version = undefined} = Hdr) ->
- make_request_header(Hdr#diameter_header{version = ?DIAMETER_VERSION});
+make_prepare_header(Mask, #diameter_header{end_to_end_id = undefined,
+ hop_by_hop_id = undefined}) ->
+ Seq = diameter_session:sequence(Mask),
+ make_prepare_header(#diameter_header{end_to_end_id = Seq,
+ hop_by_hop_id = Seq});
-make_request_header(#diameter_header{end_to_end_id = undefined} = H) ->
- Seq = diameter_session:sequence(),
- make_request_header(H#diameter_header{end_to_end_id = Seq});
+make_prepare_header(Mask, #diameter_header{end_to_end_id = undefined} = H) ->
+ Seq = diameter_session:sequence(Mask),
+ make_prepare_header(H#diameter_header{end_to_end_id = Seq});
-make_request_header(#diameter_header{hop_by_hop_id = undefined} = H) ->
- Seq = diameter_session:sequence(),
- make_request_header(H#diameter_header{hop_by_hop_id = Seq});
+make_prepare_header(Mask, #diameter_header{hop_by_hop_id = undefined} = H) ->
+ Seq = diameter_session:sequence(Mask),
+ make_prepare_header(H#diameter_header{hop_by_hop_id = Seq});
-make_request_header(#diameter_header{} = Hdr) ->
+make_prepare_header(_, Hdr) ->
+ make_prepare_header(Hdr).
+
+%% make_prepare_header/1
+
+make_prepare_header(#diameter_header{version = undefined} = Hdr) ->
+ make_prepare_header(Hdr#diameter_header{version = ?DIAMETER_VERSION});
+
+make_prepare_header(#diameter_header{} = Hdr) ->
Hdr;
-make_request_header(T) ->
+make_prepare_header(T) ->
?ERROR({invalid_header, T}).
%% make_request_packet/2
@@ -1375,7 +1499,7 @@ make_request_header(T) ->
make_request_packet(Bin, _)
when is_binary(Bin) ->
- make_request_packet(Bin);
+ make_prepare_packet(false, Bin);
make_request_packet(#diameter_packet{msg = [#diameter_header{} | _]}
= Pkt,
@@ -1387,7 +1511,7 @@ make_request_packet(#diameter_packet{msg = [#diameter_header{} | _]}
%% This is primarily so that the end to end and hop by hop identifiers
%% are retained.
make_request_packet(#diameter_packet{header = Hdr} = Pkt,
- #diameter_packet{header = Hdr0}) ->
+ #diameter_packet{header = Hdr0}) ->
Pkt#diameter_packet{header = fold_record(Hdr0, Hdr)};
make_request_packet(Msg, Pkt) ->
@@ -1400,16 +1524,16 @@ fold_record(undefined, R) ->
fold_record(Rec, R) ->
diameter_lib:fold_tuple(2, Rec, R).
-%% send_request/7
+%% send_req/6
-send_request(Pkt, TPid, Caps, App, Opts, Caller, SvcName) ->
+send_req(Pkt, {TPid, Caps, App}, Opts, Caller, SvcName, Fs) ->
#diameter_app{alias = Alias,
dictionary = Dict,
module = ModX,
options = [{answer_errors, AE} | _]}
= App,
- EPkt = encode(Dict, Pkt),
+ EPkt = encode(Dict, Pkt, Fs),
#options{filter = Filter,
timeout = Timeout}
@@ -1490,6 +1614,13 @@ msg(#diameter_packet{msg = undefined, bin = Bin}) ->
msg(#diameter_packet{msg = Msg}) ->
Msg.
+%% encode/3
+
+encode(Dict, Pkt, Fs) ->
+ P = encode(Dict, Pkt),
+ eval_packet(P, Fs),
+ P.
+
%% encode/2
%% Note that prepare_request can return a diameter_packet containing
@@ -1571,38 +1702,47 @@ send(Pid, Pkt) ->
%% retransmit/4
-retransmit({TPid, Caps, #diameter_app{alias = Alias} = App},
- #request{app = Alias,
- packet = Pkt}
+retransmit({TPid, Caps, #diameter_app{alias = Alias} = App} = T,
+ #request{app = Alias, packet = Pkt}
= Req,
SvcName,
Timeout) ->
have_request(Pkt, TPid) %% Don't failover to a peer we've
andalso ?THROW(timeout), %% already sent to.
- case cb(App, prepare_retransmit, [Pkt, SvcName, {TPid, Caps}]) of
- {send, P} ->
- retransmit(make_request_packet(P, Pkt), TPid, Caps, Req, Timeout);
- {discard, Reason} ->
- ?THROW(Reason);
- discard ->
- ?THROW(discarded);
- T ->
- ?ERROR({invalid_return, prepare_retransmit, App, T})
- end.
+ resend_req(cb(App, prepare_retransmit, [Pkt, SvcName, {TPid, Caps}]),
+ T,
+ Req,
+ Timeout,
+ []).
-%% retransmit/5
+resend_req({send, P}, T, #request{packet = Pkt} = Req, Timeout, Fs) ->
+ retransmit(make_request_packet(P, Pkt), T, Req, Timeout, Fs);
-retransmit(Pkt, TPid, Caps, #request{dictionary = Dict} = Req, Timeout) ->
- EPkt = encode(Dict, Pkt),
+resend_req({discard, Reason}, _, _, _, _) ->
+ ?THROW(Reason);
- NewReq = Req#request{transport = TPid,
- packet = Pkt,
- caps = Caps},
+resend_req(discard, _, _, _, _) ->
+ ?THROW(discarded);
- ?LOG(retransmission, NewReq),
- TRef = send_request(TPid, EPkt, NewReq, Timeout),
- {TRef, NewReq}.
+resend_req({eval_packet, RC, F}, T, Req, Timeout, Fs) ->
+ resend_req(RC, T, Req, Timeout, [F|Fs]);
+
+resend_req(T, {_, _, App}, _, _, _) ->
+ ?ERROR({invalid_return, prepare_retransmit, App, T}).
+
+%% retransmit/6
+
+retransmit(Pkt, {TPid, Caps, _}, #request{dictionary = D} = Req0, Tmo, Fs) ->
+ EPkt = encode(D, Pkt, Fs),
+
+ Req = Req0#request{transport = TPid,
+ packet = Pkt,
+ caps = Caps},
+
+ ?LOG(retransmission, Req),
+ TRef = send_request(TPid, EPkt, Req, Tmo),
+ {TRef, Req}.
%% store_request/4
@@ -1674,10 +1814,13 @@ request_peer_down(TPid, S) ->
%%% recv_request/3
%%% ---------------------------------------------------------------------------
-recv_request(TPid, Pkt, {ConnT, SvcName, Apps}) ->
+recv_request(TPid, Pkt, {ConnT, SvcName, Apps}) -> %% upgrade
+ recv_request(TPid, Pkt, {ConnT, SvcName, Apps, ?NOMASK});
+
+recv_request(TPid, Pkt, {ConnT, SvcName, Apps, Mask}) ->
try ets:lookup(ConnT, TPid) of
[C] ->
- recv_request(C, TPid, Pkt, SvcName, Apps);
+ recv_request(C, TPid, Pkt, SvcName, Apps, Mask);
[] -> %% transport has gone down
ok
catch
@@ -1687,7 +1830,12 @@ recv_request(TPid, Pkt, {ConnT, SvcName, Apps}) ->
%% recv_request/5
-recv_request(#conn{apps = SApps, caps = Caps}, TPid, Pkt, SvcName, Apps) ->
+recv_request(#conn{apps = SApps, caps = Caps},
+ TPid,
+ Pkt,
+ SvcName,
+ Apps,
+ Mask) ->
#diameter_caps{origin_host = {OH,_},
origin_realm = {OR,_}}
= Caps,
@@ -1699,6 +1847,7 @@ recv_request(#conn{apps = SApps, caps = Caps}, TPid, Pkt, SvcName, Apps) ->
{SvcName, OH, OR},
TPid,
Apps,
+ Mask,
Caps,
Pkt).
@@ -1724,20 +1873,24 @@ keyfind([Key | Rest], Pos, L) ->
T
end.
-%% recv_request/6
+%% recv_request/7
-recv_request({Id, Alias}, T, TPid, Apps, Caps, Pkt) ->
+recv_request({Id, Alias}, T, TPid, Apps, Mask, Caps, Pkt) ->
#diameter_app{dictionary = Dict}
= A
= find_app(Alias, Apps),
- recv_request(T, {TPid, Caps}, A, diameter_codec:decode(Id, Dict, Pkt));
+ recv_request(T,
+ {TPid, Caps},
+ A,
+ Mask,
+ diameter_codec:decode(Id, Dict, Pkt));
%% Note that the decode is different depending on whether or not Id is
%% ?APP_ID_RELAY.
%% DIAMETER_APPLICATION_UNSUPPORTED 3007
%% A request was sent for an application that is not supported.
-recv_request(false, T, TPid, _, _, Pkt) ->
+recv_request(false, T, TPid, _, _, _, Pkt) ->
As = collect_avps(Pkt),
protocol_error(3007, T, TPid, Pkt#diameter_packet{avps = As}).
@@ -1749,7 +1902,7 @@ collect_avps(Pkt) ->
As
end.
-%% recv_request/4
+%% recv_request/5
%% Wrong number of bits somewhere in the message: reply.
%%
@@ -1758,7 +1911,7 @@ collect_avps(Pkt) ->
%% set to an unrecognized value, or that is inconsistent with the
%% AVP's definition.
%%
-recv_request(T, {TPid, _}, _, #diameter_packet{errors = [Bs | _]} = Pkt)
+recv_request(T, {TPid, _}, _, _, #diameter_packet{errors = [Bs | _]} = Pkt)
when is_bitstring(Bs) ->
protocol_error(3009, T, TPid, Pkt);
@@ -1773,6 +1926,7 @@ recv_request(T, {TPid, _}, _, #diameter_packet{errors = [Bs | _]} = Pkt)
recv_request(T,
{TPid, _},
#diameter_app{id = Id},
+ _,
#diameter_packet{header = #diameter_header{is_proxiable = P},
msg = M}
= Pkt)
@@ -1790,6 +1944,7 @@ recv_request(T,
recv_request(T,
{TPid, _},
_,
+ _,
#diameter_packet{header = #diameter_header{is_error = true}}
= Pkt) ->
protocol_error(3008, T, TPid, Pkt);
@@ -1798,14 +1953,20 @@ recv_request(T,
%% in the relay application. Don't distinguish between the two since
%% each application has its own callback config. That is, the user can
%% easily distinguish between the two cases.
-recv_request(T, TC, App, Pkt) ->
- request_cb(T, TC, App, examine(Pkt)).
+recv_request(T, TC, App, Mask, Pkt) ->
+ request_cb(T, TC, App, Mask, examine(Pkt)).
%% Note that there may still be errors but these aren't protocol
%% (3xxx) errors that lead to an answer-message.
-request_cb({SvcName, _OH, _OR} = T, TC, App, Pkt) ->
- request_cb(cb(App, handle_request, [Pkt, SvcName, TC]), App, T, TC, Pkt).
+request_cb({SvcName, _OH, _OR} = T, TC, App, Mask, Pkt) ->
+ request_cb(cb(App, handle_request, [Pkt, SvcName, TC]),
+ App,
+ Mask,
+ T,
+ TC,
+ [],
+ Pkt).
%% examine/1
%%
@@ -1825,7 +1986,7 @@ examine(#diameter_packet{errors = Es} = Pkt) ->
Pkt#diameter_packet{errors = [5011 | Es]}.
%% It's odd/unfortunate that this isn't a protocol error.
-%% request_cb/5
+%% request_cb/7
%% A reply may be an answer-message, constructed either here or by
%% the handle_request callback. The header from the incoming request
@@ -1835,21 +1996,23 @@ examine(#diameter_packet{errors = Es} = Pkt) ->
request_cb({reply, Ans},
#diameter_app{dictionary = Dict},
_,
+ _,
{TPid, _},
+ Fs,
Pkt) ->
- reply(Ans, Dict, TPid, Pkt);
+ reply(Ans, Dict, TPid, Fs, Pkt);
%% An 3xxx result code, for which the E-bit is set in the header.
-request_cb({protocol_error, RC}, _, T, {TPid, _}, Pkt)
+request_cb({protocol_error, RC}, _, _, T, {TPid, _}, Fs, Pkt)
when 3000 =< RC, RC < 4000 ->
- protocol_error(RC, T, TPid, Pkt);
+ protocol_error(RC, T, TPid, Fs, Pkt);
%% RFC 3588 says we must reply 3001 to anything unrecognized or
%% unsupported. 'noreply' is undocumented (and inappropriately named)
%% backwards compatibility for this, protocol_error the documented
%% alternative.
-request_cb(noreply, _, T, {TPid, _}, Pkt) ->
- protocol_error(3001, T, TPid, Pkt);
+request_cb(noreply, _, _, T, {TPid, _}, Fs, Pkt) ->
+ protocol_error(3001, T, TPid, Fs, Pkt);
%% Relay a request to another peer. This is equivalent to doing an
%% explicit call/4 with the message in question except that (1) a loop
@@ -1869,38 +2032,51 @@ request_cb(noreply, _, T, {TPid, _}, Pkt) ->
request_cb({A, Opts},
#diameter_app{id = Id}
= App,
+ Mask,
T,
TC,
+ Fs,
Pkt)
when A == relay, Id == ?APP_ID_RELAY;
A == proxy, Id /= ?APP_ID_RELAY;
A == resend ->
- resend(Opts, App, T, TC, Pkt);
+ resend(Opts, App, Mask, T, TC, Fs, Pkt);
-request_cb(discard, _, _, _, _) ->
+request_cb(discard, _, _, _, _, _, _) ->
ok;
-request_cb({eval, RC, F}, App, T, TC, Pkt) ->
- request_cb(RC, App, T, TC, Pkt),
+request_cb({eval_packet, RC, F}, App, Mask, T, TC, Fs, Pkt) ->
+ request_cb(RC, App, Mask, T, TC, [F|Fs], Pkt);
+
+request_cb({eval, RC, F}, App, Mask, T, TC, Fs, Pkt) ->
+    request_cb(RC, App, Mask, T, TC, Fs, Pkt),
diameter_lib:eval(F).
-%% protocol_error/4
+%% protocol_error/5
-protocol_error(RC, {_, OH, OR}, TPid, #diameter_packet{avps = Avps} = Pkt) ->
+protocol_error(RC, {_, OH, OR}, TPid, Fs, Pkt) ->
+ #diameter_packet{avps = Avps} = Pkt,
?LOG({error, RC}, Pkt),
- reply(answer_message({OH, OR, RC}, Avps), ?BASE, TPid, Pkt).
+ reply(answer_message({OH, OR, RC}, Avps), ?BASE, TPid, Fs, Pkt).
-%% resend/5
+%% protocol_error/4
+
+protocol_error(RC, T, TPid, Pkt) ->
+ protocol_error(RC, T, TPid, [], Pkt).
+
+%% resend/7
%%
%% Resend a message as a relay or proxy agent.
resend(Opts,
#diameter_app{} = App,
+ Mask,
{_SvcName, OH, _OR} = T,
{_TPid, _Caps} = TC,
+ Fs,
#diameter_packet{avps = Avps} = Pkt) ->
{Code, _Flags, Vid} = ?BASE:avp_header('Route-Record'),
- resend(is_loop(Code, Vid, OH, Avps), Opts, App, T, TC, Pkt).
+ resend(is_loop(Code, Vid, OH, Avps), Opts, App, Mask, T, TC, Fs, Pkt).
%% DIAMETER_LOOP_DETECTED 3005
%% An agent detected a loop while trying to get the message to the
@@ -1908,8 +2084,8 @@ resend(Opts,
%% if one is available, but the peer reporting the error has
%% identified a configuration problem.
-resend(true, _, _, T, {TPid, _}, Pkt) -> %% Route-Record loop
- protocol_error(3005, T, TPid, Pkt);
+resend(true, _, _, _, T, {TPid, _}, Fs, Pkt) -> %% Route-Record loop
+ protocol_error(3005, T, TPid, Fs, Pkt);
%% 6.1.8. Relaying and Proxying Requests
%%
@@ -1920,16 +2096,18 @@ resend(true, _, _, T, {TPid, _}, Pkt) -> %% Route-Record loop
resend(false,
Opts,
App,
+ Mask,
{SvcName, _, _} = T,
{TPid, #diameter_caps{origin_host = {_, OH}}},
+ Fs,
#diameter_packet{header = Hdr0,
avps = Avps}
= Pkt) ->
Route = #diameter_avp{data = {?BASE, 'Route-Record', OH}},
- Seq = diameter_session:sequence(),
+ Seq = diameter_session:sequence(Mask),
Hdr = Hdr0#diameter_header{hop_by_hop_id = Seq},
Msg = [Hdr, Route | Avps],
- resend(call(SvcName, App, Msg, Opts), T, TPid, Pkt).
+ resend(call(SvcName, App, Msg, Opts), T, TPid, Fs, Pkt).
%% The incoming request is relayed with the addition of a
%% Route-Record. Note the requirement on the return from call/4 below,
%% which places a requirement on the value returned by the
@@ -1955,15 +2133,18 @@ resend(#diameter_packet{bin = B}
= Pkt,
_,
TPid,
+ Fs,
#diameter_packet{header = #diameter_header{hop_by_hop_id = Id},
transport_data = TD}) ->
- send(TPid, Pkt#diameter_packet{bin = diameter_codec:hop_by_hop_id(Id, B),
- transport_data = TD});
+ P = Pkt#diameter_packet{bin = diameter_codec:hop_by_hop_id(Id, B),
+ transport_data = TD},
+ eval_packet(P, Fs),
+ send(TPid, P);
%% TODO: counters
%% Or not: DIAMETER_UNABLE_TO_DELIVER.
-resend(_, T, TPid, Pkt) ->
- protocol_error(3002, T, TPid, Pkt).
+resend(_, T, TPid, Fs, Pkt) ->
+ protocol_error(3002, T, TPid, Fs, Pkt).
%% is_loop/4
%%
@@ -1985,52 +2166,68 @@ is_loop(Code, Vid, OH, [_ | Avps])
is_loop(Code, Vid, OH, Avps) ->
is_loop(Code, Vid, ?BASE:avp(encode, OH, 'Route-Record'), Avps).
-%% reply/4
+%% reply/5
%%
%% Send a locally originating reply.
%% Skip the setting of Result-Code and Failed-AVP's below.
-reply([Msg], Dict, TPid, Pkt)
+reply([Msg], Dict, TPid, Fs, Pkt)
when is_list(Msg);
is_tuple(Msg) ->
- reply(Msg, Dict, TPid, Pkt#diameter_packet{errors = []});
+ reply(Msg, Dict, TPid, Fs, Pkt#diameter_packet{errors = []});
%% No errors or a diameter_header/avp list.
-reply(Msg, Dict, TPid, #diameter_packet{errors = Es,
- transport_data = TD}
- = ReqPkt)
+reply(Msg, Dict, TPid, Fs, #diameter_packet{errors = Es} = ReqPkt)
when [] == Es;
is_record(hd(Msg), diameter_header) ->
Pkt = diameter_codec:encode(Dict, make_answer_packet(Msg, ReqPkt)),
+ eval_packet(Pkt, Fs),
incr(send, Pkt, Dict, TPid), %% count result codes in sent answers
- send(TPid, Pkt#diameter_packet{transport_data = TD});
+ send(TPid, Pkt);
%% Or not: set Result-Code and Failed-AVP AVP's.
-reply(Msg, Dict, TPid, #diameter_packet{errors = [H|_] = Es} = Pkt) ->
+reply(Msg, Dict, TPid, Fs, #diameter_packet{errors = [H|_] = Es} = Pkt) ->
reply(rc(Msg, rc(H), [A || {_,A} <- Es], Dict),
Dict,
TPid,
+ Fs,
Pkt#diameter_packet{errors = []}).
+eval_packet(Pkt, Fs) ->
+ lists:foreach(fun(F) -> diameter_lib:eval([F,Pkt]) end, Fs).
+
%% make_answer_packet/2
+%% A reply message clears the R and T flags and retains the P flag.
+%% The E flag will be set at encode. 6.2 of 3588 requires the same P
+%% flag on an answer as on the request. A #diameter_packet{} returned
+%% from a handle_request callback can circumvent this by setting its
+%% own header values.
+make_answer_packet(#diameter_packet{header = Hdr,
+ msg = Msg,
+ transport_data = TD},
+ #diameter_packet{header = ReqHdr}) ->
+ Hdr0 = ReqHdr#diameter_header{version = ?DIAMETER_VERSION,
+ is_request = false,
+ is_error = undefined,
+ is_retransmitted = false},
+ #diameter_packet{header = fold_record(Hdr0, Hdr),
+ msg = Msg,
+ transport_data = TD};
+
%% Binaries and header/avp lists are sent as-is.
-make_answer_packet(Bin, _)
+make_answer_packet(Bin, #diameter_packet{transport_data = TD})
when is_binary(Bin) ->
- #diameter_packet{bin = Bin};
-make_answer_packet([#diameter_header{} | _] = Msg, _) ->
- #diameter_packet{msg = Msg};
-
-%% Otherwise a reply message clears the R and T flags and retains the
-%% P flag. The E flag will be set at encode. 6.2 of 3588 requires the
-%% same P flag on an answer as on the request.
-make_answer_packet(Msg, #diameter_packet{header = ReqHdr}) ->
- Hdr = ReqHdr#diameter_header{version = ?DIAMETER_VERSION,
- is_request = false,
- is_error = undefined,
- is_retransmitted = false},
- #diameter_packet{header = Hdr,
- msg = Msg}.
+ #diameter_packet{bin = Bin,
+ transport_data = TD};
+make_answer_packet([#diameter_header{} | _] = Msg,
+ #diameter_packet{transport_data = TD}) ->
+ #diameter_packet{msg = Msg,
+ transport_data = TD};
+
+%% Otherwise, preserve transport_data.
+make_answer_packet(Msg, #diameter_packet{transport_data = TD} = Pkt) ->
+ make_answer_packet(#diameter_packet{msg = Msg, transport_data = TD}, Pkt).
%% rc/1
@@ -2071,7 +2268,7 @@ rc([MsgName | _], {'Result-Code' = K, RC} = T, Dict) ->
rc(Rec, T, Dict) ->
rc([Dict:rec2msg(element(1, Rec))], T, Dict).
-
+
%% failed_avp/3
failed_avp(_, [] = No, _) ->
@@ -2307,7 +2504,7 @@ a(#diameter_packet{errors = Es} = Pkt, SvcName, AE, #request{transport = TPid,
when [] == Es;
callback == AE ->
cb(Req, handle_answer, [Pkt, msg(P), SvcName, {TPid, Caps}]);
-
+
a(Pkt, SvcName, report, Req) ->
x(errors, handle_answer, [SvcName, Req, Pkt]);
@@ -2469,7 +2666,7 @@ send_event(#diameter_event{service = SvcName} = E) ->
%%% # share_peer/5
%%% ---------------------------------------------------------------------------
-share_peer(up, Caps, Aliases, TPid, #state{share_peers = true,
+share_peer(up, Caps, Aliases, TPid, #state{options = [_, {_, true} | _],
service_name = Svc}) ->
diameter_peer:notify(Svc, {peer, TPid, Aliases, Caps});
@@ -2480,11 +2677,11 @@ share_peer(_, _, _, _, _) ->
%%% # share_peers/2
%%% ---------------------------------------------------------------------------
-share_peers(Pid, #state{share_peers = true,
+share_peers(Pid, #state{options = [_, {_, true} | _],
local_peers = PDict}) ->
?Dict:fold(fun(A,Ps,ok) -> sp(Pid, A, Ps), ok end, ok, PDict);
-share_peers(_, #state{share_peers = false}) ->
+share_peers(_, _) ->
ok.
sp(Pid, Alias, Peers) ->
@@ -2494,39 +2691,31 @@ sp(Pid, Alias, Peers) ->
%%% # remote_peer_up/4
%%% ---------------------------------------------------------------------------
-remote_peer_up(Pid, Aliases, Caps, #state{use_shared_peers = true,
+remote_peer_up(Pid, Aliases, Caps, #state{options = [_, _, {_, true} | _],
service = Svc,
- shared_peers = PDict}
- = S) ->
+ shared_peers = PDict}) ->
#diameter_service{applications = Apps} = Svc,
- Update = lists:filter(fun(A) ->
- lists:keymember(A, #diameter_app.alias, Apps)
- end,
- Aliases),
- S#state{shared_peers = rpu(Pid, Caps, PDict, Update)};
+ Key = #diameter_app.alias,
+ As = lists:filter(fun(A) -> lists:keymember(A, Key, Apps) end, Aliases),
+ rpu(Pid, Caps, PDict, As);
-remote_peer_up(_, _, _, #state{use_shared_peers = false} = S) ->
- S.
+remote_peer_up(_, _, _, #state{options = [_, _, {_, false} | _]}) ->
+ ok.
rpu(_, _, PDict, []) ->
PDict;
rpu(Pid, Caps, PDict, Aliases) ->
erlang:monitor(process, Pid),
T = {Pid, Caps},
- lists:foldl(fun(A,D) -> ?Dict:append(A, T, D) end,
- PDict,
- Aliases).
+ lists:foreach(fun(A) -> ?Dict:append(A, T, PDict) end, Aliases).
%%% ---------------------------------------------------------------------------
%%% # remote_peer_down/2
%%% ---------------------------------------------------------------------------
-remote_peer_down(Pid, #state{use_shared_peers = true,
- shared_peers = PDict}
- = S) ->
- S#state{shared_peers = lists:foldl(fun(A,D) -> rpd(Pid, A, D) end,
- PDict,
- ?Dict:fetch_keys(PDict))}.
+remote_peer_down(Pid, #state{options = [_, _, {_, true} | _],
+ shared_peers = PDict}) ->
+ lists:foreach(fun(A) -> rpd(Pid, A, PDict) end, ?Dict:fetch_keys(PDict)).
rpd(Pid, Alias, PDict) ->
?Dict:update(Alias, fun(Ps) -> lists:keydelete(Pid, 1, Ps) end, PDict).
@@ -2851,7 +3040,8 @@ transports(#state{peerT = PeerT}) ->
-define(ALL_INFO, [capabilities,
applications,
transport,
- pending]).
+ pending,
+ options]).
%% The rest.
-define(OTHER_INFO, [connections,
@@ -2878,6 +3068,19 @@ tagged_info(Item, S)
undefined
end;
+tagged_info(TPid, #state{peerT = PT, connT = CT})
+ when is_pid(TPid) ->
+ try
+ [#conn{peer = Pid}] = ets:lookup(CT, TPid),
+ [#peer{ref = Ref, type = Type, options = Opts}] = ets:lookup(PT, Pid),
+ [{ref, Ref},
+ {type, Type},
+ {options, Opts}]
+ catch
+ error:_ ->
+ []
+ end;
+
tagged_info(Items, S)
when is_list(Items) ->
[T || I <- Items, T <- [tagged_info(I,S)], T /= undefined, T /= []];
@@ -2928,6 +3131,7 @@ complete_info(Item, #state{service = Svc} = S) ->
capabilities -> service_info(?CAP_INFO, S);
applications -> info_apps(S);
transport -> info_transport(S);
+ options -> info_options(S);
pending -> info_pending(S);
keys -> ?ALL_INFO ++ ?CAP_INFO ++ ?OTHER_INFO;
all -> service_info(?ALL_INFO, S);
@@ -2955,7 +3159,12 @@ info_stats(#state{peerT = PeerT}) ->
MatchSpec = [{#peer{ref = '$1', conn = '$2', _ = '_'},
[{'is_pid', '$2'}],
[['$1', '$2']]}],
- diameter_stats:read(lists:append(ets:select(PeerT, MatchSpec))).
+ try ets:select(PeerT, MatchSpec) of
+ L ->
+ diameter_stats:read(lists:append(L))
+ catch
+ error: badarg -> [] %% service has gone down
+ end.
%% info_transport/1
%%
@@ -3000,7 +3209,12 @@ transport([[{type, accept}, {options, Opts} | _] | _] = Ls) ->
{accept, [lists:nthtail(2,L) || L <- Ls]}].
peer_dict(#state{peerT = PeerT, connT = ConnT}, Dict0) ->
- ets:foldl(fun(T,A) -> peer_acc(ConnT, A, T) end, Dict0, PeerT).
+ try ets:tab2list(PeerT) of
+ L ->
+ lists:foldl(fun(T,A) -> peer_acc(ConnT, A, T) end, Dict0, L)
+ catch
+ error: badarg -> Dict0 %% service has gone down
+ end.
peer_acc(ConnT, Acc, #peer{pid = Pid,
type = Type,
@@ -3019,7 +3233,11 @@ peer_acc(ConnT, Acc, #peer{pid = Pid,
info_conn(ConnT, TPid, true)
when is_pid(TPid) ->
- info_conn(ets:lookup(ConnT, TPid));
+ try ets:lookup(ConnT, TPid) of
+ T -> info_conn(T)
+ catch
+ error: badarg -> [] %% service has gone down
+ end;
info_conn(_, _, _) ->
[].
@@ -3096,7 +3314,11 @@ info_pending(#state{} = S) ->
{{transport, '$2'}},
{{from, '$3'}}]}}]}],
- ets:select(?REQUEST_TABLE, MatchSpec).
+ try
+ ets:select(?REQUEST_TABLE, MatchSpec)
+ catch
+ error: badarg -> [] %% service has gone down
+ end.
%% info_connections/1
%%
@@ -3151,3 +3373,8 @@ peer_acc(Peer, {PeerD, RefD}) ->
[{TPid, _}, [{origin_host, {_, OH}} | _]]
= [proplists:get_value(K, Peer) || K <- [peer, caps]],
{dict:append(OH, Peer, PeerD), dict:append(OH, TPid, RefD)}.
+
+%% info_options/1
+
+info_options(S) ->
+ S#state.options.
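The {eval_packet, Action, F} return handled above threads the evaluable F through to eval_packet/2, which applies it to the encoded #diameter_packet{} just before it is put on the wire, with any extra arguments in F appended after the packet. A minimal sketch of a handle_request callback using it, modelled on the traffic suite further down (callback body and helper names are illustrative only, and mandatory AVPs are elided):

    %% Sketch: answer an ACR and log the encoded answer packet.
    handle_request(#diameter_packet{}, _SvcName, {_Ref, Caps}) ->
        #diameter_caps{origin_host = {OH, _},
                       origin_realm = {OR, _}}
            = Caps,
        Ans = ['ACA', {'Result-Code', 2001},   %% DIAMETER_SUCCESS
                      {'Origin-Host', OH},
                      {'Origin-Realm', OR}],
        %% log_packet/2 is applied to the encoded packet, with 'answer'
        %% appended as the second argument.
        {eval_packet, {reply, Ans}, [fun log_packet/2, answer]}.

    log_packet(#diameter_packet{} = Pkt, Tag) ->
        io:format("~p: ~p~n", [Tag, Pkt]).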
diff --git a/lib/diameter/src/base/diameter_session.erl b/lib/diameter/src/base/diameter_session.erl
index 4c468f207c..3b236f109a 100644
--- a/lib/diameter/src/base/diameter_session.erl
+++ b/lib/diameter/src/base/diameter_session.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -20,6 +20,7 @@
-module(diameter_session).
-export([sequence/0,
+ sequence/1,
session_id/1,
origin_state_id/0]).
@@ -30,7 +31,7 @@
-define(INT32, 16#FFFFFFFF).
%% ---------------------------------------------------------------------------
-%% # sequence/0
+%% # sequence/0-1
%%
%% Output: 32-bit
%% ---------------------------------------------------------------------------
@@ -77,6 +78,15 @@ sequence() ->
Instr = {_Pos = 2, _Incr = 1, _Threshold = ?INT32, _SetVal = 0},
ets:update_counter(diameter_sequence, sequence, Instr).
+-spec sequence(diameter:sequence())
+ -> diameter:'Unsigned32'().
+
+sequence({_,32}) ->
+ sequence();
+
+sequence({H,N}) ->
+ (H bsl N) bor (sequence() band (1 bsl N - 1)).
+
%% ---------------------------------------------------------------------------
%% # origin_state_id/0
%% ---------------------------------------------------------------------------
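To make the mask arithmetic in sequence/1 concrete (an illustration, not part of the patch): with the {1,26} mask that the traffic suite below configures for its client, the low 26 bits of an identifier come from the shared counter and the value 1 occupies the top 6 bits, which the suite's server side checks by shifting the received identifiers right by 26.

    %% {H,N} = {1,26}, counter value 42 (for example):
    (1 bsl 26) bor (42 band (1 bsl 26 - 1)).   %% 16#400002A
    16#400002A bsr 26.                         %% 1, the masked-in high bits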
diff --git a/lib/diameter/src/base/diameter_watchdog.erl b/lib/diameter/src/base/diameter_watchdog.erl
index d7474e5c56..243ad0a986 100644
--- a/lib/diameter/src/base/diameter_watchdog.erl
+++ b/lib/diameter/src/base/diameter_watchdog.erl
@@ -43,20 +43,24 @@
-include("diameter_internal.hrl").
-define(DEFAULT_TW_INIT, 30000). %% RFC 3539 ch 3.4.1
+-define(NOMASK, {0,32}). %% default sequence mask
-record(watchdog,
{%% PCB - Peer Control Block; see RFC 3539, Appendix A
status = initial :: initial | okay | suspect | down | reopen,
- pending = false :: boolean(),
+ pending = false :: boolean(), %% DWA
tw :: 6000..16#FFFFFFFF | {module(), atom(), list()},
%% {M,F,A} -> integer() >= 0
num_dwa = 0 :: -1 | non_neg_integer(),
%% number of DWAs received during reopen
%% end PCB
- parent = self() :: pid(),
- transport :: pid() | undefined,
+ parent = self() :: pid(), %% service process
+ transport :: pid() | undefined, %% peer_fsm process
tref :: reference(), %% reference for current watchdog timer
- message_data}). %% term passed into diameter_service with message
+ message_data, %% term passed into diameter_service with message
+ sequence :: diameter:sequence(), %% mask
+ restrict :: {diameter:restriction(), boolean()},
+ shutdown = false :: boolean()}).
%% start/2
%%
@@ -118,12 +122,23 @@ make_state({T, Pid, {RecvData,
random:seed(now()),
putr(restart, {T, Opts, Svc}), %% save seeing it in trace
putr(dwr, dwr(Caps)), %%
+ {_,_} = Mask = call(Pid, sequence),
+ Restrict = call(Pid, restriction),
+ Nodes = restrict_nodes(Restrict),
#watchdog{parent = Pid,
- transport = monitor(diameter_peer_fsm:start(T, Opts, Svc)),
+ transport = monitor(diameter_peer_fsm:start(T,
+ Opts,
+ {Mask, Nodes, Svc})),
tw = proplists:get_value(watchdog_timer,
Opts,
?DEFAULT_TW_INIT),
- message_data = {RecvData, SvcName, Apps}}.
+ message_data = {RecvData, SvcName, Apps, Mask},
+ sequence = Mask,
+ restrict = {Restrict, lists:member(node(), Nodes)}}.
+
+%% Retrieve the sequence mask from the parent, rather
+%% than having it passed into init/1, for upgrade reasons: the call to
+%% diameter_service:receive_message/3 passes back the mask.
%% handle_call/3
@@ -137,7 +152,7 @@ handle_cast(_, State) ->
%% handle_info/2
-handle_info(T, State) ->
+handle_info(T, #watchdog{} = State) ->
case transition(T, State) of
ok ->
{noreply, State};
@@ -148,7 +163,14 @@ handle_info(T, State) ->
?LOG(stop, T),
event(State, State#watchdog{status = down}),
{stop, {shutdown, T}, State}
- end.
+ end;
+
+handle_info(T, S) ->
+ handle_info(T, upgrade(S)).
+
+upgrade(S) ->
+ #watchdog{} = list_to_tuple(tuple_to_list(S)
+ ++ [?NOMASK, {nodes, true}, false]).
event(#watchdog{status = T}, #watchdog{status = T}) ->
ok;
@@ -205,9 +227,10 @@ transition({shutdown, Pid}, #watchdog{parent = Pid,
down = S, %% sanity check
stop;
transition({shutdown = T, Pid}, #watchdog{parent = Pid,
- transport = TPid}) ->
+ transport = TPid}
+ = S) ->
TPid ! {T, self()},
- ok;
+ S#watchdog{shutdown = true};
%% Parent process has died,
transition({'DOWN', _, process, Pid, _Reason},
@@ -241,9 +264,10 @@ transition({close, TPid, _Reason}, #watchdog{transport = TPid}) ->
transition({open, TPid, Hosts, T} = Open,
#watchdog{transport = TPid,
status = initial,
- parent = Pid}
+ parent = Pid,
+ restrict = {_, R}}
= S) ->
- case okay(getr(restart), Hosts) of
+ case okay(getr(restart), Hosts, R) of
okay ->
open(Pid, {TPid, T}),
set_watchdog(S#watchdog{status = okay});
@@ -258,12 +282,15 @@ transition({open, TPid, Hosts, T} = Open,
transition({open = P, TPid, _Hosts, T},
#watchdog{transport = TPid,
+ parent = Pid,
status = down}
= S) ->
%% Store the info we need to notify the parent to reopen the
%% connection after the requisite DWA's are received, at which
- %% time we eraser(open).
+ %% time we eraser(open). The reopen message is a later addition,
+ %% to communicate the new capabilities as soon as they're known.
putr(P, {TPid, T}),
+ Pid ! {reopen, self(), {TPid, T}},
set_watchdog(send_watchdog(S#watchdog{status = reopen,
num_dwa = 0}));
@@ -277,7 +304,10 @@ transition({open = P, TPid, _Hosts, T},
transition({'DOWN', _, process, TPid, _},
#watchdog{transport = TPid,
- status = initial}) ->
+ status = S,
+ shutdown = D})
+ when S == initial;
+ D ->
stop;
transition({'DOWN', _, process, TPid, _},
@@ -312,6 +342,15 @@ transition({state, Pid}, #watchdog{status = S}) ->
%% ===========================================================================
+%% Only call "upwards", to the parent service.
+call(Pid, Req) ->
+ try
+ gen_server:call(Pid, Req, infinity)
+ catch
+ exit: Reason ->
+ exit({shutdown, {Req, Reason}})
+ end.
+
monitor(Pid) ->
erlang:monitor(process, Pid),
Pid.
@@ -325,26 +364,36 @@ getr(Key) ->
eraser(Key) ->
erase({?MODULE, Key}).
-%% encode/1
+%% encode/2
-encode(Msg) ->
- #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Msg),
+encode(Msg, Mask) ->
+ Seq = diameter_session:sequence(Mask),
+ Hdr = #diameter_header{version = ?DIAMETER_VERSION,
+ end_to_end_id = Seq,
+ hop_by_hop_id = Seq},
+ Pkt = #diameter_packet{header = Hdr,
+ msg = Msg},
+ #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Pkt),
Bin.
-%% okay/2
+%% okay/3
-okay({{accept, Ref}, _, _}, Hosts) ->
+okay({{accept, Ref}, _, _}, Hosts, Restrict) ->
T = {?MODULE, connection, Ref, Hosts},
diameter_reg:add(T),
- okay(diameter_reg:match(T));
+ if Restrict ->
+ okay(diameter_reg:match(T));
+ true ->
+ okay
+ end;
%% Register before matching so that at least one of two registering
-%% processes will match the other. (Which can't happen as long as
-%% diameter_peer_fsm guarantees at most one open connection to the same
-%% peer.)
+%% processes will match the other.
-okay({{connect, _}, _, _}, _) ->
+okay({{connect, _}, _, _}, _, _) ->
okay.
+%% okay/2
+
%% The peer hasn't been connected recently ...
okay([{_,P}]) ->
P = self(), %% assert
@@ -400,9 +449,10 @@ close(#watchdog{parent = Pid}) ->
%% send_watchdog/1
send_watchdog(#watchdog{pending = false,
- transport = TPid}
+ transport = TPid,
+ sequence = Mask}
= S) ->
- TPid ! {send, encode(getr(dwr))},
+ TPid ! {send, encode(getr(dwr), Mask)},
?LOG(send, 'DWR'),
S#watchdog{pending = true}.
@@ -437,6 +487,14 @@ throwaway(S) ->
throw({?MODULE, throwaway, S}).
%% rcv/2
+%%
+%% The lack of Hop-by-Hop and End-to-End Identifier checks in a
+%% received DWA is intentional. The purpose of the message is to
+%% demonstrate life, but a peer that consistently bungles it by sending
+%% the wrong identifiers causes the connection to toggle between OPEN
+%% and SUSPECT, with failover and failback as a result, despite there
+%% being no real problem with connectivity. Thus, relax and accept any
+%% incoming DWA as being in response to an outgoing DWR.
%% INITIAL Receive DWA Pending = FALSE
%% Throwaway() INITIAL
@@ -555,7 +613,7 @@ timeout(#watchdog{status = T,
= S)
when T == suspect;
T == reopen, P, N < 0 ->
- exit(TPid, shutdown),
+ exit(TPid, {shutdown, watchdog_timeout}),
close(S),
S#watchdog{status = down};
@@ -600,19 +658,40 @@ restart(#watchdog{transport = undefined} = S) ->
restart(S) ->
S.
+%% restart/2
+%%
%% Only restart the transport in the connecting case. For an accepting
-%% transport, we've registered the peer connection when leaving state
-%% initial and this is used by a new accepting process to realize that
-%% it's actually in state down rather then initial when receiving
-%% notification of an open connection.
-
-restart({{connect, _} = T, Opts, Svc}, #watchdog{parent = Pid} = S) ->
+%% transport, there's no guarantee that an accepted connection in a
+%% restarted transport is from the peer we've lost contact with, so we
+%% have to be prepared for another watchdog to handle it. This is what
+%% the diameter_reg registration in this module is for: the peer
+%% connection is registered when leaving state initial and this is
+%% used by a new accepting watchdog to realize that it's actually in
+%% state down rather than initial when receiving notification of an
+%% open connection.
+
+restart({{connect, _} = T, Opts, Svc}, #watchdog{parent = Pid,
+ sequence = Mask,
+ restrict = {R,_}}
+ = S) ->
Pid ! {reconnect, self()},
- S#watchdog{transport = monitor(diameter_peer_fsm:start(T, Opts, Svc))};
+ Nodes = restrict_nodes(R),
+ S#watchdog{transport = monitor(diameter_peer_fsm:start(T,
+ Opts,
+ {Mask, Nodes, Svc})),
+ restrict = {R, lists:member(node(), Nodes)}};
+
+%% No restriction on the number of connections to the same peer: just
+%% die. Note that a state machine never enters state REOPEN in this
+%% case.
+restart({{accept, _}, _, _}, #watchdog{restrict = {_, false}}) ->
+ stop;
+
+%% Otherwise hang around until told to die.
restart({{accept, _}, _, _}, S) ->
S.
-%% Don't currently use Opts/Svc in the accept case but having them in
-%% the process dictionary is helpful if the process dies unexpectedly.
+
+%% Don't currently use Opts/Svc in the accept case.
%% dwr/1
@@ -622,3 +701,22 @@ dwr(#diameter_caps{origin_host = OH,
['DWR', {'Origin-Host', OH},
{'Origin-Realm', OR},
{'Origin-State-Id', OSI}].
+
+%% restrict_nodes/1
+
+restrict_nodes(false) ->
+ [];
+
+restrict_nodes(nodes) ->
+ [node() | nodes()];
+
+restrict_nodes(node) ->
+ [node()];
+
+restrict_nodes(Nodes)
+ when [] == Nodes;
+ is_atom(hd(Nodes)) ->
+ Nodes;
+
+restrict_nodes(F) ->
+ diameter_lib:eval(F).
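restrict_nodes/1 maps the service-level restriction onto a list of nodes, which the watchdog uses to decide whether the one-connection-per-peer behaviour applies locally. A sketch of the values the clauses above accept (illustrative; the node names are made up, and the final clause applies an evaluable in the usual diameter sense):

    []                 = restrict_nodes(false),             %% no restriction
    [node() | nodes()] = restrict_nodes(nodes),             %% all connected nodes
    [node()]           = restrict_nodes(node),              %% local node only
    [a@host, b@host]   = restrict_nodes([a@host, b@host]),  %% explicit list
    [node()]           = restrict_nodes(fun() -> [node()] end).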
diff --git a/lib/diameter/test/Makefile b/lib/diameter/test/Makefile
index e3a57553c1..866d135bd9 100644
--- a/lib/diameter/test/Makefile
+++ b/lib/diameter/test/Makefile
@@ -16,13 +16,8 @@
#
# %CopyrightEnd%
-ifeq ($(ERL_TOP),)
-include $(DIAMETER_TOP)/make/target.mk
-include $(DIAMETER_TOP)/make/$(TARGET)/rules.mk
-else
include $(ERL_TOP)/make/target.mk
include $(ERL_TOP)/make/$(TARGET)/otp.mk
-endif
# ----------------------------------------------------
# Application version
@@ -148,11 +143,7 @@ log:
/%: % force
sed -f release.sed $< > "$(RELSYSDIR)$@"
-ifeq ($(ERL_TOP),)
-include $(DIAMETER_TOP)/make/release_targets.mk
-else
include $(ERL_TOP)/make/otp_release_targets.mk
-endif
release_spec release_docs_spec:
diff --git a/lib/diameter/test/diameter_compiler_SUITE.erl b/lib/diameter/test/diameter_compiler_SUITE.erl
index 4b792b5426..79bf9d32db 100644
--- a/lib/diameter/test/diameter_compiler_SUITE.erl
+++ b/lib/diameter/test/diameter_compiler_SUITE.erl
@@ -31,8 +31,8 @@
%% testcases
-export([format/1, format/2,
replace/1, replace/2,
- generate/1, generate/4, generate/0,
- examples/1, examples/0]).
+ generate/1, generate/4,
+ examples/1]).
-export([dict/0]). %% fake dictionary module
@@ -339,7 +339,7 @@
%% ===========================================================================
suite() ->
- [{timetrap, {minutes, 2}}].
+ [{timetrap, {minutes, 10}}].
all() ->
[format,
@@ -407,9 +407,6 @@ re({RE, Repl}, Bin) ->
%%
%% Ensure success when generating code and compiling.
-generate() ->
- [{timetrap, {seconds, 2*length(?REPLACE)}}].
-
generate(Config) ->
Bin = proplists:get_value(base, Config),
Rs = lists:zip(?REPLACE, lists:seq(1, length(?REPLACE))),
@@ -436,9 +433,6 @@ generate(Mods, Bin, N, Mode) ->
%%
%% Compile dictionaries extracted from various standards.
-examples() ->
- [{timetrap, {seconds, 3*length(?EXAMPLES)}}].
-
examples(_Config) ->
Dir = filename:join([code:lib_dir(diameter, examples), "dict"]),
[D || D <- ?EXAMPLES, _ <- [examples(?S(D), Dir)]].
diff --git a/lib/diameter/test/diameter_dpr_SUITE.erl b/lib/diameter/test/diameter_dpr_SUITE.erl
new file mode 100644
index 0000000000..9252650bf7
--- /dev/null
+++ b/lib/diameter/test/diameter_dpr_SUITE.erl
@@ -0,0 +1,196 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% Tests of the disconnect_cb configuration.
+%%
+
+-module(diameter_dpr_SUITE).
+
+-export([suite/0,
+ all/0,
+ groups/0,
+ init_per_group/2,
+ end_per_group/2]).
+
+%% testcases
+-export([start/1,
+ connect/1,
+ remove_transport/1,
+ stop_service/1,
+ check/1,
+ stop/1]).
+
+%% disconnect_cb
+-export([disconnect/5]).
+
+-include("diameter.hrl").
+
+%% ===========================================================================
+
+-define(util, diameter_util).
+
+-define(ADDR, {127,0,0,1}).
+
+-define(CLIENT, "CLIENT").
+-define(SERVER, "SERVER").
+
+-define(DICT_COMMON, ?DIAMETER_DICT_COMMON).
+-define(APP_ID, ?DICT_COMMON:id()).
+
+%% Config for diameter:start_service/2.
+-define(SERVICE(Host),
+ [{'Origin-Host', Host},
+ {'Origin-Realm', "erlang.org"},
+ {'Host-IP-Address', [?ADDR]},
+ {'Vendor-Id', hd(Host)}, %% match this in disconnect/5
+ {'Product-Name', "OTP/diameter"},
+ {'Acct-Application-Id', [?APP_ID]},
+ {restrict_connections, false},
+ {application, [{dictionary, ?DICT_COMMON},
+ {module, #diameter_callback{_ = false}}]}]).
+
+%% Disconnect reasons that diameter passes as the first argument of a
+%% function configured as disconnect_cb.
+-define(REASONS, [transport, service, application]).
+
+%% Valid values for Disconnect-Cause.
+-define(CAUSES, [0, rebooting, 1, busy, 2, goaway]).
+
+%% Establish one client connection for each element of this list,
+%% configured with disconnect/5 as disconnect_cb and returning the
+%% specified value.
+-define(RETURNS,
+ [[close, {dpr, [{cause, invalid}]}], [ignore, close], []]
+ ++ [[{dpr, [{timeout, 5000}, {cause, T}]}] || T <- ?CAUSES]).
+
+%% ===========================================================================
+
+suite() ->
+ [{timetrap, {seconds, 60}}].
+
+all() ->
+ [{group, R} || R <- ?REASONS].
+
+%% The group determines how transports are terminated: by remove_transport,
+%% stop_service or application stop.
+groups() ->
+ Ts = tc(),
+ [{R, [], Ts} || R <- ?REASONS].
+
+init_per_group(Name, Config) ->
+ [{group, Name} | Config].
+
+end_per_group(_, _) ->
+ ok.
+
+tc() ->
+ [start, connect, remove_transport, stop_service, check, stop].
+
+%% ===========================================================================
+%% start/stop testcases
+
+start(_Config) ->
+ ok = diameter:start(),
+ ok = diameter:start_service(?SERVER, ?SERVICE(?SERVER)),
+ ok = diameter:start_service(?CLIENT, ?SERVICE(?CLIENT)).
+
+connect(Config) ->
+ Pid = spawn(fun init/0), %% process for disconnect_cb to bang
+ Grp = group(Config),
+ LRef = ?util:listen(?SERVER, tcp),
+ Refs = [?util:connect(?CLIENT, tcp, LRef, opts(RCs, {Grp, Pid}))
+ || RCs <- ?RETURNS],
+ ?util:write_priv(Config, config, [Pid | Refs]).
+
+%% Remove all the client transports only in the transport group.
+remove_transport(Config) ->
+ transport == group(Config)
+ andalso (ok = diameter:remove_transport(?CLIENT, true)).
+
+%% Stop the service only in the service group.
+stop_service(Config) ->
+ service == group(Config)
+ andalso (ok = diameter:stop_service(?CLIENT)).
+
+%% Check for callbacks and stop the service. (Not the other way around
+%% for the timing reason explained below.)
+check(Config) ->
+ Grp = group(Config),
+ [Pid | Refs] = ?util:read_priv(Config, config),
+ Pid ! self(), %% ask for dictionary
+ Dict = receive {Pid, D} -> D end, %% get it
+ check(Refs, ?RETURNS, Grp, Dict). %% check for callbacks
+
+stop(_Config) ->
+ ok = diameter:stop().
+
+%% Whether or not there are callbacks after diameter:stop() depends on
+%% timing as long as the server runs on the same node: a server
+%% transport could close the connection before the client has a chance
+%% to apply its callback. Therefore, just check that there haven't
+%% been any callbacks yet.
+check(_, _, application, Dict) ->
+ [] = dict:to_list(Dict);
+
+check([], [], _, _) ->
+ ok;
+
+check([Ref | Refs], CBs, Grp, Dict) ->
+ check1(Ref, hd(CBs), Grp, Dict),
+ check(Refs, tl(CBs), Grp, Dict).
+
+check1(Ref, [ignore | RCs], Reason, Dict) ->
+ check1(Ref, RCs, Reason, Dict);
+
+check1(Ref, [_|_], Reason, Dict) ->
+ {ok, Reason} = dict:find(Ref, Dict); %% callback with expected reason
+
+check1(Ref, [], _, Dict) ->
+ error = dict:find(Ref, Dict). %% no callback
+
+%% ----------------------------------------
+
+group(Config) ->
+ {group, Grp} = lists:keyfind(group, 1, Config),
+ Grp.
+
+%% Configure the callback with the group name (= disconnect reason) as
+%% extra argument.
+opts(RCs, T) ->
+ [{disconnect_cb, {?MODULE, disconnect, [T, RC]}} || RC <- RCs].
+
+%% Match the group name with the disconnect reason to ensure the
+%% callback is being called as expected.
+disconnect(Reason, Ref, Peer, {Reason, Pid}, RC) ->
+ io:format("disconnect: ~p ~p~n", [Ref, Reason]),
+ {_, #diameter_caps{vendor_id = {$C,$S}}} = Peer,
+ Pid ! {Reason, Ref},
+ RC.
+
+init() ->
+ exit(recv(dict:new())).
+
+recv(Dict) ->
+ receive
+ Pid when is_pid(Pid) ->
+ Pid ! {self(), Dict};
+ {Reason, Ref} ->
+ recv(dict:store(Ref, Reason, Dict))
+ end.
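The suite drives the disconnect_cb transport option: the callback receives the disconnect reason (transport | service | application), the transport reference and the peer, followed by the extra arguments configured in opts/2, and its return value decides whether and how DPR is sent. A standalone sketch of the option, using values taken from ?RETURNS/?CAUSES above (illustrative configuration, not part of the suite):

    %% Send DPR with Disconnect-Cause 'rebooting' and wait at most 5
    %% seconds for DPA before the transport is closed.
    {disconnect_cb, fun(_Reason, _Ref, _Peer) ->
                            {dpr, [{cause, rebooting}, {timeout, 5000}]}
                    end}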
diff --git a/lib/diameter/test/diameter_traffic_SUITE.erl b/lib/diameter/test/diameter_traffic_SUITE.erl
index 669918f757..c157b0e304 100644
--- a/lib/diameter/test/diameter_traffic_SUITE.erl
+++ b/lib/diameter/test/diameter_traffic_SUITE.erl
@@ -38,6 +38,7 @@
result_codes/1,
send_ok/1,
send_nok/1,
+ send_bad_answer/1,
send_arbitrary/1,
send_unknown/1,
send_unknown_mandatory/1,
@@ -81,11 +82,11 @@
%% diameter callbacks
-export([peer_up/3,
peer_down/3,
- pick_peer/5, pick_peer/6,
- prepare_request/4, prepare_request/5,
- prepare_retransmit/4,
- handle_answer/5, handle_answer/6,
- handle_error/5,
+ pick_peer/6, pick_peer/7,
+ prepare_request/5, prepare_request/6,
+ prepare_retransmit/5,
+ handle_answer/6, handle_answer/7,
+ handle_error/6,
handle_request/3]).
-include("diameter.hrl").
@@ -108,10 +109,16 @@
-define(BASE, ?DIAMETER_DICT_COMMON).
-define(ACCT, ?DIAMETER_DICT_ACCOUNTING).
+%% Sequence mask for End-to-End and Hop-by-Hop identifiers.
+-define(CLIENT_MASK, {1,26}). %% 1 in top 6 bits
+
%% Run test cases in different encoding variants. Send outgoing
%% messages as lists or records.
-define(ENCODINGS, [list, record]).
+%% Identifiers for client connections.
+-define(CONNECTIONS, [c1,c2,c3]).
+
%% Not really what we should be setting unless the message is sent in
%% the common application but diameter doesn't care.
-define(APP_ID, ?DIAMETER_APP_ID_COMMON).
@@ -124,7 +131,8 @@
{'Vendor-Id', 12345},
{'Product-Name', "OTP/diameter"},
{'Auth-Application-Id', [?DIAMETER_APP_ID_COMMON]},
- {'Acct-Application-Id', [?DIAMETER_APP_ID_ACCOUNTING]}
+ {'Acct-Application-Id', [?DIAMETER_APP_ID_ACCOUNTING]},
+ {restrict_connections, false}
| [{application, [{dictionary, D},
{module, ?MODULE},
{answer_errors, callback}]}
@@ -175,12 +183,14 @@ suite() ->
all() ->
[start, start_services, add_transports, result_codes]
- ++ [{group, name([E]), P} || E <- ?ENCODINGS, P <- [[], [parallel]]]
+ ++ [{group, ?util:name([E,C]), P} || E <- ?ENCODINGS,
+ C <- ?CONNECTIONS,
+ P <- [[], [parallel]]]
++ [remove_transports, stop_services, stop].
groups() ->
Ts = tc(),
- [{name([E]), [], Ts} || E <- ?ENCODINGS].
+ [{?util:name([E,C]), [], Ts} || E <- ?ENCODINGS, C <- ?CONNECTIONS].
init_per_group(Name, Config) ->
[{group, Name} | Config].
@@ -199,6 +209,7 @@ end_per_testcase(_, _) ->
tc() ->
[send_ok,
send_nok,
+ send_bad_answer,
send_arbitrary,
send_unknown,
send_unknown_mandatory,
@@ -244,16 +255,17 @@ start(_Config) ->
start_services(_Config) ->
ok = diameter:start_service(?SERVER, ?SERVICE(?SERVER)),
- ok = diameter:start_service(?CLIENT, ?SERVICE(?CLIENT)).
+ ok = diameter:start_service(?CLIENT, [{sequence, ?CLIENT_MASK}
+ | ?SERVICE(?CLIENT)]).
add_transports(Config) ->
LRef = ?util:listen(?SERVER, tcp, [{capabilities_cb, fun capx/2}]),
- CRef = ?util:connect(?CLIENT, tcp, LRef),
- ?util:write_priv(Config, "transport", {LRef, CRef}).
+ Cs = [?util:connect(?CLIENT, tcp, LRef, [{id, C}]) || C <- ?CONNECTIONS],
+ ?util:write_priv(Config, "transport", [LRef | Cs]).
remove_transports(Config) ->
- {LRef, CRef} = ?util:read_priv(Config, "transport"),
- ?util:disconnect(?CLIENT, CRef, ?SERVER, LRef).
+ [LRef | Cs] = ?util:read_priv(Config, "transport"),
+ [?util:disconnect(?CLIENT, C, ?SERVER, LRef) || C <- Cs].
stop_services(_Config) ->
ok = diameter:stop_service(?CLIENT),
@@ -298,6 +310,14 @@ send_nok(Config) ->
#'diameter_base_answer-message'{'Result-Code' = ?INVALID_AVP_BITS}
= call(Config, Req).
+%% Send an accounting ACR that the server tries to answer with an
+%% inappropriate header, resulting in no answer being sent and the
+%% request timing out.
+send_bad_answer(Config) ->
+ Req = ['ACR', {'Accounting-Record-Type', ?EVENT_RECORD},
+ {'Accounting-Record-Number', 2}],
+ {error, timeout} = call(Config, Req).
+
%% Send an ASR with an arbitrary AVP and expect success and the same
%% AVP in the reply.
send_arbitrary(Config) ->
@@ -539,11 +559,11 @@ call(Config, Req) ->
call(Config, Req, Opts) ->
Name = proplists:get_value(testcase, Config),
- [Enc] = name(proplists:get_value(group, Config)),
+ [Encoding, Client] = ?util:name(proplists:get_value(group, Config)),
diameter:call(?CLIENT,
dict(Req),
- req(Req, Enc),
- [{extra, [Name]} | Opts]).
+ req(Req, Encoding),
+ [{extra, [Name, Client]} | Opts]).
req(['ACR' = H | T], record) ->
?ACCT:'#new-'(?ACCT:msg2rec(H), T);
@@ -579,17 +599,6 @@ set(Dict, E, FV, Rec)
set(_, _, _, Rec) ->
Rec.
-%% Contruct and deconstruct names to work around group names being
-%% restricted to atoms. (Not really used yet.)
-
-name(Names)
- when is_list(Names) ->
- ?A(string:join([?L(A) || A <- Names], ","));
-
-name(A)
- when is_atom(A) ->
- [?A(S) || S <- string:tokens(?L(A), ",")].
-
%% ===========================================================================
%% diameter callbacks
@@ -603,28 +612,40 @@ peer_up(_SvcName, _Peer, State) ->
peer_down(_SvcName, _Peer, State) ->
State.
-%% pick_peer/5/6
+%% pick_peer/6-7
-pick_peer([Peer], _, ?CLIENT, _State, Name)
+pick_peer(Peers, _, ?CLIENT, _State, Name, Id)
when Name /= send_detach ->
- {ok, Peer}.
+ find(Id, Peers).
-pick_peer([_Peer], _, ?CLIENT, _State, send_nopeer, ?EXTRA) ->
+pick_peer(_Peers, _, ?CLIENT, _State, send_nopeer, _, ?EXTRA) ->
false;
-pick_peer([Peer], _, ?CLIENT, _State, send_detach, {_,_}) ->
- {ok, Peer}.
+pick_peer(Peers, _, ?CLIENT, _State, send_detach, Id, {_,_}) ->
+ find(Id, Peers).
-%% prepare_request/4/5
+find(Id, Peers) ->
+ [P] = [P || P <- Peers, id(Id, P)],
+ {ok, P}.
-prepare_request(_Pkt, ?CLIENT, {_Ref, _Caps}, send_discard) ->
+id(Id, {Pid, _Caps}) ->
+ [{ref, _}, {type, _}, {options, Opts} | _]
+ = diameter:service_info(?CLIENT, Pid),
+ lists:member({id, Id}, Opts).
+
+%% prepare_request/5-6
+
+prepare_request(_Pkt, ?CLIENT, {_Ref, _Caps}, send_discard, _) ->
{discard, unprepared};
-prepare_request(Pkt, ?CLIENT, {_Ref, Caps}, Name) ->
+prepare_request(Pkt, ?CLIENT, {_Ref, Caps}, Name, _) ->
{send, prepare(Pkt, Caps, Name)}.
-prepare_request(Pkt, ?CLIENT, {_Ref, Caps}, send_detach, _) ->
- {send, prepare(Pkt, Caps)}.
+prepare_request(Pkt, ?CLIENT, {_Ref, Caps}, send_detach, _, _) ->
+ {eval_packet, {send, prepare(Pkt, Caps)}, [fun log/2, detach]}.
+
+log(#diameter_packet{} = P, T) ->
+ io:format("~p: ~p~n", [T,P]).
prepare(Pkt, Caps, send_unsupported) ->
Req = prepare(Pkt, Caps),
@@ -707,25 +728,25 @@ prepare(#diameter_packet{msg = Req}, Caps)
{'Destination-Realm', DR},
{'Auth-Application-Id', ?APP_ID}]).
-%% prepare_retransmit/4
+%% prepare_retransmit/5
-prepare_retransmit(_Pkt, false, _Peer, _Name) ->
+prepare_retransmit(_Pkt, false, _Peer, _Name, _Id) ->
discard.
-%% handle_answer/5/6
+%% handle_answer/6-7
-handle_answer(Pkt, Req, ?CLIENT, Peer, Name) ->
+handle_answer(Pkt, Req, ?CLIENT, Peer, Name, _Id) ->
answer(Pkt, Req, Peer, Name).
-handle_answer(Pkt, _Req, ?CLIENT, _Peer, send_detach, {Pid, Ref}) ->
+handle_answer(Pkt, _Req, ?CLIENT, _Peer, send_detach, _Id, {Pid, Ref}) ->
Pid ! {Ref, Pkt}.
answer(#diameter_packet{msg = Rec, errors = []}, _Req, _Peer, _) ->
Rec.
-%% handle_error/5
+%% handle_error/6
-handle_error(Reason, _Req, ?CLIENT, _Peer, _Name) ->
+handle_error(Reason, _Req, ?CLIENT, _Peer, _Name, _Id) ->
{error, Reason}.
%% handle_request/3
@@ -733,12 +754,33 @@ handle_error(Reason, _Req, ?CLIENT, _Peer, _Name) ->
%% Note that diameter will set Result-Code and Failed-AVPs if
%% #diameter_packet.errors is non-null.
-handle_request(#diameter_packet{msg = M}, ?SERVER, {_Ref, Caps}) ->
+handle_request(#diameter_packet{header = H, msg = M}, ?SERVER, {_Ref, Caps}) ->
+ #diameter_header{end_to_end_id = EI,
+ hop_by_hop_id = HI}
+ = H,
+ {V,B} = ?CLIENT_MASK,
+ V = EI bsr B, %% assert
+ V = HI bsr B, %%
request(M, Caps).
request(#diameter_base_accounting_ACR{'Accounting-Record-Number' = 0},
_) ->
- {protocol_error, ?INVALID_AVP_BITS};
+ {eval_packet, {protocol_error, ?INVALID_AVP_BITS}, [fun log/2, invalid]};
+
+request(#diameter_base_accounting_ACR{'Session-Id' = SId,
+ 'Accounting-Record-Type' = RT,
+ 'Accounting-Record-Number' = 2 = RN},
+ #diameter_caps{origin_host = {OH, _},
+ origin_realm = {OR, _}}) ->
+ Ans = ['ACA', {'Result-Code', ?SUCCESS},
+ {'Session-Id', SId},
+ {'Origin-Host', OH},
+ {'Origin-Realm', OR},
+ {'Accounting-Record-Type', RT},
+ {'Accounting-Record-Number', RN}],
+
+ {reply, #diameter_packet{header = #diameter_header{is_error = true},%% not
+ msg = Ans}};
request(#diameter_base_accounting_ACR{'Session-Id' = SId,
'Accounting-Record-Type' = RT,
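The callbacks above all gain a trailing argument because call/3 now passes {extra, [Name, Client]} rather than {extra, [Name]}: diameter appends the configured extra terms to each callback's argument list for the duration of the call. Roughly (illustrative values):

    diameter:call(?CLIENT, Dict, Req, [{extra, [send_ok, c1]}]),
    %% ... leads to callbacks of the form
    %%   pick_peer(Candidates, _, ?CLIENT, State, send_ok, c1)
    %%   handle_answer(Pkt, Req, ?CLIENT, Peer, send_ok, c1)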
diff --git a/lib/diameter/test/diameter_util.erl b/lib/diameter/test/diameter_util.erl
index 890d24f6f8..5af4ad9ba5 100644
--- a/lib/diameter/test/diameter_util.erl
+++ b/lib/diameter/test/diameter_util.erl
@@ -24,7 +24,8 @@
%%
%% generic
--export([consult/2,
+-export([name/1,
+ consult/2,
run/1,
fold/3,
foldl/3,
@@ -45,6 +46,21 @@
-define(L, atom_to_list).
+
+%% ---------------------------------------------------------------------------
+%% name/1
+%%
+%% Construct and deconstruct lists of atoms as atoms to work around
+%% group names in common_test being restricted to atoms.
+
+name(Names)
+ when is_list(Names) ->
+ list_to_atom(string:join([atom_to_list(A) || A <- Names], ","));
+
+name(A)
+ when is_atom(A) ->
+ [list_to_atom(S) || S <- string:tokens(atom_to_list(A), ",")].
+
%% ---------------------------------------------------------------------------
%% consult/2
%%
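name/1 packs a list of atoms into a single comma-separated atom and unpacks it again, so that composite group names like the encoding/connection pairs used by the traffic suite survive common_test's atom-only group names. A round trip (illustrative):

    'record,c2'  = diameter_util:name([record, c2]),
    [record, c2] = diameter_util:name('record,c2').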
diff --git a/lib/diameter/test/diameter_watchdog_SUITE.erl b/lib/diameter/test/diameter_watchdog_SUITE.erl
index ff40326947..7ce09e93ca 100644
--- a/lib/diameter/test/diameter_watchdog_SUITE.erl
+++ b/lib/diameter/test/diameter_watchdog_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -30,11 +30,21 @@
end_per_suite/1]).
%% testcases
--export([reopen/1, reopen/4]).
+-export([reopen/1, reopen/4, reopen/7]).
--export([start/3, %% diameter_transport callback
- id/1, %% jitter callback
- run/1]).
+-export([id/1, %% jitter callback
+ run1/1]).
+
+%% diameter_app callbacks
+-export([peer_up/3,
+ peer_down/3]).
+
+%% gen_tcp-ish interface
+-export([listen/2,
+ accept/1,
+ connect/3,
+ send/2,
+ setopts/2]).
-include("diameter.hrl").
-include("diameter_ct.hrl").
@@ -43,33 +53,21 @@
-define(util, diameter_util).
--define(BASE, diameter_gen_base_rfc3588).
--define(APPL_ID, diameter_gen_base_rfc3588:id()).
--define(SUCCESS, 2001). %% DIAMETER_SUCCESS
-
-%% Addresses for the local and remote diameter nodes. The values don't
-%% matter since we're faking transport.
--define(LOCALHOST, {127,0,0,1}).
--define(REMOTEHOST, {10,0,0,1}).
-
--define(CAPS, #diameter_caps{origin_host = "node.innan.com",
- origin_realm = "innan.com",
- host_ip_address = [?LOCALHOST],
- vendor_id = 1022,
- product_name = "remote",
- auth_application_id = [?APPL_ID]}).
-
--define(APPL, #diameter_app{alias = ?MODULE,
- dictionary = ?BASE,
- module = [?MODULE],
- init_state = now(),
- id = ?APPL_ID,
- mutable = false}).
-
-%% Service record maintained by our faked service process.
--define(SERVICE, #diameter_service{pid = self(),
- capabilities = ?CAPS,
- applications = [?APPL]}).
+-define(BASE, ?DIAMETER_DICT_COMMON).
+-define(REALM, "erlang.org").
+-define(ADDR, {127,0,0,1}).
+
+%% Config for diameter:start_service/2.
+-define(SERVICE(Name),
+ [{'Origin-Host', Name ++ "." ++ ?REALM},
+ {'Origin-Realm', ?REALM},
+ {'Host-IP-Address', [?ADDR]},
+ {'Vendor-Id', 42},
+ {'Product-Name', "OTP/diameter"},
+ {'Auth-Application-Id', [?DIAMETER_APP_ID_COMMON]},
+ {application, [{alias, Name},
+ {dictionary, ?BASE},
+ {module, ?MODULE}]}]).
%% Watchdog timer as a callback.
-define(WD(T), {?MODULE, id, [T]}).
@@ -82,28 +80,28 @@
F_ <- [fun(T__) -> T__ end,
fun(T__) -> ?WD(T__) end]]]).
-%% Transport types.
--define(TRANSPORTS, [connect, accept]).
+%% Watchdog timer of the misbehaving peer.
+-define(PEER_WD, 10000).
-%% Message over the transport interface.
--define(TMSG(T), {diameter, T}).
-
-%% Receive a message within a specified time.
--define(RECV(T, Timeout),
- receive T -> now()
- after Timeout -> ?ERROR({timeout, Timeout})
+%% Receive a watchdog event within a specified time.
+-define(EVENT(T, Tmo),
+ receive #diameter_event{info = T} -> now()
+ after Tmo -> ?ERROR({timeout, Tmo})
end).
-%% Receive a message in a given number of watchdogs, plus or minus
+%% Receive an event in a given number of watchdogs, plus or minus
%% half. Note that the call to now_diff assumes left to right
%% evaluation order.
--define(RECV(T, N, WdL, WdH),
+-define(EVENT(T, N, WdL, WdH),
[?ERROR({received, _Elapsed_, _LowerBound_, N, WdL})
|| _UpperBound_ <- [(N)*(WdH) + (WdH) div 2],
- _Elapsed_ <- [now_diff(now(), ?RECV(T, _UpperBound_))],
+ _Elapsed_ <- [now_diff(now(), ?EVENT(T, _UpperBound_))],
_LowerBound_ <- [(N)*(WdL) - (WdL) div 2],
_Elapsed_ =< _LowerBound_*1000]).
+-define(EVENT(T, N, Wd),
+ ?EVENT(T, N, Wd, Wd)).
+
%% A timeout that ensures one watchdog. Ensuring only one watchdog
%% requires (Wd + 2000) + 1000 < 2*(Wd - 2000) ==> 7000 < Wd for the
%% case with random jitter.
@@ -112,7 +110,7 @@
%% ===========================================================================
suite() ->
- [{timetrap, {minutes, 6}}].%% enough for 11 watchdogs @ 30 sec plus jitter
+ [{timetrap, {minutes, 10}}].%% enough for 17 watchdogs @ 30 sec plus jitter
all() ->
[reopen].
@@ -134,52 +132,97 @@ end_per_suite(_Config) ->
%% implement a transport process that plays the role of the peer
%% Diameter node.
+%reopen(_) ->
+% reopen(connect, ?WD(10000), 1, 'DWR');
+
reopen(_) ->
- [] = ?util:run([{?MODULE, [run, [reopen, Wd, T, N, M]]}
- || Wd <- ?WD_TIMERS,
- T <- ?TRANSPORTS,
- N <- [0,1,2],
- M <- ['DWR', 'DWA', other]]).
+ [] = run([[reopen, T, Wd, N, M]
+ || Wd <- ?WD_TIMERS, %% watchdog_timer value
+ T <- [listen, connect], %% watchdog to test
+ N <- [0,1,2], %% DWR's to answer before ignoring
+ M <- ['DWR', 'DWA', 'RAA']]). %% how to induce failback
-reopen(Wd, Type, N, What) ->
- Ref = make_ref(),
+reopen(Type, Wd, N, M) ->
+ Server = start_service(),
+ Client = start_service(),
- %% The maker of transport processes.
- TPid = start({N, Wd, What, Ref}),
+ %% The peer to the transport whose watchdog is tested is given a
+ %% long watchdog timeout so that it doesn't send DWR of its own.
+ {Node, Peer} = {{[], Wd}, {[{module, ?MODULE}], ?WD(?PEER_WD)}},
- %% Act like diameter_service and start the watchdog process, which
- %% in turn starts a peer_fsm process, which in turn starts a
- %% transport process by way of start/3. Messages received by the
- %% testcase are those sent by diameter_watchdog to the service
- %% process (= process starting the watchdog).
- WPid1 = watchdog(Type, Ref, TPid, Wd),
+ {{LH,LW},{CH,CW}} = case Type of
+ listen -> {Node, Peer};
+ connect -> {Peer, Node}
+ end,
- %% Low/high watchdog timeouts.
- WdL = jitter(Wd, -2000),
- WdH = jitter(Wd, 2000),
+ LO = [{transport_module, diameter_tcp},
+ {transport_config, LH ++ [{ip, ?ADDR}, {port, 0}]},
+ {watchdog_timer, LW}],
+
+ {ok, LRef} = diameter:add_transport(Server, {listen, LO}),
+
+ [LP] = ?util:lport(tcp, LRef, 20),
+
+ CO = [{transport_module, diameter_tcp},
+ {transport_config, CH ++ [{ip, ?ADDR}, {port, 0},
+ {raddr, ?ADDR}, {rport, LP}]},
+ {watchdog_timer, CW}],
+
+ %% Use a temporary process to ensure the connecting transport is
+ %% added only once events from the listening transport are
+ %% subscribed to.
+ Pid = spawn(fun() -> receive _ -> ok end end),
+
+ [] = run([[reopen, Type, T, LRef, Pid, Wd, N, M]
+ || T <- [{listen, Server}, {connect, Client, CO}]]).
+
+%% start_service/1
+
+start_service() ->
+ Name = hostname(),
+ ok = diameter:start_service(Name, [{monitor, self()} | ?SERVICE(Name)]),
+ Name.
+
+%% reopen/7
+reopen(Type, {listen = T, SvcName}, Ref, Pid, Wd, N, M) ->
+ diameter:subscribe(SvcName),
+ Pid ! ok,
+ recv(Type, T, SvcName, Ref, Wd, N, M);
+
+reopen(Type, {connect = T, SvcName, Opts}, _, Pid, Wd, N, M) ->
+ diameter:subscribe(SvcName),
+ MRef = erlang:monitor(process, Pid),
+ receive {'DOWN', MRef, process, _, _} -> ok end,
+ {ok, Ref} = diameter:add_transport(SvcName, {T, Opts}),
+ recv(Type, T, SvcName, Ref, Wd, N, M).
+
+%% recv/7
+
+%% The watchdog to be tested.
+recv(Type, Type, _SvcName, Ref, Wd, N, M) ->
%% Connection should come up immediately as a consequence of
%% starting the watchdog process. In the accepting case this
%% results in a new watchdog on a transport waiting for a new
%% connection.
- ?RECV({connection_up, WPid1, _}, 1000),
- WPid2 = case Type of
- connect ->
- WPid1;
- accept ->
- watchdog(Type, Ref, TPid, Wd)
- end,
+ ?EVENT({watchdog, Ref, _, {initial, okay}, _}, 2000),
+ ?EVENT({up, Ref, _, _, #diameter_packet{}}, 0),
+
+ %% Low/high watchdog timeouts.
+ WdL = jitter(Wd, -2000),
+ WdH = jitter(Wd, 2000),
%% OKAY Timer expires & Failover()
%% Pending SetWatchdog() SUSPECT
%%
- %% Since our transport is replying to N DWR's before becoming
- %% silent, we should go down after N+2 watchdog_timer expirations:
- %% that is, after the first unanswered DWR. Knowing the min/max
- %% watchdog timeout values gives the time interval in which the
- %% down message is expected.
- ?RECV({connection_down, WPid1}, N+2, WdL, WdH),
+ %% The peer replies to N DWR's before becoming silent, so we should
+ %% go down after N+2 watchdog_timer expirations: that is, after
+ %% the first unanswered DWR. Knowing the min/max watchdog timeout
+ %% values gives the time interval in which the event is expected.
+
+ ?EVENT({watchdog, Ref, _, {okay, suspect}, _}, N+2, WdL, WdH),
+ ?EVENT({down, Ref, _, _}, 0),
%% SUSPECT Receive DWA Pending = FALSE
%% Failback()
@@ -188,9 +231,11 @@ reopen(Wd, Type, N, What) ->
%% SUSPECT Receive non-DWA Failback()
%% SetWatchdog() OKAY
%%
- %% The transport receives a message before the expiry of another
- %% watchdog to induce failback.
- ?RECV({connection_up, WPid1}, WdH),
+ %% The peer sends a message before the expiry of another watchdog
+ %% to induce failback.
+
+ ?EVENT({watchdog, Ref, _, {suspect, okay}, _}, WdH + 2000),
+ ?EVENT({up, Ref, _, _}, 0),
%% OKAY Timer expires & SendWatchdog()
%% !Pending SetWatchdog()
@@ -199,30 +244,35 @@ reopen(Wd, Type, N, What) ->
%% OKAY Timer expires & Failover()
%% Pending SetWatchdog() SUSPECT
%%
- %% The transport is still not responding to watchdogs so the
- %% connection should go back down after either one or two watchdog
- %% expiries, depending on whether or not DWA restored the connection.
- F = choose(What == 'DWA', 2, 1),
- ?RECV({connection_down, WPid1}, F, WdL, WdH),
+ %% The peer is now ignoring all watchdogs so the connection goes
+ %% back down after either one or two watchdog expiries, depending
+ %% on whether or not DWA restored the connection.
+
+ F = choose(M == 'DWA', 2, 1),
+ ?EVENT({watchdog, Ref, _, {okay, suspect}, _}, F, WdL, WdH),
+ ?EVENT({down, Ref, _, _}, 0),
%% SUSPECT Timer expires CloseConnection()
%% SetWatchdog() DOWN
%%
+ %% Non-response brings the connection down after another timeout.
+
+ ?EVENT({watchdog, Ref, _, {suspect, down}, _}, 1, WdL, WdH),
+
%% DOWN Timer expires AttemptOpen()
%% SetWatchdog() DOWN
%%
- %% Our transport tells us when the fake connection is
- %% reestablished, which should happen after another couple of
- %% watchdog expiries, the first bringing the watchdog to state
- %% DOWN, the second triggering an attempt to reopen the
- %% connection.
- ?RECV({reopen, Ref}, 2, WdL, WdH),
-
%% DOWN Connection up NumDWA = 0
%% SendWatchdog()
%% SetWatchdog()
%% Pending = TRUE REOPEN
%%
+ %% The connection is reestablished after another timeout.
+
+ recv_reopen(Type, Ref, WdL, WdH),
+
+ %% REOPEN Receive non-DWA Throwaway() REOPEN
+ %%
%% REOPEN Receive DWA & Pending = FALSE
%% NumDWA < 2 NumDWA++ REOPEN
%%
@@ -230,312 +280,259 @@ reopen(Wd, Type, N, What) ->
%% NumDWA == 2 NumDWA++
%% Failback() OKAY
%%
- %% Now the watchdog should require three received DWA's before
- %% taking the connection back up. The first DWR is sent directly
- %% after capabilities exchange so it should take no more than two
- %% watchdog expiries.
- ?RECV({connection_up, WPid2, _}, 2, WdL, WdH).
+ %% REOPEN Timer expires & SendWatchdog()
+ %% !Pending SetWatchdog()
+ %% Pending = TRUE REOPEN
+ %%
+ %% An exchange of 3 watchdogs (the first directly after
+ %% capabilities exchange) brings the connection back up.
-%% ===========================================================================
+ ?EVENT({watchdog, Ref, _, {reopen, okay}, _}, 2, WdL, WdH),
+ ?EVENT({up, Ref, _, _, #diameter_packet{}}, 0),
-%% Start the fake transport process. From diameter's point of view
-%% it's started when diameter calls start/3. We start it before this
-%% happens since we use the same fake transport each time diameter
-%% calls start/3. The process lives and dies with the test case.
-start(Config) ->
- Pid = self(),
- spawn(fun() -> loop(init(Pid, Config)) end).
-
-%% Transport start from diameter. This may be called multiple times
-%% depending on the testcase.
-start({Type, _Ref}, #diameter_service{}, Pid) ->
- Ref = make_ref(),
- MRef = erlang:monitor(process, Pid),
- Pid ! {start, self(), Type, Ref},
- {Ref, TPid} = receive
- {Ref, _} = T ->
- T;
- {'DOWN', MRef, process, _, _} = T ->
- T
- end,
- erlang:demonitor(MRef, [flush]),
- {ok, TPid}.
+ %% Non-response brings it down again.
-%% id/1
+ ?EVENT({watchdog, Ref, _, {okay, suspect}, _}, 2, WdL, WdH),
+ ?EVENT({down, Ref, _, _}, 0),
+ ?EVENT({watchdog, Ref, _, {suspect, down}, _}, 1, WdL, WdH),
-id(T) ->
- T.
+ %% Reestablish after another watchdog.
-%% ===========================================================================
+ recv_reopen(Type, Ref, WdL, WdH),
-choose(true, X, _) -> X;
-choose(false, _, X) -> X.
+ %% REOPEN Timer expires & NumDWA = -1
+ %% Pending & SetWatchdog()
+ %% NumDWA >= 0 REOPEN
+ %%
+ %% REOPEN Timer expires & CloseConnection()
+ %% Pending & SetWatchdog()
+ %% NumDWA < 0 DOWN
+ %%
+ %% Peer is now ignoring all watchdogs, so we go down again after 2
+ %% timeouts.
-%% run/1
-%%
-%% A more useful badmatch in case of failure.
+ ?EVENT({watchdog, Ref, _, {reopen, down}, _}, 2, WdL, WdH);
-run([F|A]) ->
- ok = try
- apply(?MODULE, F, A),
- ok
- catch
- E:R ->
- {A, E, R, erlang:get_stacktrace()}
- end.
+%% The misbehaving peer.
+recv(_, Type, SvcName, Ref, Wd, N, M) ->
+ %% First transport process.
+ ?EVENT({watchdog, Ref, _, {initial, okay}, _}, 1000),
+ ?EVENT({up, Ref, _, _, #diameter_packet{}}, 0),
+ reg(Type, Ref, SvcName, {SvcName, {Wd,N,M}}),
+ ?EVENT({watchdog, Ref, _, {okay, down}, _}, infinity),
-%% now_diff/2
+ %% Second transport process.
+ ?EVENT({watchdog, Ref, _, {_, reopen}, _}, infinity),
+ reg(Type, Ref, SvcName, 3),
+ ?EVENT({watchdog, Ref, _, {_, down}, _}, infinity),
-now_diff(T1, T2) ->
- timer:now_diff(T2, T1).
+ %% Third transport process.
+ ?EVENT({watchdog, Ref, _, {_, reopen}, _}, infinity),
+ reg(Type, Ref, SvcName, 0),
+ ?EVENT({watchdog, Ref, _, {_, down}, _}, infinity),
-%% jitter/2
+ ok.
-jitter(?WD(T), _) ->
- T;
-jitter(T,D) ->
- T+D.
+%% recv_reopen/4
-%% watchdog/4
-%%
-%% Fake the call from diameter_service. The watchdog process will send
-%% messages to the calling "service" process so our tests are that the
-%% watchdog responds as expected.
-
-watchdog(Type, Ref, TPid, Wd) ->
- Opts = [{transport_module, ?MODULE},
- {transport_config, TPid},
- {watchdog_timer, Wd}],
- {_MRef, Pid} = diameter_watchdog:start({Type, Ref},
- {false, Opts, false, ?SERVICE}),
- Pid.
+recv_reopen(connect, Ref, WdL, WdH) ->
+ ?EVENT({watchdog, Ref, _, {_, reopen}, _}, 1, WdL, WdH),
+ ?EVENT({reconnect, Ref, _}, 0);
-%% ===========================================================================
+recv_reopen(listen, Ref, _, _) ->
+ ?EVENT({watchdog, Ref, _, {_, reopen}, _}, 1, ?PEER_WD).
-%% Transport process implmentation. Fakes reception of messages by
-%% sending fakes to the parent (peer fsm) process that called start/3.
-
--record(transport,
- {type, %% connect | accept | manager
- parent, %% pid() of peer_fsm/ervice process
- open = false, %% done with capabilities exchange?
- config}).%% testcase-specific config
-
-%% init/2
-
-%% Testcase starting the manager.
-init(SvcPid, {_,_,_,_} = Config) ->
- putr(peer, [{'Origin-Host', hostname() ++ ".utan.com"},
- {'Origin-Realm', "utan.com"}]),
- #transport{type = manager,
- parent = monitor(SvcPid),
- config = Config};
-
-%% Manager starting a transport.
-init(_, {Type, ParentPid, SvcPid, TwinPid, Peer, {N,_,_,_} = Config}) ->
- putr(peer, Peer),
- putr(service, SvcPid),
- putr(count, init(Type, ParentPid, TwinPid, N)),%% number of DWR's to answer
- #transport{type = Type,
- parent = monitor(ParentPid),
- config = Config}.
-
-init(Type, ParentPid, undefined, N) ->
- connected(ParentPid, Type),
- N;
-init(_, _, TPid, _) ->
- monitor(TPid),
- 3.
-
-monitor(Pid) ->
- erlang:monitor(process, Pid),
- Pid.
+%% reg/4
+%%
+%% Look up the pid of the transport process and publish a term for
+%% send/2 to look up.
+reg(Type, Ref, SvcName, T) ->
+ TPid = tpid(Type, Ref, diameter:service_info(SvcName, transport)),
+ true = diameter_reg:add_new({?MODULE, TPid, T}).
+
+%% tpid/3
+
+tpid(connect, Ref, [[{ref, Ref},
+ {type, connect},
+ {options, _},
+ {watchdog, _},
+ {peer, _},
+ {apps, _},
+ {caps, _},
+ {port, [{owner, TPid} | _]}
+ | _]]) ->
+ TPid;
+
+tpid(listen, Ref, [[{ref, Ref},
+ {type, listen},
+ {options, _},
+ {accept, As}
+ | _]]) ->
+ [[{watchdog, _},
+ {peer, _},
+ {apps, _},
+ {caps, _},
+ {port, [{owner, TPid} | _]}
+ | _]]
+ = lists:filter(fun([{watchdog, {_,_,S}} | _]) ->
+ S == okay orelse S == reopen
+ end,
+ As),
+ TPid.
-%% Generate a unique hostname for the faked peer.
-hostname() ->
- lists:flatten(io_lib:format("~p-~p-~p", tuple_to_list(now()))).
+%% ===========================================================================
-%% loop/1
-
-loop(S) ->
- loop(msg(receive T -> T end, S)).
-
-msg(T,S) ->
- case transition(T,S) of
- ok ->
- S;
- #transport{} = NS ->
- NS;
- {stop, Reason} ->
- x(Reason)
- end.
-
-x(Reason) ->
- exit(Reason).
-
-%% transition/2
-
-%% Manager is being asked for a new transport process.
-transition({start, Pid, Type, Ref}, #transport{type = manager,
- parent = SvcPid,
- config = Config}) ->
- TPid = start({Type, Pid, SvcPid, getr(transport), getr(peer), Config}),
- Pid ! {Ref, TPid},
- putr(transport, TPid),
+listen(PortNr, Opts) ->
+ gen_tcp:listen(PortNr, Opts).
+
+accept(LSock) ->
+ gen_tcp:accept(LSock).
+
+connect(Addr, Port, Opts) ->
+ gen_tcp:connect(Addr, Port, Opts).
+
+setopts(Sock, Opts) ->
+ inet:setopts(Sock, Opts).
+
+send(Sock, Bin) ->
+ send(getr(config), Sock, Bin).
+
+%% send/3
+
+%% First outgoing message from a new transport process is CER/CEA.
+%% Remaining outgoing messages are either DWR or DWA.
+send(undefined, Sock, Bin) ->
+ putr(config, init),
+ gen_tcp:send(Sock, Bin);
+
+%% Outgoing DWR: fake reception of DWA. Use the fact that AVP values
+%% are ignored. This is to ensure that the peer's watchdog state
+%% transitions are only induced by responses to messages it sends.
+send(_, Sock, <<_:32, 1:1, _:7, 280:24, _:32, EId:32, HId:32, _/binary>>) ->
+ Pkt = #diameter_packet{header = #diameter_header{version = 1,
+ end_to_end_id = EId,
+ hop_by_hop_id = HId},
+ msg = ['DWA', {'Result-Code', 2001},
+ {'Origin-Host', "XXX"},
+ {'Origin-Realm', ?REALM}]},
+ #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Pkt),
+ self() ! {tcp, Sock, Bin},
ok;
-%% Peer fsm or testcase process has died.
-transition({'DOWN', _, process, Pid, _} = T, #transport{parent = Pid}) ->
- {stop, T};
-
-%% Twin transport process has gone down. In the connect case, the
-%% transport isn't started until this happens in the first place so
-%% connect immediately. In the accept case, fake the peer reconnecting
-%% only after another watchdog expiry.
-transition({'DOWN', _, process, _, _}, #transport{type = Type,
- config = {_, Wd, _, _}}) ->
- Tmo = case Type of
- connect ->
- 0;
- accept ->
- ?ONE_WD(Wd)
- end,
- erlang:send_after(Tmo, self(), reconnect),
+%% First outgoing DWA.
+send(init, Sock, Bin) ->
+ [{{?MODULE, _, T}, _}] = diameter_reg:wait({?MODULE, self(), '_'}),
+ putr(config, T),
+ send(Sock, Bin);
+
+%% First transport process.
+send({SvcName, {_,_,_} = T}, Sock, Bin) ->
+ [{'Origin-Host', _} = OH, {'Origin-Realm', _} = OR | _]
+ = ?SERVICE(SvcName),
+ putr(origin, [OH, OR]),
+ putr(config, T),
+ send(Sock, Bin);
+
+%% Discard DWA, failback after another timeout in the peer.
+send({Wd, 0 = No, Msg}, Sock, Bin) ->
+ Origin = getr(origin),
+ spawn(fun() -> failback(?ONE_WD(Wd), Msg, Sock, Bin, Origin) end),
+ putr(config, No),
ok;
-transition(reconnect, #transport{type = Type,
- parent = Pid,
- config = {_,_,_,Ref}}) ->
- getr(service) ! {reopen, Ref},
- connected(Pid, Type),
- ok;
+%% Send DWA while we're in the mood (aka 0 < N).
+send({Wd, N, Msg}, Sock, Bin) ->
+ putr(config, {Wd, N-1, Msg}),
+ gen_tcp:send(Sock, Bin);
-%% Peer fsm process is sending CER: fake the peer's CEA.
-transition(?TMSG({send, Bin}), #transport{type = connect,
- open = false,
- parent = Pid}
- = S) ->
- {Code, Flags, _} = ?BASE:msg_header('CER'),
- <<_:32, Flags:8, Code:24, _:96, _/binary>> = Bin,
- Hdr = make_header(Bin),
- recv(Pid, {Hdr, make_cea()}),
- S#transport{open = true};
-
-%% Peer fsm process is sending CEA.
-transition(?TMSG({send, Bin}), #transport{type = accept,
- open = false}
- = S) ->
- {Code, Flags, _} = ?BASE:msg_header('CEA'),
- <<_:32, Flags:8, Code:24, _:96, _/binary>> = Bin,
- S#transport{open = true};
-
-%% Watchdog is sending DWR or DWA.
-transition(?TMSG({send, Bin}), #transport{open = true} = S) ->
- {Code, _, _} = ?BASE:msg_header('DWR'),
- {Code, _, _} = ?BASE:msg_header('DWA'),
- <<_:32, R:1, 0:7, Code:24, _:96, _/binary>> = Bin,
- Hdr = make_header(Bin),
- dwa(1 == R, S, Hdr),
+%% Discard DWA.
+send(0, _Sock, _Bin) ->
ok;
-%% We're telling ourselves to fake a received message.
-transition({recv, Msg}, #transport{parent = Pid}) ->
- recv(Pid, Msg),
- ok;
+%% Send DWA.
+send(N, Sock, <<_:32, 0:1, _:7, 280:24, _/binary>> = Bin) ->
+ putr(config, N-1),
+ gen_tcp:send(Sock, Bin).
-%% We're telling ourselves to receive a message to induce failback.
-transition(failback = T, #transport{parent = Pid}) ->
- recv(Pid, eraser(T)),
- ok.
+failback(Tmo, Msg, Sock, Bin, Origin) ->
+ timer:sleep(Tmo),
+ ok = gen_tcp:send(Sock, msg(Msg, Bin, Origin)).
-make_header(Bin) ->
- #diameter_header{end_to_end_id = E,
- hop_by_hop_id = H}
- = diameter_codec:decode_header(Bin),
- #diameter_header{end_to_end_id = E,
- hop_by_hop_id = H}.
-
-recv(Pid, Msg) ->
- Pid ! ?TMSG({recv, encode(Msg)}).
-
-%% Replace the end-to-end/hop-by-hop identifiers with those from an
-%% incoming request to which we're constructing a reply.
-encode({Hdr, [_|_] = Msg}) ->
- #diameter_header{hop_by_hop_id = HBH,
- end_to_end_id = E2E}
- = Hdr,
- #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Msg),
- <<H:12/binary, _:64, T/binary>> = Bin,
- <<H/binary, HBH:32, E2E:32, T/binary>>;
-
-encode([_|_] = Msg) ->
- #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Msg),
+%% msg/3
+
+msg('DWA', Bin, _Origin) ->
+ Bin;
+msg(Msg, _Bin, Origin) ->
+ #diameter_packet{bin = Bin}
+ = diameter_codec:encode(?BASE, msg(Msg, Origin)),
Bin.
-connected(Pid, connect) ->
- Pid ! ?TMSG({self(), connected, make_ref()});
-connected(Pid, accept) ->
- Pid ! ?TMSG({self(), connected}),
- recv(Pid, make_cer()).
+msg('DWR' = M, T) ->
+ [M | T];
-make_cer() ->
- ['CER' | getr(peer)] ++ [{'Host-IP-Address', [?REMOTEHOST]},
- {'Vendor-Id', 1028},
- {'Product-Name', "Utan"},
- {'Auth-Application-Id', [?APPL_ID]}].
+msg('RAA', T) ->
+ ['RAA', {'Session-Id', diameter:session_id("abc")},
+ {'Result-Code', 2001}
+ | T].
+%% An unexpected answer is discarded after passing through the
+%% watchdog state machine.
-make_cea() ->
- ['CER' | Rest] = make_cer(),
- ['CEA', {'Result-Code', ?SUCCESS} | Rest].
+%% ===========================================================================
-make_dwr() ->
- ['DWR' | getr(peer)].
+peer_up(_SvcName, _Peer, S) ->
+ S.
-make_dwa() ->
- ['DWR' | Rest] = make_dwr(),
- ['DWA', {'Result-Code', ?SUCCESS} | Rest].
+peer_down(_SvcName, _Peer, S) ->
+ S.
-dwa(false, _, _) -> %% outgoing was DWA ...
- ok;
-dwa(true, S, Hdr) -> %% ... or DWR
- dwa(getr(count), Hdr, S);
-
-%% React to the DWR only after another watchdog expiry. We shouldn't
-%% get another DWR while the answer is pending.
-dwa(0, Hdr, #transport{config = {_, Wd, What, _}}) ->
- erlang:send_after(?ONE_WD(Wd), self(), failback),
- putr(failback, make_msg(What, Hdr)),
- eraser(count);
-
-dwa(undefined, _, _) ->
- undefined = getr(failback), %% ensure this is after failback
- ok;
+%% ===========================================================================
-%% Reply with DWA.
-dwa(N, Hdr, #transport{parent = Pid}) ->
- putr(count, N-1),
- recv(Pid, {Hdr, make_dwa()}).
+choose(true, X, _) -> X;
+choose(false, _, X) -> X.
-%% Answer to received DWR.
-make_msg('DWA', Hdr) ->
- {Hdr, make_dwa()};
+%% id/1
+%%
+%% Jitter callback.
-%% DWR from peer.
-make_msg('DWR', _) ->
- make_dwr();
+id(T) ->
+ T.
-%% An unexpected answer is discarded after passing through the
-%% watchdog state machine.
-make_msg(other, _) ->
- ['RAA', {'Session-Id', diameter:session_id("abc")},
- {'Result-Code', 2001}
- | getr(peer)].
+%% run/1
+%%
+%% A more useful badmatch in case of failure.
+
+run(Fs) ->
+ ?util:run([{?MODULE, [run1, F]} || F <- Fs]).
+
+run1([F|A]) ->
+ ok = try
+ apply(?MODULE, F, A),
+ ok
+ catch
+ E:R ->
+ S = erlang:get_stacktrace(),
+ io:format("~p~n", [{A, E, R, S}]),
+ S
+ end.
+
+%% now_diff/2
+
+now_diff(T1, T2) ->
+ timer:now_diff(T2, T1).
+
+%% jitter/2
+
+jitter(?WD(T), _) ->
+ T;
+jitter(T,D) ->
+ T+D.
+
+%% Generate a unique hostname for the faked peer.
+hostname() ->
+ lists:flatten(io_lib:format("~p-~p-~p", tuple_to_list(now()))).
putr(Key, Val) ->
put({?MODULE, Key}, Val).
getr(Key) ->
get({?MODULE, Key}).
-
-eraser(Key) ->
- erase({?MODULE, Key}).
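The fake transport above tells watchdog requests from answers by matching directly on the raw Diameter header. A minimal sketch of that match (not part of the patch; the function name is illustrative), assuming the RFC 3588 header layout of version, length, flags and command code:

is_dwr(<<_Version:8, _MsgLen:24, R:1, _Flags:7, 280:24, _/binary>>) ->
    %% Command code 280 is DWR/DWA; the R bit distinguishes request from answer.
    R =:= 1;
is_dwr(_) ->
    false.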
diff --git a/lib/diameter/test/modules.mk b/lib/diameter/test/modules.mk
index 7f163536fb..5898e125ae 100644
--- a/lib/diameter/test/modules.mk
+++ b/lib/diameter/test/modules.mk
@@ -2,7 +2,7 @@
# %CopyrightBegin%
#
-# Copyright Ericsson AB 2010-2011. All Rights Reserved.
+# Copyright Ericsson AB 2010-2012. All Rights Reserved.
#
# The contents of this file are subject to the Erlang Public License,
# Version 1.1, (the "License"); you may not use this file except in
@@ -39,7 +39,8 @@ MODULES = \
diameter_traffic_SUITE \
diameter_relay_SUITE \
diameter_tls_SUITE \
- diameter_failover_SUITE
+ diameter_failover_SUITE \
+ diameter_dpr_SUITE
HRL_FILES = \
diameter_ct.hrl
diff --git a/lib/diameter/vsn.mk b/lib/diameter/vsn.mk
index 48e6596e72..c9f74ffcec 100644
--- a/lib/diameter/vsn.mk
+++ b/lib/diameter/vsn.mk
@@ -18,7 +18,7 @@
# %CopyrightEnd%
APPLICATION = diameter
-DIAMETER_VSN = 1.2
+DIAMETER_VSN = 1.3
PRE_VSN =
APP_VSN = "$(APPLICATION)-$(DIAMETER_VSN)$(PRE_VSN)"
diff --git a/lib/erl_docgen/priv/xsl/Makefile b/lib/erl_docgen/priv/xsl/Makefile
index 1510387d72..58589672b8 100644
--- a/lib/erl_docgen/priv/xsl/Makefile
+++ b/lib/erl_docgen/priv/xsl/Makefile
@@ -42,7 +42,8 @@ XSL_FILES = \
db_pdf_params.xsl \
db_html.xsl \
db_html_params.xsl \
- db_man.xsl
+ db_man.xsl \
+ db_eix.xsl
# ----------------------------------------------------
diff --git a/lib/erl_docgen/priv/xsl/db_html.xsl b/lib/erl_docgen/priv/xsl/db_html.xsl
index 4bc5abb364..ab5f24c406 100644
--- a/lib/erl_docgen/priv/xsl/db_html.xsl
+++ b/lib/erl_docgen/priv/xsl/db_html.xsl
@@ -578,8 +578,22 @@
<xsl:param name="curModule"/>
<html>
<head>
- <link rel="stylesheet" href="{$topdocdir}/otp_doc.css" type="text/css"/>
- <title>Erlang -- <xsl:value-of select="header/title"/></title>
+ <xsl:choose>
+ <xsl:when test="string-length($stylesheet) > 0">
+ <link rel="stylesheet" href="{$topdocdir}/{$stylesheet}" type="text/css"/>
+ </xsl:when>
+ <xsl:otherwise>
+ <link rel="stylesheet" href="{$topdocdir}/otp_doc.css" type="text/css"/>
+ </xsl:otherwise>
+ </xsl:choose>
+ <xsl:choose>
+ <xsl:when test="string-length($winprefix) > 0">
+ <title><xsl:value-of select="$winprefix"/> -- <xsl:value-of select="header/title"/></title>
+ </xsl:when>
+ <xsl:otherwise>
+ <title>Erlang -- <xsl:value-of select="header/title"/></title>
+ </xsl:otherwise>
+ </xsl:choose>
</head>
<body bgcolor="white" text="#000000" link="#0000ff" vlink="#ff00ff" alink="#ff0000">
@@ -719,7 +733,14 @@
<xsl:template name="menu_top">
- <img alt="Erlang logo" src="{$topdocdir}/erlang-logo.png"/>
+ <xsl:choose>
+ <xsl:when test="string-length($logo) > 0">
+ <img alt="Erlang logo" src="{$topdocdir}/{$logo}"/>
+ </xsl:when>
+ <xsl:otherwise>
+ <img alt="Erlang logo" src="{$topdocdir}/erlang-logo.png"/>
+ </xsl:otherwise>
+ </xsl:choose>
<br/>
<small>
<xsl:if test="boolean(/book/parts/part)">
@@ -731,7 +752,14 @@
<xsl:if test="boolean(/book/releasenotes)">
<a href="release_notes.html">Release Notes</a><br/>
</xsl:if>
- <a href="{$pdfdir}/{$appname}-{$appver}.pdf">PDF</a><br/>
+ <xsl:choose>
+ <xsl:when test="string-length($pdfname) > 0">
+ <a href="{$pdfdir}/{$pdfname}.pdf">PDF</a><br/>
+ </xsl:when>
+ <xsl:otherwise>
+ <a href="{$pdfdir}/{$appname}-{$appver}.pdf">PDF</a><br/>
+ </xsl:otherwise>
+ </xsl:choose>
<a href="{$topdocdir}/index.html">Top</a>
</small>
</xsl:template>
diff --git a/lib/erl_docgen/priv/xsl/db_pdf.xsl b/lib/erl_docgen/priv/xsl/db_pdf.xsl
index da96052462..7de5af2a49 100644
--- a/lib/erl_docgen/priv/xsl/db_pdf.xsl
+++ b/lib/erl_docgen/priv/xsl/db_pdf.xsl
@@ -650,7 +650,7 @@
<fo:flow flow-name="xsl-region-body">
<fo:block xsl:use-attribute-sets="cover.logo">
- <fo:external-graphic src="{$docgen}/priv/images/erlang-logo.gif"/>
+ <fo:external-graphic src="{$logo}"/>
</fo:block>
<fo:block xsl:use-attribute-sets="cover.title" id="cover-page">
<xsl:apply-templates/>
diff --git a/lib/erl_docgen/priv/xsl/db_pdf_params.xsl b/lib/erl_docgen/priv/xsl/db_pdf_params.xsl
index 4d9c08d0c3..4e61f1f476 100644
--- a/lib/erl_docgen/priv/xsl/db_pdf_params.xsl
+++ b/lib/erl_docgen/priv/xsl/db_pdf_params.xsl
@@ -87,7 +87,7 @@
<xsl:attribute-set name="cover.title">
<xsl:attribute name="border-before-style">solid</xsl:attribute>
<xsl:attribute name="border-before-width">10pt</xsl:attribute>
- <xsl:attribute name="border-color">#960003</xsl:attribute>
+ <xsl:attribute name="border-color"><xsl:value-of select="$pdfcolor"/></xsl:attribute>
<xsl:attribute name="font-size">2.3em</xsl:attribute>
<xsl:attribute name="padding-before">0.5em</xsl:attribute>
<xsl:attribute name="text-align">end</xsl:attribute>
@@ -101,7 +101,7 @@
<xsl:attribute-set name="cover.inner.copyright">
<xsl:attribute name="border-before-style">solid</xsl:attribute>
<xsl:attribute name="border-before-width">1pt</xsl:attribute>
- <xsl:attribute name="border-color">#960003</xsl:attribute>
+ <xsl:attribute name="border-color"><xsl:value-of select="$pdfcolor"/></xsl:attribute>
<xsl:attribute name="font-weight">bold</xsl:attribute>
<xsl:attribute name="padding-before">0.5em</xsl:attribute>
<xsl:attribute name="space-before">200mm</xsl:attribute>
@@ -160,7 +160,7 @@
<xsl:attribute-set name="h1">
<xsl:attribute name="border-after-style">solid</xsl:attribute>
<xsl:attribute name="border-after-width">1pt</xsl:attribute>
- <xsl:attribute name="border-color">#960003</xsl:attribute>
+ <xsl:attribute name="border-color"><xsl:value-of select="$pdfcolor"/></xsl:attribute>
<xsl:attribute name="break-before">page</xsl:attribute>
<xsl:attribute name="font-family">sans-serif</xsl:attribute>
<xsl:attribute name="font-size">1.83em</xsl:attribute>
@@ -226,7 +226,7 @@
<xsl:attribute-set name="page-header">
<xsl:attribute name="border-after-style">solid</xsl:attribute>
<xsl:attribute name="border-after-width">2pt</xsl:attribute>
- <xsl:attribute name="border-color">#960003</xsl:attribute>
+ <xsl:attribute name="border-color"><xsl:value-of select="$pdfcolor"/></xsl:attribute>
<xsl:attribute name="font-family">sans-serif</xsl:attribute>
<xsl:attribute name="font-size">0.9em</xsl:attribute>
<xsl:attribute name="font-weight">bold</xsl:attribute>
diff --git a/lib/erl_docgen/vsn.mk b/lib/erl_docgen/vsn.mk
index b05df254a6..2599dc0ff7 100644
--- a/lib/erl_docgen/vsn.mk
+++ b/lib/erl_docgen/vsn.mk
@@ -1,2 +1,2 @@
-ERL_DOCGEN_VSN = 0.3.2
+ERL_DOCGEN_VSN = 0.3.3
diff --git a/lib/inets/doc/src/httpd.xml b/lib/inets/doc/src/httpd.xml
index 7e21229fcf..8497d91549 100644
--- a/lib/inets/doc/src/httpd.xml
+++ b/lib/inets/doc/src/httpd.xml
@@ -178,7 +178,13 @@
<p>Note that this option is only used when the option
<c>socket_type</c> has the value <c>ip_comm</c>. </p>
</item>
-
+ <marker id="prop_minimum_bytes_per_second"></marker>
+ <tag>{minimum_bytes_per_second, integer()}</tag>
+ <item>
+ <p>If given, sets a minimum bytes-per-second rate for connections.</p>
+ <p>If a connection does not reach this rate, its socket is closed.</p>
+ <p>The option reduces the risk of "slow DoS" (slow denial-of-service) attacks.</p>
+ </item>
</taglist>
<marker id="props_api_modules"></marker>
diff --git a/lib/inets/src/http_server/httpd_conf.erl b/lib/inets/src/http_server/httpd_conf.erl
index 747118431e..a97bbd9b25 100644
--- a/lib/inets/src/http_server/httpd_conf.erl
+++ b/lib/inets/src/http_server/httpd_conf.erl
@@ -483,7 +483,7 @@ validate_properties(Properties) ->
case mandatory_properties(Properties) of
ok ->
%% Second, check that property dependency are ok
- {ok, validate_properties2(Properties)};
+ {ok, check_minimum_bytes_per_second(validate_properties2(Properties))};
Error ->
throw(Error)
end.
@@ -522,7 +522,18 @@ validate_properties2(Properties) ->
throw(Error)
end
end.
-
+check_minimum_bytes_per_second(Properties) ->
+ case proplists:get_value(minimum_bytes_per_second, Properties, false) of
+ false ->
+ Properties;
+ Nr ->
+ case is_integer(Nr) of
+ false ->
+ throw({error, {minimum_bytes_per_second, is_not_integer}});
+ _ ->
+ Properties
+ end
+ end.
mandatory_properties(ConfigList) ->
a_must(ConfigList, [server_name, port, server_root, document_root]).
diff --git a/lib/inets/src/http_server/httpd_request_handler.erl b/lib/inets/src/http_server/httpd_request_handler.erl
index b62c10bbc7..5e0bd39cb3 100644
--- a/lib/inets/src/http_server/httpd_request_handler.erl
+++ b/lib/inets/src/http_server/httpd_request_handler.erl
@@ -44,7 +44,9 @@
timeout, %% infinity | integer() > 0
timer, %% ref() - Request timer
headers, %% #http_request_h{}
- body %% binary()
+ body, %% binary()
+ data, %% The total data received in bytes, checked periodically
+ byte_limit %% Minimum bytes per second before the connection is closed
}).
%%====================================================================
@@ -98,7 +100,6 @@ init([Manager, ConfigDB, AcceptTimeout]) ->
[{socket_type, SocketType}, {socket, Socket}]),
TimeOut = httpd_util:lookup(ConfigDB, keep_alive_timeout, 150000),
-
Then = erlang:now(),
?hdrd("negotiate", []),
@@ -139,12 +140,11 @@ continue_init(Manager, ConfigDB, SocketType, Socket, TimeOut) ->
mfa = MFA},
?hdrt("activate request timeout", []),
- NewState = activate_request_timeout(State),
?hdrt("set socket options (binary, packet & active)", []),
http_transport:setopts(SocketType, Socket,
[binary, {packet, 0}, {active, once}]),
-
+ NewState = data_receive_counter(activate_request_timeout(State), httpd_util:lookup(ConfigDB, minimum_bytes_per_second, false)),
?hdrt("init done", []),
gen_server:enter_loop(?MODULE, [], NewState).
@@ -205,16 +205,25 @@ handle_info({Proto, Socket, Data},
?hdrd("received data",
[{data, Data}, {proto, Proto},
{socket, Socket}, {socket_type, SockType}, {mfa, MFA}]),
-
+
%% case (catch Module:Function([Data | Args])) of
PROCESSED = (catch Module:Function([Data | Args])),
-
+ NewDataSize = case State#state.byte_limit of
+ undefined ->
+ undefined;
+ _ ->
+ State#state.data + byte_size(Data)
+ end,
?hdrt("data processed", [{processing_result, PROCESSED}]),
-
case PROCESSED of
{ok, Result} ->
?hdrd("data processed", [{result, Result}]),
- NewState = cancel_request_timeout(State),
+ NewState = case NewDataSize of
+ undefined ->
+ cancel_request_timeout(State);
+ _ ->
+ set_new_data_size(cancel_request_timeout(State), NewDataSize)
+ end,
handle_http_msg(Result, NewState);
{error, {uri_too_long, MaxSize}, Version} ->
@@ -239,7 +248,12 @@ handle_info({Proto, Socket, Data},
NewMFA ->
?hdrd("data processed - reactivate socket", [{new_mfa, NewMFA}]),
http_transport:setopts(SockType, Socket, [{active, once}]),
- {noreply, State#state{mfa = NewMFA}}
+ case NewDataSize of
+ undefined ->
+ {noreply, State#state{mfa = NewMFA}};
+ _ ->
+ {noreply, State#state{mfa = NewMFA, data = NewDataSize}}
+ end
end;
%% Error cases
@@ -263,7 +277,22 @@ handle_info(timeout, #state{mod = ModData} = State) ->
error_log("The client did not send the whole request before the "
"server side timeout", ModData),
{stop, normal, State#state{response_sent = true}};
-
+handle_info(check_data_first, #state{data = Data, byte_limit = Byte_Limit} = State) ->
+ case Data >= (Byte_Limit*3) of
+ true ->
+ erlang:send_after(1000, self(), check_data),
+ {noreply, State#state{data = 0}};
+ _ ->
+ {stop, normal, State#state{response_sent = true}}
+ end;
+handle_info(check_data, #state{data = Data, byte_limit = Byte_Limit} = State) ->
+ case Data >= Byte_Limit of
+ true ->
+ erlang:send_after(1000, self(), check_data),
+ {noreply, State#state{data = 0}};
+ _ ->
+ {stop, normal, State#state{response_sent = true}}
+ end;
%% Default case
handle_info(Info, #state{mod = ModData} = State) ->
Error = lists:flatten(
@@ -311,6 +340,8 @@ code_change(_OldVsn, State, _Extra) ->
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
+set_new_data_size(State, NewData) ->
+ State#state{data = NewData}.
await_socket_ownership_transfer(AcceptTimeout) ->
receive
{socket_ownership_transfered, SocketType, Socket} ->
@@ -603,7 +634,14 @@ activate_request_timeout(#state{timeout = Time} = State) ->
?hdrt("activate request timeout", [{time, Time}]),
Ref = erlang:send_after(Time, self(), timeout),
State#state{timer = Ref}.
-
+data_receive_counter(State, Byte_limit) ->
+ case Byte_limit of
+ false ->
+ State#state{data = 0};
+ Nr ->
+ erlang:send_after(3000, self(), check_data_first),
+ State#state{data = 0, byte_limit = Nr}
+ end.
cancel_request_timeout(#state{timer = undefined} = State) ->
State;
cancel_request_timeout(#state{timer = Timer} = State) ->
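The throughput check the handler performs reduces to a simple threshold: the first check fires 3 seconds after the counter is armed, so it expects at least three times the per-second minimum; later checks fire once a second against the plain minimum. A minimal sketch (not part of the patch; names are illustrative):

enough_data(first_check, Bytes, MinBytesPerSecond) ->
    %% The first check fires 3 s after the counter is armed,
    %% so it expects three times the per-second minimum.
    Bytes >= 3 * MinBytesPerSecond;
enough_data(later_check, Bytes, MinBytesPerSecond) ->
    %% Thereafter the counter is reset and re-checked once a second.
    Bytes >= MinBytesPerSecond.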
diff --git a/lib/inets/src/inets_app/inets.appup.src b/lib/inets/src/inets_app/inets.appup.src
index 2adb2a0fc8..ffd0ed622f 100644
--- a/lib/inets/src/inets_app/inets.appup.src
+++ b/lib/inets/src/inets_app/inets.appup.src
@@ -18,8 +18,14 @@
{"%VSN%",
[
+ {"5.9.1",
+ [
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []}
+ ]
+ },
{"5.9",
[
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []},
{load_module, tftp, soft_purge, soft_purge, [inets_service]},
{load_module, inets_service, soft_purge, soft_purge, []},
{load_module, httpc, soft_purge, soft_purge, [httpc_manager]},
@@ -29,6 +35,7 @@
},
{"5.8.1",
[
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []},
{load_module, tftp, soft_purge, soft_purge, [inets_service]},
{load_module, inets_service, soft_purge, soft_purge, []},
@@ -64,8 +71,14 @@
}
],
[
+ {"5.9.1",
+ [
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []}
+ ]
+ },
{"5.9",
[
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []},
{load_module, tftp, soft_purge, soft_purge, [inets_service]},
{load_module, inets_service, soft_purge, soft_purge, []},
{load_module, httpc, soft_purge, soft_purge, [httpc_manager]},
@@ -75,6 +88,7 @@
},
{"5.8.1",
[
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []},
{load_module, tftp, soft_purge, soft_purge, [inets_service]},
{load_module, inets_service, soft_purge, soft_purge, []},
diff --git a/lib/inets/test/httpd_basic_SUITE.erl b/lib/inets/test/httpd_basic_SUITE.erl
index 7a476ea14a..523cf9d38c 100644
--- a/lib/inets/test/httpd_basic_SUITE.erl
+++ b/lib/inets/test/httpd_basic_SUITE.erl
@@ -34,7 +34,8 @@ all() ->
[
uri_too_long_414,
header_too_long_413,
- escaped_url_in_error_body
+ escaped_url_in_error_body,
+ slowdose
].
groups() ->
@@ -278,7 +279,18 @@ escaped_url_in_error_body(Config) when is_list(Config) ->
inets:stop(httpd, Pid),
tsp("escaped_url_in_error_body -> done"),
ok.
-
+slowdose(doc) ->
+ ["Testing minimum bytes per second option"];
+slowdose(Config) when is_list(Config) ->
+ HttpdConf = ?config(httpd_conf, Config),
+ {ok, Pid} = inets:start(httpd, [{port, 0}, {minimum_bytes_per_second, 200}|HttpdConf]),
+ Info = httpd:info(Pid),
+ Port = proplists:get_value(port, Info),
+ {ok, Socket} = gen_tcp:connect("localhost", Port, []),
+ receive
+ after 6000 ->
+ {error, closed} = gen_tcp:send(Socket, "Hey")
+ end.
find_URL_path([]) ->
"";
find_URL_path(["URL", URL | _]) ->
diff --git a/lib/inets/test/inets_app_test.erl b/lib/inets/test/inets_app_test.erl
index db2218f3b6..d32f7e290b 100644
--- a/lib/inets/test/inets_app_test.erl
+++ b/lib/inets/test/inets_app_test.erl
@@ -35,6 +35,15 @@
init_per_testcase(undef_funcs, Config) ->
NewConfig = lists:keydelete(watchdog, 1, Config),
Dog = test_server:timetrap(inets_test_lib:minutes(10)),
+
+ %% We need to check if there is a point to run this test.
+ %% On some platforms, crypto will not build, which in turn
+ %% causes ssl to not build (at this time, this will
+ %% change in the future).
+ %% So, we first check if we can start crypto, and if not,
+ %% we skip this test case!
+ ?ENSURE_STARTED(crypto),
+
[{watchdog, Dog}| NewConfig];
init_per_testcase(_, Config) ->
Config.
@@ -240,13 +249,6 @@ undef_funcs(suite) ->
undef_funcs(doc) ->
[];
undef_funcs(Config) when is_list(Config) ->
- %% We need to check if there is a point to run this test.
- %% On some platforms, crypto will not build, which in turn
- %% causes ssl to not build (at this time, this will
- %% change in the future).
- %% So, we first check if we can start crypto, and if not,
- %% we skip this test case!
- ?ENSURE_STARTED(crypto),
App = inets,
AppFile = key1search(app_file, Config),
Mods = key1search(modules, AppFile),
diff --git a/lib/inets/vsn.mk b/lib/inets/vsn.mk
index 949eceea7f..0c7cb5e7c2 100644
--- a/lib/inets/vsn.mk
+++ b/lib/inets/vsn.mk
@@ -18,7 +18,7 @@
# %CopyrightEnd%
APPLICATION = inets
-INETS_VSN = 5.9.1
+INETS_VSN = 5.9.2
PRE_VSN =
APP_VSN = "$(APPLICATION)-$(INETS_VSN)$(PRE_VSN)"
diff --git a/lib/jinterface/java_src/com/ericsson/otp/erlang/OtpEpmd.java b/lib/jinterface/java_src/com/ericsson/otp/erlang/OtpEpmd.java
index deac528133..b985f8aa50 100644
--- a/lib/jinterface/java_src/com/ericsson/otp/erlang/OtpEpmd.java
+++ b/lib/jinterface/java_src/com/ericsson/otp/erlang/OtpEpmd.java
@@ -358,7 +358,7 @@ public class OtpEpmd {
}
public static String[] lookupNames() throws IOException {
- return lookupNames(InetAddress.getLocalHost());
+ return lookupNames(InetAddress.getByName(null));
}
public static String[] lookupNames(final InetAddress address)
diff --git a/lib/kernel/doc/src/heart.xml b/lib/kernel/doc/src/heart.xml
index 26d1e27822..2826d3d00a 100644
--- a/lib/kernel/doc/src/heart.xml
+++ b/lib/kernel/doc/src/heart.xml
@@ -71,6 +71,39 @@
timeout and try to reboot the system. This can happen, for
example, if the system clock is adjusted automatically by use of
NTP (Network Time Protocol).</p>
+
+ <p> If a crash occurs, an <c><![CDATA[erl_crash.dump]]></c> will <em>not</em> be written
+ unless the environment variable <c><![CDATA[ERL_CRASH_DUMP_SECONDS]]></c> is set.
+ </p>
+
+ <pre>
+% <input>erl -heart -env ERL_CRASH_DUMP_SECONDS 10 ...</input></pre>
+ <p>
+ Furthermore, <c><![CDATA[ERL_CRASH_DUMP_SECONDS]]></c> has the following effect
+ when <c>heart</c> is used:
+ </p>
+ <taglist>
+ <tag><c><![CDATA[ERL_CRASH_DUMP_SECONDS=0]]></c></tag>
+ <item><p>
+ Suppresses the writing of a crash dump file entirely,
+ thus rebooting the runtime system immediately.
+ This is the same as not setting the environment variable.
+ </p>
+ </item>
+ <tag><c><![CDATA[ERL_CRASH_DUMP_SECONDS=-1]]></c></tag>
+ <item><p> Setting the environment variable to a negative value means that the
+ runtime system is not rebooted until the crash dump file has been completely written.
+ </p>
+ </item>
+ <tag><c><![CDATA[ERL_CRASH_DUMP_SECONDS=S]]></c></tag>
+ <item><p>
+ Heart will wait for <c>S</c> seconds to let the crash dump file be written.
+ After <c>S</c> seconds, <c>heart</c> will reboot the runtime system regardless of
+ whether the crash dump file has been written or not.
+ </p>
+ </item>
+ </taglist>
+
<p>In the following descriptions, all functions fail with reason
<c>badarg</c> if <c>heart</c> is not started.</p>
</description>
diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml
index 32b4a429dd..f498a88c5e 100644
--- a/lib/kernel/doc/src/inet.xml
+++ b/lib/kernel/doc/src/inet.xml
@@ -520,6 +520,69 @@ fe80::204:acff:fe17:bf38
</p>
</item>
+ <tag><c>{ipv6_v6only, Boolean}</c></tag>
+ <item>
+ <p>
+ Restricts the socket to only use IPv6, prohibiting any
+ IPv4 connections. This is only applicable for
+ IPv6 sockets (option <c>inet6</c>).
+ </p>
+ <p>
+ On most platforms this option has to be set on the socket
+ before it is associated with an address. Therefore it is only
+ reasonable to give it when creating the socket, and not
+ to use it when calling the function
+ (<seealso marker="#setopts/2">setopts/2</seealso>)
+ containing this description.
+ </p>
+ <p>
+ The behaviour of a socket with this socket option set to
+ <c>true</c> is becoming the only portable one. The original
+ idea when IPv6 was new of using IPv6 for all traffic
+ is now not recommended by FreeBSD (you can use
+ <c>{ipv6_v6only,false}</c> to override the recommended
+ system default value),
+ forbidden by OpenBSD (the supported GENERIC kernel)
+ and impossible on Windows (that has separate
+ IPv4 and IPv6 protocol stacks). Most Linux distros
+ still have a system default value of <c>false</c>.
+ This policy shift among operating systems towards
+ separating IPv6 from IPv4 traffic has evolved since
+ it gradually proved hard and complicated to get
+ a dual stack implementation correct and secure.
+ </p>
+ <p>
+ On some platforms the only allowed value for this option
+ is <c>true</c>, e.g. OpenBSD and Windows. Trying to set
+ this option to <c>false</c> when creating the socket
+ will in this case fail.
+ </p>
+ <p>
+ Setting this option on platforms where it does not exist
+ is ignored and getting this option with
+ <seealso marker="#getopts/2">getopts/2</seealso>
+ returns no value, i.e. the returned list will not contain an
+ <c>{ipv6_v6only,_}</c> tuple. On Windows the option actually
+ does not exist, but it is emulated as being a
+ read-only option with the value <c>true</c>.
+ </p>
+ <p>
+ In short, setting this option to <c>true</c>
+ when creating a socket will never fail, except possibly
+ (at the time of this writing) on a platform where you
+ have customized the kernel to only allow <c>false</c>,
+ which might be doable (but weird) on e.g. OpenBSD.
+ </p>
+ <p>
+ If you read back the option value using
+ <seealso marker="#getopts/2">getopts/2</seealso>
+ and get no value the option does not exist in the host OS
+ and all bets are off regarding the behaviour of both
+ an IPv6 and an IPv4 socket listening on the same port
+ as well as for an IPv6 socket getting IPv4 traffic.
+ </p>
+ </item>
+
<tag><c>{keepalive, Boolean}</c>(TCP/IP sockets)</tag>
<item>
<p>Enables/disables periodic transmission on a connected
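A minimal sketch (not part of the patch) of how the new option is intended to be used: set at socket creation and read back with getopts/2; port 0 lets the OS pick a free port.

{ok, LSock} = gen_tcp:listen(0, [inet6, {ipv6_v6only, true}]),
%% Returns [{ipv6_v6only,true}] where the OS supports the option,
%% and [] where the option does not exist.
{ok, _Opts} = inet:getopts(LSock, [ipv6_v6only]).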
diff --git a/lib/kernel/src/gen_sctp.erl b/lib/kernel/src/gen_sctp.erl
index 8fa963ec78..74ad192802 100644
--- a/lib/kernel/src/gen_sctp.erl
+++ b/lib/kernel/src/gen_sctp.erl
@@ -44,6 +44,7 @@
{priority, non_neg_integer()} |
{recbuf, non_neg_integer()} |
{reuseaddr, boolean()} |
+ {ipv6_v6only, boolean()} |
{sctp_adaptation_layer, #sctp_setadaptation{}} |
{sctp_associnfo, #sctp_assocparams{}} |
{sctp_autoclose, non_neg_integer()} |
@@ -72,6 +73,7 @@
priority |
recbuf |
reuseaddr |
+ ipv6_v6only |
sctp_adaptation_layer |
sctp_associnfo |
sctp_autoclose |
diff --git a/lib/kernel/src/gen_tcp.erl b/lib/kernel/src/gen_tcp.erl
index e6dfdadb03..22e6aa5bc8 100644
--- a/lib/kernel/src/gen_tcp.erl
+++ b/lib/kernel/src/gen_tcp.erl
@@ -57,7 +57,8 @@
{send_timeout, non_neg_integer() | infinity} |
{send_timeout_close, boolean()} |
{sndbuf, non_neg_integer()} |
- {tos, non_neg_integer()}.
+ {tos, non_neg_integer()} |
+ {ipv6_v6only, boolean()}.
-type option_name() ::
active |
buffer |
@@ -85,7 +86,8 @@
send_timeout |
send_timeout_close |
sndbuf |
- tos.
+ tos |
+ ipv6_v6only.
-type connect_option() ::
{ip, inet:ip_address()} |
{fd, Fd :: non_neg_integer()} |
diff --git a/lib/kernel/src/gen_udp.erl b/lib/kernel/src/gen_udp.erl
index 830ca61b3c..c5a1173575 100644
--- a/lib/kernel/src/gen_udp.erl
+++ b/lib/kernel/src/gen_udp.erl
@@ -47,7 +47,8 @@
{recbuf, non_neg_integer()} |
{reuseaddr, boolean()} |
{sndbuf, non_neg_integer()} |
- {tos, non_neg_integer()}.
+ {tos, non_neg_integer()} |
+ {ipv6_v6only, boolean()}.
-type option_name() ::
active |
broadcast |
@@ -69,7 +70,8 @@
recbuf |
reuseaddr |
sndbuf |
- tos.
+ tos |
+ ipv6_v6only.
-type socket() :: port().
-export_type([option/0, option_name/0]).
diff --git a/lib/kernel/src/heart.erl b/lib/kernel/src/heart.erl
index 28452a377e..de287bfa43 100644
--- a/lib/kernel/src/heart.erl
+++ b/lib/kernel/src/heart.erl
@@ -42,6 +42,7 @@
-define(CLEAR_CMD, 5).
-define(GET_CMD, 6).
-define(HEART_CMD, 7).
+-define(PREPARING_CRASH, 8). % Used in beam vm
-define(TIMEOUT, 5000).
-define(CYCLE_TIMEOUT, 10000).
@@ -130,6 +131,8 @@ start_portprogram() ->
Port when is_port(Port) ->
case wait_ack(Port) of
ok ->
+ %% register port so the vm can find it if need be
+ register(heart_port, Port),
{ok, Port};
{error, Reason} ->
report_problem({{port_problem, Reason},
diff --git a/lib/kernel/src/inet.erl b/lib/kernel/src/inet.erl
index b4ebb1500c..92c1802a86 100644
--- a/lib/kernel/src/inet.erl
+++ b/lib/kernel/src/inet.erl
@@ -531,7 +531,7 @@ getservbyname(Name, Protocol) when is_atom(Name) ->
options() ->
[
tos, priority, reuseaddr, keepalive, dontroute, linger,
- broadcast, sndbuf, recbuf, nodelay,
+ broadcast, sndbuf, recbuf, nodelay, ipv6_v6only,
buffer, header, active, packet, deliver, mode,
multicast_if, multicast_ttl, multicast_loop,
exit_on_close, high_watermark, low_watermark,
@@ -607,7 +607,7 @@ con_add(Name, Val, R, Opts, AllOpts) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
listen_options() ->
[tos, priority, reuseaddr, keepalive, linger, sndbuf, recbuf, nodelay,
- header, active, packet, buffer, mode, deliver, backlog,
+ header, active, packet, buffer, mode, deliver, backlog, ipv6_v6only,
exit_on_close, high_watermark, low_watermark, send_timeout,
send_timeout_close, delay_send, packet_size,raw].
@@ -664,7 +664,7 @@ list_add(Name, Val, R, Opts, As) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
udp_options() ->
[tos, priority, reuseaddr, sndbuf, recbuf, header, active, buffer, mode,
- deliver,
+ deliver, ipv6_v6only,
broadcast, dontroute, multicast_if, multicast_ttl, multicast_loop,
add_membership, drop_membership, read_packets,raw].
@@ -720,7 +720,7 @@ udp_add(Name, Val, R, Opts, As) ->
sctp_options() ->
[ % The following are generic inet options supported for SCTP sockets:
mode, active, buffer, tos, priority, dontroute, reuseaddr, linger, sndbuf,
- recbuf,
+ recbuf, ipv6_v6only,
% Other options are SCTP-specific (though they may be similar to their
% TCP and UDP counter-parts):
diff --git a/lib/kernel/src/inet_int.hrl b/lib/kernel/src/inet_int.hrl
index 467c4d5065..6d808b54cd 100644
--- a/lib/kernel/src/inet_int.hrl
+++ b/lib/kernel/src/inet_int.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -124,6 +124,7 @@
-define(UDP_OPT_MULTICAST_LOOP, 13).
-define(UDP_OPT_ADD_MEMBERSHIP, 14).
-define(UDP_OPT_DROP_MEMBERSHIP, 15).
+-define(INET_OPT_IPV6_V6ONLY, 16).
% "Local" options: codes start from 20:
-define(INET_LOPT_BUFFER, 20).
-define(INET_LOPT_HEADER, 21).
diff --git a/lib/kernel/test/code_SUITE.erl b/lib/kernel/test/code_SUITE.erl
index 5e0300639e..d7424c0c9a 100644
--- a/lib/kernel/test/code_SUITE.erl
+++ b/lib/kernel/test/code_SUITE.erl
@@ -684,8 +684,8 @@ ext_mod_dep(Config) when is_list(Config) ->
xref:set_default(s, [{verbose,false},{warnings,false},
{builtins,true},{recurse,true}]),
xref:set_library_path(s, code:get_path()),
- xref:add_directory(s, filename:dirname(code:which(kernel))),
- xref:add_directory(s, filename:dirname(code:which(lists))),
+ xref:add_directory(s, filename:join(code:lib_dir(kernel),"ebin")),
+ xref:add_directory(s, filename:join(code:lib_dir(stdlib),"ebin")),
case catch ext_mod_dep2() of
{'EXIT', Reason} ->
xref:stop(s),
diff --git a/lib/kernel/test/global_SUITE.erl b/lib/kernel/test/global_SUITE.erl
index 60035b50a0..6eb2134644 100644
--- a/lib/kernel/test/global_SUITE.erl
+++ b/lib/kernel/test/global_SUITE.erl
@@ -168,7 +168,7 @@ end_per_testcase(_Case, Config) ->
register_1(suite) -> [];
register_1(Config) when is_list(Config) ->
Timeout = 15,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
P = spawn_link(?MODULE, lock_global, [self(), Config]),
@@ -195,7 +195,6 @@ register_1(Config) when is_list(Config) ->
?line _ = global:unregister_name(foo),
write_high_level_trace(Config),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
lock_global(Parent, Config) ->
@@ -238,7 +237,7 @@ lock_global(Parent, Config) ->
both_known_1(suite) -> [];
both_known_1(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
@@ -316,7 +315,6 @@ both_known_1(Config) when is_list(Config) ->
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
lost_unregister(suite) -> [];
@@ -324,7 +322,7 @@ lost_unregister(doc) ->
["OTP-6428. An unregistered name reappears."];
lost_unregister(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
@@ -361,7 +359,6 @@ lost_unregister(Config) when is_list(Config) ->
stop_node(B),
stop_node(C),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
-define(UNTIL_LOOP, 300).
@@ -448,7 +445,7 @@ lock_global2(Id, Parent) ->
names(suite) -> [];
names(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -532,7 +529,6 @@ names(Config) when is_list(Config) ->
?line ?UNTIL(undefined =:= global:whereis_name(test)),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
names_hidden(suite) -> [];
@@ -541,7 +537,7 @@ names_hidden(doc) ->
"visible nodes."];
names_hidden(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -639,13 +635,12 @@ names_hidden(Config) when is_list(Config) ->
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
locks(suite) -> [];
locks(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line {ok, Cp1} = start_node(cp1, Config),
@@ -750,7 +745,6 @@ locks(Config) when is_list(Config) ->
?line test_server:sleep(10),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
@@ -760,7 +754,7 @@ locks_hidden(doc) ->
"visible nodes."];
locks_hidden(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNodes = nodes(),
@@ -833,14 +827,13 @@ locks_hidden(Config) when is_list(Config) ->
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
bad_input(suite) -> [];
bad_input(Config) when is_list(Config) ->
Timeout = 15,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
Pid = whereis(global_name_server),
@@ -854,13 +847,12 @@ bad_input(Config) when is_list(Config) ->
?line {'EXIT', _} = (catch global:trans({id, self()}, {m,f}, [node()], -1)),
?line Pid = whereis(global_name_server),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
names_and_locks(suite) -> [];
names_and_locks(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -922,7 +914,6 @@ names_and_locks(Config) when is_list(Config) ->
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
lock_die(suite) -> [];
@@ -930,7 +921,7 @@ lock_die(doc) ->
["OTP-6341. Remove locks using monitors."];
lock_die(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -964,7 +955,6 @@ lock_die(Config) when is_list(Config) ->
stop_node(Cp1),
stop_node(Cp2),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
name_die(suite) -> [];
@@ -972,7 +962,7 @@ name_die(doc) ->
["OTP-6341. Remove names using monitors."];
name_die(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1027,7 +1017,6 @@ name_die(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
kill_pid(Pid, File, Config) ->
@@ -1040,7 +1029,7 @@ basic_partition(doc) ->
["Tests that two partitioned networks exchange correct info."];
basic_partition(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1088,7 +1077,6 @@ basic_partition(Config) when is_list(Config) ->
stop_node(Cp2),
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
basic_name_partition(suite) ->
@@ -1099,7 +1087,7 @@ basic_name_partition(doc) ->
"during connect phase are handled correctly."];
basic_name_partition(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1167,7 +1155,6 @@ basic_name_partition(Config) when is_list(Config) ->
stop_node(Cp2),
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
%Peer nodes cp0 - cp6 are started. Break apart the connections from
@@ -1190,7 +1177,7 @@ advanced_partition(doc) ->
"partitioned networks connect."];
advanced_partition(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1278,7 +1265,6 @@ advanced_partition(Config) when is_list(Config) ->
stop_node(Cp5),
stop_node(Cp6),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
%Peer nodes cp0 - cp6 are started, and partitioned just like in
@@ -1297,7 +1283,7 @@ stress_partition(doc) ->
"go up/down a bit."];
stress_partition(Config) when is_list(Config) ->
Timeout = 90,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1377,7 +1363,6 @@ stress_partition(Config) when is_list(Config) ->
stop_node(Cp7),
stop_node(Cp8),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
@@ -1408,7 +1393,7 @@ ring(doc) ->
"Make sure that there's just one winner."];
ring(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1486,7 +1471,6 @@ ring(Config) when is_list(Config) ->
stop_node(Cp7),
stop_node(Cp8),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
simple_ring(suite) ->
@@ -1499,7 +1483,7 @@ simple_ring(doc) ->
"Make sure that there's just one winner."];
simple_ring(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1565,7 +1549,6 @@ simple_ring(Config) when is_list(Config) ->
stop_node(Cp4),
stop_node(Cp5),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
line(suite) ->
@@ -1576,7 +1559,7 @@ line(doc) ->
"Make sure that there's just one winner."];
line(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1655,7 +1638,6 @@ line(Config) when is_list(Config) ->
stop_node(Cp7),
stop_node(Cp8),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
@@ -1669,7 +1651,7 @@ simple_line(doc) ->
"Make sure that there's just one winner."];
simple_line(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1735,7 +1717,6 @@ simple_line(Config) when is_list(Config) ->
stop_node(Cp4),
stop_node(Cp5),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
otp_1849(suite) -> [];
@@ -1743,7 +1724,7 @@ otp_1849(doc) ->
["Test ticket: Global should keep track of all pids that set the same lock."];
otp_1849(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line {ok, Cp1} = start_node(cp1, Config),
@@ -1822,7 +1803,6 @@ otp_1849(Config) when is_list(Config) ->
stop_node(Cp2),
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
@@ -1840,7 +1820,7 @@ otp_3162(Config) when is_list(Config) ->
do_otp_3162(StartFun, Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line [Cp1, Cp2, Cp3] = StartFun(),
@@ -1898,7 +1878,6 @@ do_otp_3162(StartFun, Config) ->
stop_node(Cp2),
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
@@ -1907,7 +1886,7 @@ otp_5640(doc) ->
["OTP-5640. 'allow' multiple names for registered processes."];
otp_5640(Config) when is_list(Config) ->
Timeout = 25,
- ?line Dog = test_server:timetrap(test_server:seconds(Timeout)),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
init_condition(Config),
?line {ok, B} = start_node(b, Config),
@@ -1965,7 +1944,6 @@ otp_5640(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_node(B),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
otp_5640_proc(_Parent) ->
@@ -1979,7 +1957,7 @@ otp_5737(doc) ->
["OTP-5737. set_lock/3 and trans/4 accept Retries = 0."];
otp_5737(Config) when is_list(Config) ->
Timeout = 25,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
@@ -2000,7 +1978,6 @@ otp_5737(Config) when is_list(Config) ->
write_high_level_trace(Config),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
otp_6931(suite) -> [];
@@ -2025,7 +2002,7 @@ simple_disconnect(suite) -> [];
simple_disconnect(doc) -> ["OTP-5563. Disconnected nodes (not partitions)"];
simple_disconnect(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2075,7 +2052,6 @@ simple_disconnect(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
%% Not used right now.
@@ -2118,7 +2094,7 @@ simple_resolve(suite) -> [];
simple_resolve(doc) -> ["OTP-5563. Partitions and names."];
simple_resolve(Config) when is_list(Config) ->
Timeout = 360,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2245,7 +2221,6 @@ simple_resolve(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
simple_resolve2(suite) -> [];
@@ -2255,7 +2230,7 @@ simple_resolve2(Config) when is_list(Config) ->
%% always work to re-start z_2. "Cannot be a global bug."
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2283,7 +2258,6 @@ simple_resolve2(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps), % Not all nodes may be present, but it works anyway.
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
simple_resolve3(suite) -> [];
@@ -2292,7 +2266,7 @@ simple_resolve3(Config) when is_list(Config) ->
%% Continuation of simple_resolve.
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2320,7 +2294,6 @@ simple_resolve3(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps), % Not all nodes may be present, but it works anyway.
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
res({Res,Resolver}, [N1, A2, Z2], Cf) ->
@@ -2504,7 +2477,7 @@ leftover_name(suite) -> [];
leftover_name(doc) -> ["OTP-5563. Bug: nodedown while synching."];
leftover_name(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2565,7 +2538,6 @@ leftover_name(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
%% Runs on n_1
@@ -2604,7 +2576,7 @@ re_register_name(Config) when is_list(Config) ->
%% occupied by links, that's all.
%% Later: now monitors are checked.
Timeout = 15,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
Me = self(),
@@ -2618,7 +2590,6 @@ re_register_name(Config) when is_list(Config) ->
receive {Pid2, MonitoredBy2} -> [_] = MonitoredBy2 end,
?line _ = global:unregister_name(name),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
proc(Parent) ->
@@ -2652,7 +2623,7 @@ do_name_exit(StartFun, Version, Config) ->
%% The current release uses monitors so this test is not so relevant.
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2692,7 +2663,6 @@ do_name_exit(StartFun, Version, Config) ->
write_high_level_trace(Config),
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
long_lock(Parent) ->
@@ -2709,7 +2679,7 @@ external_nodes(suite) -> [];
external_nodes(doc) -> ["OTP-5563. External nodes (cnodes)."];
external_nodes(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2793,7 +2763,6 @@ external_nodes(Config) when is_list(Config) ->
?line ?UNTIL(length(get_ext_names()) =:= 0),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
get_ext_names() ->
@@ -2845,8 +2814,8 @@ many_nodes(suite) ->
many_nodes(doc) ->
["OTP-5770. Start many nodes. Make them connect at the same time."];
many_nodes(Config) when is_list(Config) ->
- Timeout = 180,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ Timeout = 240,
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2902,7 +2871,6 @@ many_nodes(Config) when is_list(Config) ->
write_high_level_trace(Config),
?line stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
Diff = Time2 - Time,
Return = lists:flatten(io_lib:format("~w nodes took ~w ms",
[N_cps, Diff])),
@@ -2988,7 +2956,7 @@ sync_0(doc) ->
["OTP-5770. sync/0."];
sync_0(Config) when is_list(Config) ->
Timeout = 180,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
@@ -3013,7 +2981,6 @@ sync_0(Config) when is_list(Config) ->
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
start_and_sync([]) ->
@@ -3031,7 +2998,7 @@ global_groups_change(suite) -> [];
global_groups_change(doc) -> ["Test change of global_groups parameter."];
global_groups_change(Config) ->
Timeout = 90,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line M = from($@, atom_to_list(node())),
@@ -3376,7 +3343,6 @@ global_groups_change(Config) ->
stop_node(CpE),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
sync_and_wait(Node) ->
@@ -3919,7 +3885,7 @@ global_lost_nodes(doc) ->
["Tests that locally loaded nodes do not loose contact with other nodes."];
global_lost_nodes(Config) when is_list(Config) ->
Timeout = 60,
- Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
@@ -3943,7 +3909,6 @@ global_lost_nodes(Config) when is_list(Config) ->
?line stop_node(Node1),
?line stop_node(Node2),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
global_load(MyName, OtherNode, OtherName) ->
@@ -3994,7 +3959,7 @@ mass_death(doc) ->
["Tests the simultaneous death of many processes with registered names"];
mass_death(Config) when is_list(Config) ->
Timeout = 90,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -4023,9 +3988,9 @@ mass_death(Config) when is_list(Config) ->
{H,M,S} = time(),
io:format("Started probing: ~.4.0w-~.2.0w-~.2.0w ~.2.0w:~.2.0w:~.2.0w~n",
[YYYY,MM,DD,H,M,S]),
- wait_mass_death(Dog, Nodes, OrigNames, erlang:now(), Config).
+ wait_mass_death(Nodes, OrigNames, erlang:now(), Config).
-wait_mass_death(Dog, Nodes, OrigNames, Then, Config) ->
+wait_mass_death(Nodes, OrigNames, Then, Config) ->
?line Names = global:registered_names(),
?line
case Names--OrigNames of
@@ -4036,12 +4001,11 @@ wait_mass_death(Dog, Nodes, OrigNames, Then, Config) ->
stop_node(Node)
end, Nodes),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
{comment,lists:flatten(io_lib:format("~.3f s~n", [T/1000.0]))};
Ndiff ->
?line io:format("Ndiff: ~p~n", [Ndiff]),
?line test_server:sleep(1000),
- ?line wait_mass_death(Dog, Nodes, OrigNames, Then, Config)
+ ?line wait_mass_death(Nodes, OrigNames, Then, Config)
end.
mass_spawn([]) ->
@@ -4213,7 +4177,7 @@ garbage_messages(suite) ->
[];
garbage_messages(Config) when is_list(Config) ->
Timeout = 25,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line [Slave] = start_nodes([garbage_messages], slave, Config),
@@ -4233,7 +4197,6 @@ garbage_messages(Config) when is_list(Config) ->
write_high_level_trace(Config),
?line stop_node(Slave),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
wait_for_ready_net(Config) ->
diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl
index 31005a01e2..970a03cfd5 100644
--- a/lib/kernel/test/heart_SUITE.erl
+++ b/lib/kernel/test/heart_SUITE.erl
@@ -22,7 +22,10 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2, start/1, restart/1,
- reboot/1, set_cmd/1, clear_cmd/1, get_cmd/1,
+ reboot/1,
+ node_start_immediately_after_crash/1,
+ node_start_soon_after_crash/1,
+ set_cmd/1, clear_cmd/1, get_cmd/1,
dont_drop/1, kill_pid/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
@@ -38,15 +41,15 @@ init_per_testcase(_Func, Config) ->
end_per_testcase(_Func, Config) ->
Nodes = nodes(),
lists:foreach(fun(X) ->
- NNam = list_to_atom(hd(string:tokens(atom_to_list(X),"@"))),
- case NNam of
- heart_test ->
- ?t:format(1, "WARNING: Killed ~p~n", [X]),
- rpc:cast(X, erlang, halt, []);
- _ ->
- ok
- end
- end, Nodes),
+ NNam = list_to_atom(hd(string:tokens(atom_to_list(X),"@"))),
+ case NNam of
+ heart_test ->
+ ?t:format(1, "WARNING: Killed ~p~n", [X]),
+ rpc:cast(X, erlang, halt, []);
+ _ ->
+ ok
+ end
+ end, Nodes),
Dog=?config(watchdog, Config),
test_server:timetrap_cancel(Dog).
@@ -57,8 +60,13 @@ end_per_testcase(_Func, Config) ->
%%-----------------------------------------------------------------
suite() -> [{ct_hooks,[ts_install_cth]}].
-all() ->
- [start, restart, reboot, set_cmd, clear_cmd, get_cmd, kill_pid].
+all() -> [
+ start, restart, reboot,
+ node_start_immediately_after_crash,
+ node_start_soon_after_crash,
+ set_cmd, clear_cmd, get_cmd,
+ kill_pid
+ ].
groups() ->
[].
@@ -80,17 +88,22 @@ init_per_suite(Config) when is_list(Config) ->
end_per_suite(Config) when is_list(Config) ->
Config.
+
start_check(Type, Name) ->
+ start_check(Type, Name, []).
+start_check(Type, Name, Envs) ->
Args = case ?t:os_type() of
- {win32,_} -> "-heart -env HEART_COMMAND no_reboot";
- _ -> "-heart"
- end,
+ {win32,_} ->
+ "-heart " ++ env_encode([{"HEART_COMMAND", no_reboot}|Envs]);
+ _ ->
+ "-heart " ++ env_encode(Envs)
+ end,
{ok, Node} = case Type of
- loose ->
- loose_node:start(Name, Args, ?DEFAULT_TIMEOUT_SECS);
- _ ->
- ?t:start_node(Name, Type, [{args, Args}])
- end,
+ loose ->
+ loose_node:start(Name, Args, ?DEFAULT_TIMEOUT_SECS);
+ _ ->
+ ?t:start_node(Name, Type, [{args, Args}])
+ end,
erlang:monitor_node(Node, true),
case rpc:call(Node, erlang, whereis, [heart]) of
Pid when is_pid(Pid) ->
@@ -103,21 +116,19 @@ start_check(Type, Name) ->
start(doc) -> [];
start(suite) -> {req, [{time, 10}]};
start(Config) when is_list(Config) ->
- ?line {ok, Node} = start_check(slave, heart_test),
- ?line rpc:call(Node, init, reboot, []),
+ {ok, Node} = start_check(slave, heart_test),
+ rpc:call(Node, init, reboot, []),
receive
- {nodedown, Node} ->
- ok
- after 2000 ->
- test_server:fail(node_not_closed)
+ {nodedown, Node} -> ok
+ after 2000 -> test_server:fail(node_not_closed)
end,
test_server:sleep(5000),
- ?line case net_adm:ping(Node) of
- pang ->
- ok;
- _ ->
- test_server:fail(node_rebooted)
- end,
+ case net_adm:ping(Node) of
+ pang ->
+ ok;
+ _ ->
+ test_server:fail(node_rebooted)
+ end,
test_server:stop_node(Node).
%% Also test fixed bug in R1B (it was not possible to
@@ -125,6 +136,10 @@ start(Config) when is_list(Config) ->
%% Slave executes erlang:halt() on master nodedown.
%% Therefore the slave process has to be killed
%% before restart.
+
+%% restart
+%% Purpose:
+%% Check that a node is up and running after an init:restart/0
restart(doc) -> [];
restart(suite) ->
case ?t:os_type() of
@@ -134,8 +149,8 @@ restart(suite) ->
{skip, "Only run on unix and win32"}
end;
restart(Config) when is_list(Config) ->
- ?line {ok, Node} = start_check(loose, heart_test),
- ?line rpc:call(Node, init, restart, []),
+ {ok, Node} = start_check(loose, heart_test),
+ rpc:call(Node, init, restart, []),
receive
{nodedown, Node} ->
ok
@@ -143,32 +158,21 @@ restart(Config) when is_list(Config) ->
test_server:fail(node_not_closed)
end,
test_server:sleep(5000),
-
- ?line case net_adm:ping(Node) of
- pong ->
- erlang:monitor_node(Node, true),
- ?line rpc:call(Node, init, stop, []),
- receive
- {nodedown, Node} ->
- ok
- after 2000 ->
- test_server:fail(node_not_closed2)
- end,
- ok;
- _ ->
- test_server:fail(node_not_restarted)
- end,
+ node_check_up_down(Node, 2000),
loose_node:stop(Node).
+%% reboot
+%% Purpose:
+%% Check that a node is up and running after an init:reboot/0
reboot(doc) -> [];
reboot(suite) -> {req, [{time, 10}]};
reboot(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, heart_test),
- ?line ok = rpc:call(Node, heart, set_cmd,
+ ok = rpc:call(Node, heart, set_cmd,
[atom_to_list(lib:progname()) ++
" -noshell -heart " ++ name(Node) ++ "&"]),
- ?line rpc:call(Node, init, reboot, []),
+ rpc:call(Node, init, reboot, []),
receive
{nodedown, Node} ->
ok
@@ -176,44 +180,119 @@ reboot(Config) when is_list(Config) ->
test_server:fail(node_not_closed)
end,
test_server:sleep(5000),
- ?line case net_adm:ping(Node) of
- pong ->
- erlang:monitor_node(Node, true),
- ?line rpc:call(Node, init, reboot, []),
- receive
- {nodedown, Node} ->
- ok
- after 2000 ->
- test_server:fail(node_not_closed2)
- end,
- ok;
- _ ->
- test_server:fail(node_not_rebooted)
- end,
+ node_check_up_down(Node, 2000),
ok.
+%% node_start_immediately_after_crash
+%% Purpose:
+%% Check that a node is up and running after a crash.
+%% This test exhausts the atom table on the remote node.
+%% ERL_CRASH_DUMP_SECONDS=0 will force beam not to dump an erl_crash.dump.
+node_start_immediately_after_crash(suite) -> {req, [{time, 10}]};
+node_start_immediately_after_crash(Config) when is_list(Config) ->
+ {ok, Node} = start_check(loose, heart_test_imm, [{"ERL_CRASH_DUMP_SECONDS", "0"}]),
+
+ ok = rpc:call(Node, heart, set_cmd,
+ [atom_to_list(lib:progname()) ++
+ " -noshell -heart " ++ name(Node) ++ "&"]),
+
+ Mod = exhaust_atoms,
+
+ Code = generate(Mod, [], [
+ "do() -> "
+ " Set = lists:seq($a,$z), "
+ " [ list_to_atom([A,B,C,D,E]) || "
+ " A <- Set, B <- Set, C <- Set, E <- Set, D <- Set ]."
+ ]),
+
+ %% crash it with atom exhaustion
+ rpc:call(Node, erlang, load_module, [Mod, Code]),
+ rpc:cast(Node, Mod, do, []),
+
+ T0 = now(),
+
+ receive {nodedown, Node} ->
+ test_server:format("Took ~.2f s. for node to go down~n", [timer:now_diff(now(), T0)/1000000]),
+ ok
+ %% timeout is very liberal here. nodedown is received in about 1 s. on linux (palantir)
+ %% and in about 10 s. on solaris (carcharoth)
+ after (15000*test_server:timetrap_scale_factor()) -> test_server:fail(node_not_closed)
+ end,
+ test_server:sleep(3000),
+ node_check_up_down(Node, 2000),
+ loose_node:stop(Node).
+
+%% node_start_soon_after_crash
+%% Purpose:
+%% Check that a node is up and running after a crash.
+%% This test exhausts the atom table on the remote node.
+%% ERL_CRASH_DUMP_SECONDS=10 gives beam at most 10 seconds
+%% to write an erl_crash.dump before it terminates.
+node_start_soon_after_crash(suite) -> {req, [{time, 10}]};
+node_start_soon_after_crash(Config) when is_list(Config) ->
+ {ok, Node} = start_check(loose, heart_test_soon, [{"ERL_CRASH_DUMP_SECONDS", "10"}]),
+
+ ok = rpc:call(Node, heart, set_cmd,
+ [atom_to_list(lib:progname()) ++
+ " -noshell -heart " ++ name(Node) ++ "&"]),
+
+ Mod = exhaust_atoms,
+
+ Code = generate(Mod, [], [
+ "do() -> "
+ " Set = lists:seq($a,$z), "
+ " [ list_to_atom([A,B,C,D,E]) || "
+ " A <- Set, B <- Set, C <- Set, E <- Set, D <- Set ]."
+ ]),
+
+ %% crash it with atom exhaustion
+ rpc:call(Node, erlang, load_module, [Mod, Code]),
+ rpc:cast(Node, Mod, do, []),
+
+ receive {nodedown, Node} -> ok
+ after (15000*test_server:timetrap_scale_factor()) -> test_server:fail(node_not_closed)
+ end,
+ test_server:sleep(20000),
+ node_check_up_down(Node, 15000),
+ loose_node:stop(Node).
+
+
+node_check_up_down(Node, Tmo) ->
+ case net_adm:ping(Node) of
+ pong ->
+ erlang:monitor_node(Node, true),
+ rpc:call(Node, init, reboot, []),
+ receive
+ {nodedown, Node} -> ok
+ after Tmo ->
+ test_server:fail(node_not_closed2)
+ end;
+ _ ->
+ test_server:fail(node_not_rebooted)
+ end.
+
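The two *_after_crash cases above bring the node down by exhausting the atom table: five-character names over $a..$z give 26^5 = 11,881,376 atoms, well past the emulator's default limit of 1,048,576 (the stock +t value, an assumption here, not something this patch sets), so list_to_atom/1 eventually aborts the emulator and heart restarts the node. A minimal sketch of the arithmetic:

    1> length(lists:seq($a, $z)).
    26
    2> 26*26*26*26*26.
    11881376
    %% 11881376 > 1048576 (assumed default atom table size), so the
    %% remote emulator crashes and the nodedown message arrives.
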
%% Only tests bad command, correct behaviour is tested in reboot/1.
set_cmd(suite) -> [];
set_cmd(Config) when is_list(Config) ->
- ?line {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, heart_test),
Cmd = wrong_atom,
- ?line {error, {bad_cmd, Cmd}} = rpc:call(Node, heart, set_cmd, [Cmd]),
+ {error, {bad_cmd, Cmd}} = rpc:call(Node, heart, set_cmd, [Cmd]),
Cmd1 = lists:duplicate(2047, $a),
- ?line {error, {bad_cmd, Cmd1}} = rpc:call(Node, heart, set_cmd, [Cmd1]),
+ {error, {bad_cmd, Cmd1}} = rpc:call(Node, heart, set_cmd, [Cmd1]),
Cmd2 = lists:duplicate(28, $a),
- ?line ok = rpc:call(Node, heart, set_cmd, [Cmd2]),
+ ok = rpc:call(Node, heart, set_cmd, [Cmd2]),
Cmd3 = lists:duplicate(2000, $a),
- ?line ok = rpc:call(Node, heart, set_cmd, [Cmd3]),
+ ok = rpc:call(Node, heart, set_cmd, [Cmd3]),
stop_node(Node),
ok.
clear_cmd(suite) -> {req,[{time,15}]};
clear_cmd(Config) when is_list(Config) ->
- ?line {ok, Node} = start_check(slave, heart_test),
- ?line ok = rpc:call(Node, heart, set_cmd,
+ {ok, Node} = start_check(slave, heart_test),
+ ok = rpc:call(Node, heart, set_cmd,
[atom_to_list(lib:progname()) ++
" -noshell -heart " ++ name(Node) ++ "&"]),
- ?line rpc:call(Node, init, reboot, []),
+ rpc:call(Node, init, reboot, []),
receive
{nodedown, Node} ->
ok
@@ -221,16 +300,16 @@ clear_cmd(Config) when is_list(Config) ->
test_server:fail(node_not_closed)
end,
test_server:sleep(5000),
- ?line case net_adm:ping(Node) of
- pong ->
- erlang:monitor_node(Node, true);
- _ ->
- test_server:fail(node_not_rebooted)
- end,
- ?line ok = rpc:call(Node, heart, set_cmd,
+ case net_adm:ping(Node) of
+ pong ->
+ erlang:monitor_node(Node, true);
+ _ ->
+ test_server:fail(node_not_rebooted)
+ end,
+ ok = rpc:call(Node, heart, set_cmd,
["erl -noshell -heart " ++ name(Node) ++ "&"]),
- ?line ok = rpc:call(Node, heart, clear_cmd, []),
- ?line rpc:call(Node, init, reboot, []),
+ ok = rpc:call(Node, heart, clear_cmd, []),
+ rpc:call(Node, init, reboot, []),
receive
{nodedown, Node} ->
ok
@@ -238,20 +317,20 @@ clear_cmd(Config) when is_list(Config) ->
test_server:fail(node_not_closed)
end,
test_server:sleep(5000),
- ?line case net_adm:ping(Node) of
- pang ->
- ok;
- _ ->
- test_server:fail(node_rebooted)
- end,
+ case net_adm:ping(Node) of
+ pang ->
+ ok;
+ _ ->
+ test_server:fail(node_rebooted)
+ end,
ok.
get_cmd(suite) -> [];
get_cmd(Config) when is_list(Config) ->
- ?line {ok, Node} = start_check(slave, heart_test),
+ {ok, Node} = start_check(slave, heart_test),
Cmd = "test",
- ?line ok = rpc:call(Node, heart, set_cmd, [Cmd]),
- ?line {ok, Cmd} = rpc:call(Node, heart, get_cmd, []),
+ ok = rpc:call(Node, heart, set_cmd, [Cmd]),
+ {ok, Cmd} = rpc:call(Node, heart, get_cmd, []),
stop_node(Node),
ok.
@@ -269,57 +348,53 @@ dont_drop(Config) when is_list(Config) ->
[ok,ok,ok,ok,ok,ok,ok,ok,ok,ok] = do_dont_drop(Config,10),
ok.
-do_dont_drop(_,0) ->
- [];
+do_dont_drop(_,0) -> [];
do_dont_drop(Config,N) ->
%% Name of first slave node
- ?line NN1 = atom_to_list(?MODULE) ++ "slave_1",
+ NN1 = atom_to_list(?MODULE) ++ "slave_1",
%% Name of node started by heart on failure
- ?line NN2 = atom_to_list(?MODULE) ++ "slave_2",
+ NN2 = atom_to_list(?MODULE) ++ "slave_2",
%% Name of node started by heart on success
- ?line NN3 = atom_to_list(?MODULE) ++ "slave_3",
- ?line Host = hd(tl(string:tokens(atom_to_list(node()),"@"))),
+ NN3 = atom_to_list(?MODULE) ++ "slave_3",
+ Host = hd(tl(string:tokens(atom_to_list(node()),"@"))),
%% The initial heart command
- ?line FirstCmd = erl() ++ name(NN2 ++ "@" ++ Host),
+ FirstCmd = erl() ++ name(NN2 ++ "@" ++ Host),
%% Separated the parameters to start_node_run for clarity...
- ?line Name = list_to_atom(NN1),
- ?line Env = [{"HEART_COMMAND", FirstCmd}],
- ?line Func = "start_heart_stress",
- ?line Arg = NN3 ++ "@" ++ Host ++ " " ++
+ Name = list_to_atom(NN1),
+ Env = [{"HEART_COMMAND", FirstCmd}],
+ Func = "start_heart_stress",
+ Arg = NN3 ++ "@" ++ Host ++ " " ++
filename:join(?config(data_dir, Config), "simple_echo"),
- ?line start_node_run(Name,Env,Func,Arg),
- ?line case wait_for_any_of(list_to_atom(NN2 ++ "@" ++ Host),
- list_to_atom(NN3 ++ "@" ++ Host)) of
- 2 ->
- ?line [ok | do_dont_drop(Config,N-1)];
- _ ->
- ?line false
- end.
+ start_node_run(Name,Env,Func,Arg),
+ case wait_for_any_of(list_to_atom(NN2 ++ "@" ++ Host),
+ list_to_atom(NN3 ++ "@" ++ Host)) of
+ 2 ->
+ [ok | do_dont_drop(Config,N-1)];
+ _ ->
+ false
+ end.
wait_for_any_of(N1,N2) ->
- ?line wait_for_any_of(N1,N2,45).
+ wait_for_any_of(N1,N2,45).
wait_for_any_of(_N1,_N2,0) ->
- ?line false;
+ false;
wait_for_any_of(N1,N2,Times) ->
- ?line receive
- after 1000 ->
- ?line ok
- end,
- ?line case net_adm:ping(N1) of
- pang ->
- ?line case net_adm:ping(N2) of
- pang ->
- ?line wait_for_any_of(N1,N2,Times - 1);
- pong ->
- ?line rpc:call(N2,init,stop,[]),
- ?line 2
- end;
- pong ->
- ?line rpc:call(N1,init,stop,[]),
- ?line 1
- end.
+ receive after 1000 -> ok end,
+ case net_adm:ping(N1) of
+ pang ->
+ case net_adm:ping(N2) of
+ pang ->
+ wait_for_any_of(N1,N2,Times - 1);
+ pong ->
+ rpc:call(N2,init,stop,[]),
+ 2
+ end;
+ pong ->
+ rpc:call(N1,init,stop,[]),
+ 1
+ end.
kill_pid(suite) ->
@@ -336,9 +411,7 @@ do_kill_pid(_Config) ->
{ok,Node} = start_node_run(Name,Env,suicide_by_heart,[]),
ok = wait_for_node(Node,15),
erlang:monitor_node(Node, true),
- receive
- {nodedown,Node} ->
- ok
+ receive {nodedown,Node} -> ok
after 30000 ->
false
end.
@@ -346,23 +419,16 @@ do_kill_pid(_Config) ->
wait_for_node(_,0) ->
false;
wait_for_node(Node,N) ->
- receive
- after 1000 ->
- ok
- end,
+ receive after 1000 -> ok end,
case net_adm:ping(Node) of
- pong ->
- ok;
- pang ->
- wait_for_node(Node,N-1)
+ pong -> ok;
+ pang -> wait_for_node(Node,N-1)
end.
erl() ->
case os:type() of
- {win32,_} ->
- "werl ";
- _ ->
- "erl "
+ {win32,_} -> "werl ";
+ _ -> "erl "
end.
name(Node) when is_list(Node) -> name(Node,[]);
@@ -379,15 +445,13 @@ name([H|T], Name) ->
name(T, [H|Name]).
-atom_conv(A) when is_atom(A) ->
- atom_to_list(A);
-atom_conv(A) when is_list(A) ->
- A.
+enc(A) when is_atom(A) -> atom_to_list(A);
+enc(A) when is_binary(A) -> binary_to_list(A);
+enc(A) when is_list(A) -> A.
-env_conv([]) ->
- [];
-env_conv([{X,Y}|T]) ->
- atom_conv(X) ++ " \"" ++ atom_conv(Y) ++ "\" " ++ env_conv(T).
+env_encode([]) -> [];
+env_encode([{X,Y}|T]) ->
+ "-env " ++ enc(X) ++ " \"" ++ enc(Y) ++ "\" " ++ env_encode(T).
%%%
%%% Starts a node and runs a function in this
@@ -398,12 +462,12 @@ env_conv([{X,Y}|T]) ->
%%% Argument is the argument(s) to send through erl -s
%%%
start_node_run(Name, Env, Function, Argument) ->
- ?line PA = filename:dirname(code:which(?MODULE)),
- ?line Params = "-heart -env " ++ env_conv(Env) ++ " -pa " ++ PA ++
- " -s " ++
- atom_conv(?MODULE) ++ " " ++ atom_conv(Function) ++ " " ++
- atom_conv(Argument),
- ?line start_node(Name, Params).
+ PA = filename:dirname(code:which(?MODULE)),
+ Params = "-heart " ++ env_encode(Env) ++ " -pa " ++ PA ++
+ " -s " ++
+ enc(?MODULE) ++ " " ++ enc(Function) ++ " " ++
+ enc(Argument),
+ start_node(Name, Params).
start_node(Name, Param) ->
test_server:start_node(Name, slave, [{args, Param}]).
@@ -469,3 +533,24 @@ suicide_by_heart() ->
{makaronipudding} ->
sallad
end.
+
+
+%% generate a module from binary
+generate(Module, Attributes, FunStrings) ->
+ FunForms = function_forms(FunStrings),
+ Forms = [
+ {attribute,1,module,Module},
+ {attribute,2,export,[FA || {FA,_} <- FunForms]}
+ ] ++ [{attribute, 3, A, V}|| {A, V} <- Attributes] ++
+ [ Function || {_, Function} <- FunForms],
+ {ok, Module, Bin} = compile:forms(Forms),
+ Bin.
+
+
+function_forms([]) -> [];
+function_forms([S|Ss]) ->
+ {ok, Ts,_} = erl_scan:string(S),
+ {ok, Form} = erl_parse:parse_form(Ts),
+ Fun = element(3, Form),
+ Arity = element(4, Form),
+ [{{Fun,Arity}, Form}|function_forms(Ss)].
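generate/3 and function_forms/1 above compile the quoted function strings to a module binary in memory, which the crash tests then push to the slave with erlang:load_module/2. A hedged usage sketch, assuming the trivial function body shown here (local load only for illustration):

    Code = generate(exhaust_atoms, [], ["do() -> ok."]),
    {module, exhaust_atoms} = erlang:load_module(exhaust_atoms, Code),
    ok = exhaust_atoms:do().
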
diff --git a/lib/kernel/test/inet_sockopt_SUITE.erl b/lib/kernel/test/inet_sockopt_SUITE.erl
index 087ae6055b..75496ce745 100644
--- a/lib/kernel/test/inet_sockopt_SUITE.erl
+++ b/lib/kernel/test/inet_sockopt_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -53,6 +53,8 @@
simple/1, loop_all/1, simple_raw/1, simple_raw_getbin/1,
doc_examples_raw/1,doc_examples_raw_getbin/1,
large_raw/1,large_raw_getbin/1,combined/1,combined_getbin/1,
+ ipv6_v6only_udp/1, ipv6_v6only_tcp/1, ipv6_v6only_sctp/1,
+ use_ipv6_v6only_udp/1,
type_errors/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
@@ -64,6 +66,8 @@ all() ->
[simple, loop_all, simple_raw, simple_raw_getbin,
doc_examples_raw, doc_examples_raw_getbin, large_raw,
large_raw_getbin, combined, combined_getbin,
+ ipv6_v6only_udp, ipv6_v6only_tcp, ipv6_v6only_sctp,
+ use_ipv6_v6only_udp,
type_errors].
groups() ->
@@ -127,7 +131,7 @@ loop_all(Config) when is_list(Config) ->
io_lib:format("Non mandatory failed:~w",
[Failed]))}
end.
-
+
simple_raw(suite) -> [];
@@ -461,6 +465,153 @@ do_combined(Config,Binary) when is_list(Config) ->
ok
end.
+
+
+ipv6_v6only_udp(suite) -> [];
+ipv6_v6only_udp(doc) -> "Test socket option ipv6_v6only for UDP";
+ipv6_v6only_udp(Config) when is_list(Config) ->
+ ipv6_v6only(Config, gen_udp).
+
+ipv6_v6only_tcp(suite) -> [];
+ipv6_v6only_tcp(doc) -> "Test socket option ipv6_v6only for TCP";
+ipv6_v6only_tcp(Config) when is_list(Config) ->
+ ipv6_v6only(Config, gen_tcp).
+
+ipv6_v6only_sctp(suite) -> [];
+ipv6_v6only_sctp(doc) -> "Test socket option ipv6_v6only for SCTP";
+ipv6_v6only_sctp(Config) when is_list(Config) ->
+ ipv6_v6only(Config, gen_sctp).
+
+ipv6_v6only(Config, Module) when is_list(Config) ->
+ ?line case ipv6_v6only_open(Module, []) of
+ {ok,S1} ->
+ ?line case inet:getopts(S1, [ipv6_v6only]) of
+ {ok,[{ipv6_v6only,Default}]}
+ when is_boolean(Default) ->
+ ?line ok =
+ ipv6_v6only_close(Module, S1),
+ ?line ipv6_v6only(Config, Module, Default);
+ {ok,[]} ->
+ ?line io:format("Not implemented.~n", []),
+ %% This list of OSes where the option is
+ %% supposed to be not implemented is just
+ %% a guess, and may grow with time.
+ ?line case {os:type(),os:version()} of
+ {{unix,linux},{2,M,_}}
+ when M =< 4 -> ok
+ end,
+ %% At least this should work
+ ?line {ok,S2} =
+ ipv6_v6only_open(
+ Module,
+ [{ipv6_v6only,true}]),
+ ?line ok =
+ ipv6_v6only_close(Module, S2)
+ end;
+ {error,_} ->
+ {skipped,"Socket type not supported"}
+ end.
+
+ipv6_v6only(Config, Module, Default) when is_list(Config) ->
+ ?line io:format("Default ~w.~n", [Default]),
+ ?line {ok,S1} =
+ ipv6_v6only_open(Module, [{ipv6_v6only,Default}]),
+ ?line {ok,[{ipv6_v6only,Default}]} =
+ inet:getopts(S1, [ipv6_v6only]),
+ ?line ok =
+ ipv6_v6only_close(Module, S1),
+ ?line NotDefault = not Default,
+ ?line case ipv6_v6only_open(Module, [{ipv6_v6only,NotDefault}]) of
+ {ok,S2} ->
+ ?line io:format("Read-write.~n", []),
+ ?line {ok,[{ipv6_v6only,NotDefault}]} =
+ inet:getopts(S2, [ipv6_v6only]),
+ ok;
+ {error,einval} ->
+ ?line io:format("Read-only.~n", []),
+ %% This option is known to be read-only and true
+ %% on Windows and OpenBSD
+ ?line case os:type() of
+ {unix,openbsd} when Default =:= true -> ok;
+ {win32,_} when Default =:= true -> ok
+ end
+ end.
+
+ipv6_v6only_open(Module, Opts) ->
+ Module:case Module of
+ gen_tcp -> listen;
+ _ -> open
+ end(0, [inet6|Opts]).
+
+ipv6_v6only_close(Module, Socket) ->
+ Module:close(Socket).
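ipv6_v6only_open/2 relies on the function position of a remote call being allowed to be any expression, so the case selects listen/2 for gen_tcp and open/2 for gen_udp and gen_sctp. An equivalent, more conventional spelling (the _verbose name is only for illustration):

    ipv6_v6only_open_verbose(Module, Opts) ->
        F = case Module of
                gen_tcp -> listen;
                _       -> open
            end,
        Module:F(0, [inet6|Opts]).
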
+
+
+use_ipv6_v6only_udp(suite) -> [];
+use_ipv6_v6only_udp(doc) -> "Test using socket option ipv6_v6only for UDP";
+use_ipv6_v6only_udp(Config) when is_list(Config) ->
+ ?line case gen_udp:open(0, [inet6,{ipv6_v6only,true}]) of
+ {ok,S6} ->
+ ?line case inet:getopts(S6, [ipv6_v6only]) of
+ {ok,[{ipv6_v6only,true}]} ->
+ use_ipv6_v6only_udp(Config, S6);
+ {ok,Other} ->
+ {skipped,{getopts,Other}}
+ end;
+ {error,_} ->
+ {skipped,"Socket type not supported"}
+ end.
+
+use_ipv6_v6only_udp(_Config, S6) ->
+ ?line {ok,Port} = inet:port(S6),
+ ?line {ok,S4} = gen_udp:open(Port, [inet]),
+ ?line E6 = " IPv6-echo.",
+ ?line E4 = " IPv4-echo.",
+ ?line Sender =
+ spawn_link(fun () -> use_ipv6_v6only_udp_sender(Port, E6, E4) end),
+ ?line use_ipv6_v6only_udp_listener(
+ S6, S4, E6, E4, monitor(process, Sender)).
+
+use_ipv6_v6only_udp_listener(S6, S4, E6, E4, Mref) ->
+ ?line receive
+ {udp,S6,IP,P,Data} ->
+ ?line ok = gen_udp:send(S6, IP, P, [Data|E6]),
+ ?line use_ipv6_v6only_udp_listener(S6, S4, E6, E4, Mref);
+ {udp,S4,IP,P,Data} ->
+ ?line ok = gen_udp:send(S4, IP, P, [Data|E4]),
+ ?line use_ipv6_v6only_udp_listener(S6, S4, E6, E4, Mref);
+ {'DOWN',Mref,_,_,normal} ->
+ ok;
+ {'DOWN',Mref,_,_,Result} ->
+ %% Since we are linked we will never arrive here
+ Result;
+ Other ->
+ ?line exit({failed,{listener_unexpected,Other}})
+ end.
+
+use_ipv6_v6only_udp_sender(Port, E6, E4) ->
+ D6 = "IPv6-send.",
+ D4 = "IPv4-send.",
+ R6 = D6 ++ E6,
+ R4 = D4 ++ E4,
+ R6 = sndrcv({0,0,0,0,0,0,0,1}, Port, [inet6], D6),
+ R4 = sndrcv({127,0,0,1}, Port, [inet], D4),
+ ok.
+
+sndrcv(Ip, Port, Opts, Data) ->
+ {ok,S} = gen_udp:open(0, Opts),
+ io:format("[~w:~w] ! ~s~n", [Ip,Port,Data]),
+ ok = gen_udp:send(S, Ip, Port, Data),
+ receive
+ {udp,S,Ip,Port,RecData} ->
+ io:format("[~w:~w] : ~s~n", [Ip,Port,RecData]),
+ RecData;
+ Other ->
+ exit({failed,{sndrcv_unexpected,Other}})
+ end.
+
+
+
type_errors(suite) ->
[];
type_errors(doc) ->
diff --git a/lib/kernel/test/interactive_shell_SUITE.erl b/lib/kernel/test/interactive_shell_SUITE.erl
index 4787f19250..36e13cec26 100644
--- a/lib/kernel/test/interactive_shell_SUITE.erl
+++ b/lib/kernel/test/interactive_shell_SUITE.erl
@@ -29,20 +29,11 @@
-export([toerl_server/3]).
init_per_testcase(_Func, Config) ->
- Dog = test_server:timetrap(test_server:seconds(60)),
- Term = case os:getenv("TERM") of
- List when is_list(List) ->
- List;
- _ ->
- "dumb"
- end,
- os:putenv("TERM","vt100"),
- [{watchdog,Dog},{term,Term}|Config].
+ Dog = test_server:timetrap(test_server:minutes(3)),
+ [{watchdog,Dog}|Config].
end_per_testcase(_Func, Config) ->
Dog = ?config(watchdog, Config),
- Term = ?config(term,Config),
- os:putenv("TERM",Term),
test_server:timetrap_cancel(Dog).
@@ -56,9 +47,19 @@ groups() ->
[].
init_per_suite(Config) ->
- Config.
+ Term = case os:getenv("TERM") of
+ List when is_list(List) ->
+ List;
+ _ ->
+ "dumb"
+ end,
+ os:putenv("TERM","vt100"),
+ DefShell = get_default_shell(),
+ [{default_shell,DefShell},{term,Term}|Config].
-end_per_suite(_Config) ->
+end_per_suite(Config) ->
+ Term = ?config(term,Config),
+ os:putenv("TERM",Term),
ok.
init_per_group(_GroupName, Config) ->
@@ -78,70 +79,118 @@ end_per_group(_GroupName, Config) ->
get_columns_and_rows(suite) -> [];
get_columns_and_rows(doc) -> ["Test that the shell can access columns and rows"];
get_columns_and_rows(Config) when is_list(Config) ->
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,"io:columns()."},
-%% Behaviour change in R12B-5, returns 80
-%% {getline,"{error,enotsup}"},
- {getline,"{ok,80}"},
- {putline,"io:rows()."},
-%% Behaviour change in R12B-5, returns 24
-%% {getline,"{error,enotsup}"}
- {getline,"{ok,24}"}
- ],[]),
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,"io:columns()."},
- {getline,"{ok,90}"},
- {putline,"io:rows()."},
- {getline,"{ok,40}"}],
- [],
- "stty rows 40; stty columns 90; ").
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ %% Old shell tests
+ ?dbg(old_shell),
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ {getline_re,".*{error,enotsup}"},
+ {putline,"io:rows()."},
+ {getline_re,".*{error,enotsup}"}
+
+ ],[]),
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ {getline_re,".*{ok,90}"},
+ {putline,"io:rows()."},
+ {getline_re,".*{ok,40}"}],
+ [],
+ "stty rows 40; stty columns 90; ");
+ new ->
+ % New shell tests
+ ?dbg(new_shell),
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ %% Behaviour change in R12B-5, returns 80
+ %% {getline,"{error,enotsup}"},
+ {getline,"{ok,80}"},
+ {putline,"io:rows()."},
+ %% Behaviour change in R12B-5, returns 24
+ %% {getline,"{error,enotsup}"}
+ {getline,"{ok,24}"}
+ ],[]),
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ {getline,"{ok,90}"},
+ {putline,"io:rows()."},
+ {getline,"{ok,40}"}],
+ [],
+ "stty rows 40; stty columns 90; ")
+ end.
exit_initial(suite) -> [];
exit_initial(doc) -> ["Tests that exit of initial shell restarts shell"];
exit_initial(Config) when is_list(Config) ->
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,"exit()."},
- {getline,""},
- {getline,"Eshell"},
- {putline,""},
- {putline,"35."},
- {getline,"35"}],[]).
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ rtnode([{putline,""},
+ {putline, "2."},
+ {getline_re, ".*2"},
+ {putline,"exit()."},
+ {getline,""},
+ {getline,"Eshell"},
+ {putline,""},
+ {putline,"35."},
+ {getline_re,".*35"}],[]);
+ new ->
+ rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"exit()."},
+ {getline,""},
+ {getline,"Eshell"},
+ {putline,""},
+ {putline,"35."},
+ {getline_re,"35"}],[])
+ end.
job_control_local(suite) -> [];
job_control_local(doc) -> [ "Tests that local shell can be "
"started by means of job control" ];
job_control_local(Config) when is_list(Config) ->
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,[7]},
- {sleep,timeout(short)},
- {putline,""},
- {getline," -->"},
- {putline,"s"},
- {putline,"c"},
- {putline_raw,""},
- {getline,"Eshell"},
- {putline_raw,""},
- {getline,"1>"},
- {putline,"35."},
- {getline,"35"}],[]).
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ %% Old shell tests
+ {skip,"No new shell found"};
+ new ->
+ %% New shell tests
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,[7]},
+ {sleep,timeout(short)},
+ {putline,""},
+ {getline," -->"},
+ {putline,"s"},
+ {putline,"c"},
+ {putline_raw,""},
+ {getline,"Eshell"},
+ {putline_raw,""},
+ {getline,"1>"},
+ {putline,"35."},
+ {getline,"35"}],[])
+ end.
job_control_remote(suite) -> [];
job_control_remote(doc) -> [ "Tests that remote shell can be "
"started by means of job control" ];
job_control_remote(Config) when is_list(Config) ->
- case node() of
- nonode@nohost ->
+ case {node(),proplists:get_value(default_shell,Config)} of
+ {nonode@nohost,_} ->
?line exit(not_distributed);
+ {_,old} ->
+ {skip,"No new shell found"};
_ ->
?line RNode = create_nodename(),
?line MyNode = atom_to_list(node()),
@@ -190,9 +239,11 @@ job_control_remote_noshell(doc) ->
[ "Tests that remote shell can be "
"started by means of job control to -noshell node" ];
job_control_remote_noshell(Config) when is_list(Config) ->
- case node() of
- nonode@nohost ->
+ case {node(),proplists:get_value(default_shell,Config)} of
+ {nonode@nohost,_} ->
?line exit(not_distributed);
+ {_,old} ->
+ {skip,"No new shell found"};
_ ->
?line RNode = create_nodename(),
?line NSNode = start_noshell_node(interactive_shell_noshell),
@@ -351,6 +402,33 @@ get_and_put(CPid, [{getline, Match}|T],N) ->
end
end;
+%% Hey ho copy paste from stdlib/io_proto_SUITE
+get_and_put(CPid, [{getline_re, Match}|T],N) ->
+ ?dbg({getline_re, Match}),
+ CPid ! {self(), {get_line, timeout(normal)}},
+ receive
+ {get_line, timeout} ->
+ error_logger:error_msg("~p: getline_re timeout waiting for \"~s\" "
+ "(command number ~p, skipped: ~p)~n",
+ [?MODULE, Match,N,get(getline_skipped)]),
+ {error, timeout};
+ {get_line, Data} ->
+ ?dbg({data,Data}),
+ case re:run(Data, Match,[{capture,none}]) of
+ match ->
+ erase(getline_skipped),
+ get_and_put(CPid, T,N+1);
+ _ ->
+ case get(getline_skipped) of
+ undefined ->
+ put(getline_skipped,[Data]);
+ List ->
+ put(getline_skipped,List ++ [Data])
+ end,
+ get_and_put(CPid, [{getline_re, Match}|T],N)
+ end
+ end;
+
get_and_put(CPid, [{putline_raw, Line}|T],N) ->
?dbg({putline_raw, Line}),
CPid ! {self(), {send_line, Line}},
@@ -631,6 +709,13 @@ get_data_within(Port, Timeout, Acc) ->
timeout
end.
-
-
-
+get_default_shell() ->
+ try
+ rtnode([{putline,""},
+ {putline, "whereis(user_drv)."},
+ {getline, "undefined"}],[]),
+ old
+ catch E:R ->
+ ?dbg({E,R}),
+ new
+ end.
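get_default_shell/0 above probes the spawned node for the user_drv process: the old shell runs without it, so the getline on "undefined" succeeds and old is returned, while any failure is taken to mean the new shell. Reduced to its core, the check is equivalent to the sketch below, which is only meaningful when evaluated inside the node being probed (hypothetical helper, not part of the patch):

    default_shell_of_this_node() ->
        case whereis(user_drv) of
            undefined            -> old;
            Pid when is_pid(Pid) -> new
        end.
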
diff --git a/lib/kernel/test/kernel.cover b/lib/kernel/test/kernel.cover
index f6967ca651..af1dd7eaad 100644
--- a/lib/kernel/test/kernel.cover
+++ b/lib/kernel/test/kernel.cover
@@ -1,3 +1,3 @@
%% -*- erlang -*-
-{incl_mods,[gen_udp,inet6_udp,inet_res,inet_dns]}.
+{incl_app,kernel,details}.
diff --git a/lib/kernel/test/wrap_log_reader_SUITE.erl b/lib/kernel/test/wrap_log_reader_SUITE.erl
index 96dc3e6d33..6c47fda9c5 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE.erl
@@ -557,7 +557,7 @@ rec(M, Where) ->
M ->
ok;
Else -> ?t:fail({error, {Where, Else}})
- after 1000 -> ?t:fail({error, {Where, time_out}})
+ after 5000 -> ?t:fail({error, {Where, time_out}})
end.
pps() ->
diff --git a/lib/public_key/asn1/AuthenticationFramework.asn1 b/lib/public_key/asn1/AuthenticationFramework.asn1
new file mode 100644
index 0000000000..3754486473
--- /dev/null
+++ b/lib/public_key/asn1/AuthenticationFramework.asn1
@@ -0,0 +1,367 @@
+AuthenticationFramework {joint-iso-itu-t ds(5) module(1)
+ authenticationFramework(7) 6} DEFINITIONS ::=
+BEGIN
+
+-- EXPORTS All
+-- The types and values defined in this module are exported for use in the other ASN.1 modules contained
+-- within the Directory Specifications, and for the use of other applications which will use them to access
+-- Directory services. Other applications may use them for their own purposes, but this will not constrain
+-- extensions and modifications needed to maintain or improve the Directory service.
+IMPORTS
+ id-at, id-nf, id-oc, informationFramework, selectedAttributeTypes,
+ basicAccessControl, certificateExtensions
+ FROM UsefulDefinitions {joint-iso-itu-t ds(5) module(1)
+ usefulDefinitions(0) 6}
+ Name, ATTRIBUTE, OBJECT-CLASS, NAME-FORM, top
+ FROM InformationFramework informationFramework
+ UniqueIdentifier, octetStringMatch, commonName, UnboundedDirectoryString
+ FROM SelectedAttributeTypes selectedAttributeTypes
+ certificateExactMatch, certificatePairExactMatch, certificateListExactMatch,
+ KeyUsage, GeneralNames, CertificatePoliciesSyntax,
+ algorithmIdentifierMatch, CertPolicyId
+ FROM CertificateExtensions certificateExtensions;
+
+-- parameterized types
+ENCRYPTED{ToBeEnciphered} ::=
+ BIT STRING
+ (CONSTRAINED BY {
+ -- shall be the result of applying an encipherment procedure
+ -- to the BER-encoded octets of a value of --ToBeEnciphered})
+
+HASH{ToBeHashed} ::= SEQUENCE {
+ algorithmIdentifier AlgorithmIdentifier{{SupportedAlgorithms}},
+ hashValue
+ BIT STRING
+ (CONSTRAINED BY {
+ -- shall be the result of applying a hashing procedure to the DER-encoded octets
+ -- of a value of -- ToBeHashed})
+}
+
+ENCRYPTED-HASH{ToBeSigned} ::=
+ BIT STRING
+ (CONSTRAINED BY {
+ -- shall be the result of applying a hashing procedure to the DER-encoded (see 6.1) octets
+ -- of a value of --ToBeSigned -- and then applying an encipherment procedure to those octets --})
+
+SIGNATURE{ToBeSigned} ::= SEQUENCE {
+ algorithmIdentifier AlgorithmIdentifier{{SupportedAlgorithms}},
+ encrypted ENCRYPTED-HASH{ToBeSigned}
+}
+
+SIGNED{ToBeSigned} ::= SEQUENCE {
+ toBeSigned ToBeSigned,
+ COMPONENTS OF SIGNATURE{ToBeSigned}
+}
+
+-- public-key certificate definition
+Certificate ::= SIGNED{CertificateContent}
+
+CertificateContent ::= SEQUENCE {
+ version [0] Version DEFAULT v1,
+ serialNumber CertificateSerialNumber,
+ signature AlgorithmIdentifier{{SupportedAlgorithms}},
+ issuer Name,
+ validity Validity,
+ subject Name,
+ subjectPublicKeyInfo SubjectPublicKeyInfo,
+ issuerUniqueIdentifier [1] IMPLICIT UniqueIdentifier OPTIONAL,
+ -- if present, version shall be v2 or v3
+ subjectUniqueIdentifier [2] IMPLICIT UniqueIdentifier OPTIONAL,
+ -- if present, version shall be v2 or v3
+ extensions [3] Extensions OPTIONAL
+ -- If present, version shall be v3
+}
+
+Version ::= INTEGER {v1(0), v2(1), v3(2)}
+
+CertificateSerialNumber ::= INTEGER
+
+AlgorithmIdentifier{ALGORITHM:SupportedAlgorithms} ::= SEQUENCE {
+ algorithm ALGORITHM.&id({SupportedAlgorithms}),
+ parameters ALGORITHM.&Type({SupportedAlgorithms}{@algorithm}) OPTIONAL
+}
+
+-- Definition of the following information object set is deferred, perhaps to standardized
+-- profiles or to protocol implementation conformance statements. The set is required to
+-- specify a table constraint on the parameters component of AlgorithmIdentifier.
+SupportedAlgorithms ALGORITHM ::=
+ {...}
+
+Validity ::= SEQUENCE {notBefore Time,
+ notAfter Time
+}
+
+SubjectPublicKeyInfo ::= SEQUENCE {
+ algorithm AlgorithmIdentifier{{SupportedAlgorithms}},
+ subjectPublicKey BIT STRING
+}
+
+Time ::= CHOICE {utcTime UTCTime,
+ generalizedTime GeneralizedTime
+}
+
+Extensions ::= SEQUENCE OF Extension
+
+-- For those extensions where ordering of individual extensions within the SEQUENCE is significant, the
+-- specification of those individual extensions shall include the rules for the significance of the order therein
+Extension ::= SEQUENCE {
+ extnId EXTENSION.&id({ExtensionSet}),
+ critical BOOLEAN DEFAULT FALSE,
+ extnValue
+ OCTET STRING
+ (CONTAINING EXTENSION.&ExtnType({ExtensionSet}{@extnId})
+ ENCODED BY
+ der)
+}
+
+der OBJECT IDENTIFIER ::=
+ {joint-iso-itu-t asn1(1) ber-derived(2) distinguished-encoding(1)}
+
+ExtensionSet EXTENSION ::=
+ {...}
+
+EXTENSION ::= CLASS {&id OBJECT IDENTIFIER UNIQUE,
+ &ExtnType
+}WITH SYNTAX {SYNTAX &ExtnType
+ IDENTIFIED BY &id
+}
+
+ALGORITHM ::= CLASS {&Type OPTIONAL,
+ &id OBJECT IDENTIFIER UNIQUE
+}WITH SYNTAX {[&Type]
+ IDENTIFIED BY &id
+}
+
+-- other PKI certificate constructs
+Certificates ::= SEQUENCE {
+ userCertificate Certificate,
+ certificationPath ForwardCertificationPath OPTIONAL
+}
+
+CertificationPath ::= SEQUENCE {
+ userCertificate Certificate,
+ theCACertificates SEQUENCE OF CertificatePair OPTIONAL
+}
+
+ForwardCertificationPath ::= SEQUENCE OF CrossCertificates
+
+CrossCertificates ::= SET OF Certificate
+
+PkiPath ::= SEQUENCE OF Certificate
+
+-- certificate revocation list (CRL)
+CertificateList ::=
+ SIGNED{CertificateListContent}
+
+CertificateListContent ::= SEQUENCE {
+ version Version OPTIONAL,
+ -- if present, version shall be v2
+ signature AlgorithmIdentifier{{SupportedAlgorithms}},
+ issuer Name,
+ thisUpdate Time,
+ nextUpdate Time OPTIONAL,
+ revokedCertificates
+ SEQUENCE OF
+ SEQUENCE {serialNumber CertificateSerialNumber,
+ revocationDate Time,
+ crlEntryExtensions Extensions OPTIONAL} OPTIONAL,
+ crlExtensions [0] Extensions OPTIONAL
+}
+
+-- PKI object classes
+pkiUser OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND auxiliary
+ MAY CONTAIN {userCertificate}
+ ID id-oc-pkiUser
+}
+
+pkiCA OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND auxiliary
+ MAY CONTAIN
+ {cACertificate | certificateRevocationList | authorityRevocationList |
+ crossCertificatePair}
+ ID id-oc-pkiCA
+}
+
+cRLDistributionPoint OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND structural
+ MUST CONTAIN {commonName}
+ MAY CONTAIN
+ {certificateRevocationList | authorityRevocationList | deltaRevocationList}
+ ID id-oc-cRLDistributionPoint
+}
+
+cRLDistPtNameForm NAME-FORM ::= {
+ NAMES cRLDistributionPoint
+ WITH ATTRIBUTES {commonName}
+ ID id-nf-cRLDistPtNameForm
+}
+
+deltaCRL OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND auxiliary
+ MAY CONTAIN {deltaRevocationList}
+ ID id-oc-deltaCRL
+}
+
+cpCps OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND auxiliary
+ MAY CONTAIN {certificatePolicy | certificationPracticeStmt}
+ ID id-oc-cpCps
+}
+
+pkiCertPath OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND auxiliary
+ MAY CONTAIN {pkiPath}
+ ID id-oc-pkiCertPath
+}
+
+-- PKI directory attributes
+userCertificate ATTRIBUTE ::= {
+ WITH SYNTAX Certificate
+ EQUALITY MATCHING RULE certificateExactMatch
+ ID id-at-userCertificate
+}
+
+cACertificate ATTRIBUTE ::= {
+ WITH SYNTAX Certificate
+ EQUALITY MATCHING RULE certificateExactMatch
+ ID id-at-cAcertificate
+}
+
+crossCertificatePair ATTRIBUTE ::= {
+ WITH SYNTAX CertificatePair
+ EQUALITY MATCHING RULE certificatePairExactMatch
+ ID id-at-crossCertificatePair
+}
+
+CertificatePair ::= SEQUENCE {
+ forward [0] Certificate OPTIONAL,
+ reverse [1] Certificate OPTIONAL
+ -- at least one of the pair shall be present
+}
+(WITH COMPONENTS {
+ ...,
+ forward PRESENT
+ } | WITH COMPONENTS {
+ ...,
+ reverse PRESENT
+ })
+
+certificateRevocationList ATTRIBUTE ::= {
+ WITH SYNTAX CertificateList
+ EQUALITY MATCHING RULE certificateListExactMatch
+ ID id-at-certificateRevocationList
+}
+
+authorityRevocationList ATTRIBUTE ::= {
+ WITH SYNTAX CertificateList
+ EQUALITY MATCHING RULE certificateListExactMatch
+ ID id-at-authorityRevocationList
+}
+
+deltaRevocationList ATTRIBUTE ::= {
+ WITH SYNTAX CertificateList
+ EQUALITY MATCHING RULE certificateListExactMatch
+ ID id-at-deltaRevocationList
+}
+
+supportedAlgorithms ATTRIBUTE ::= {
+ WITH SYNTAX SupportedAlgorithm
+ EQUALITY MATCHING RULE algorithmIdentifierMatch
+ ID id-at-supportedAlgorithms
+}
+
+SupportedAlgorithm ::= SEQUENCE {
+ algorithmIdentifier AlgorithmIdentifier{{SupportedAlgorithms}},
+ intendedUsage [0] KeyUsage OPTIONAL,
+ intendedCertificatePolicies [1] CertificatePoliciesSyntax OPTIONAL
+}
+
+certificationPracticeStmt ATTRIBUTE ::= {
+ WITH SYNTAX InfoSyntax
+ ID id-at-certificationPracticeStmt
+}
+
+InfoSyntax ::= CHOICE {
+ content UnboundedDirectoryString,
+ pointer SEQUENCE {name GeneralNames,
+ hash HASH{HashedPolicyInfo} OPTIONAL}
+}
+
+POLICY ::= TYPE-IDENTIFIER
+
+HashedPolicyInfo ::= POLICY.&Type({Policies})
+
+Policies POLICY ::=
+ {...} -- Defined by implementors
+
+certificatePolicy ATTRIBUTE ::= {
+ WITH SYNTAX PolicySyntax
+ ID id-at-certificatePolicy
+}
+
+PolicySyntax ::= SEQUENCE {
+ policyIdentifier PolicyID,
+ policySyntax InfoSyntax
+}
+
+PolicyID ::= CertPolicyId
+
+pkiPath ATTRIBUTE ::= {WITH SYNTAX PkiPath
+ ID id-at-pkiPath
+}
+
+userPassword ATTRIBUTE ::= {
+ WITH SYNTAX OCTET STRING(SIZE (0..MAX))
+ EQUALITY MATCHING RULE octetStringMatch
+ ID id-at-userPassword
+}
+
+-- object identifier assignments
+-- object classes
+id-oc-cRLDistributionPoint OBJECT IDENTIFIER ::=
+ {id-oc 19}
+
+id-oc-pkiUser OBJECT IDENTIFIER ::= {id-oc 21}
+
+id-oc-pkiCA OBJECT IDENTIFIER ::= {id-oc 22}
+
+id-oc-deltaCRL OBJECT IDENTIFIER ::= {id-oc 23}
+
+id-oc-cpCps OBJECT IDENTIFIER ::= {id-oc 30}
+
+id-oc-pkiCertPath OBJECT IDENTIFIER ::= {id-oc 31}
+
+-- name forms
+id-nf-cRLDistPtNameForm OBJECT IDENTIFIER ::= {id-nf 14}
+
+-- directory attributes
+id-at-userPassword OBJECT IDENTIFIER ::= {id-at 35}
+
+id-at-userCertificate OBJECT IDENTIFIER ::= {id-at 36}
+
+id-at-cAcertificate OBJECT IDENTIFIER ::= {id-at 37}
+
+id-at-authorityRevocationList OBJECT IDENTIFIER ::= {id-at 38}
+
+id-at-certificateRevocationList OBJECT IDENTIFIER ::= {id-at 39}
+
+id-at-crossCertificatePair OBJECT IDENTIFIER ::= {id-at 40}
+
+id-at-supportedAlgorithms OBJECT IDENTIFIER ::= {id-at 52}
+
+id-at-deltaRevocationList OBJECT IDENTIFIER ::= {id-at 53}
+
+id-at-certificationPracticeStmt OBJECT IDENTIFIER ::= {id-at 68}
+
+id-at-certificatePolicy OBJECT IDENTIFIER ::= {id-at 69}
+
+id-at-pkiPath OBJECT IDENTIFIER ::= {id-at 70}
+
+END -- AuthenticationFramework
diff --git a/lib/public_key/asn1/InformationFramework.asn1 b/lib/public_key/asn1/InformationFramework.asn1
new file mode 100644
index 0000000000..4aed43a39e
--- /dev/null
+++ b/lib/public_key/asn1/InformationFramework.asn1
@@ -0,0 +1,682 @@
+InformationFramework {joint-iso-itu-t ds(5) module(1) informationFramework(1)
+ 6} DEFINITIONS ::=
+BEGIN
+
+-- EXPORTS All
+-- The types and values defined in this module are exported for use in the other ASN.1 modules contained
+-- within the Directory Specifications, and for the use of other applications which will use them to access
+-- Directory services. Other applications may use them for their own purposes, but this will not constrain
+-- extensions and modifications needed to maintain or improve the Directory service.
+IMPORTS
+ -- from ITU-T Rec. X.501 | ISO/IEC 9594-2
+ directoryAbstractService, id-ar, id-at, id-mr, id-nf, id-oa, id-oc,
+ id-sc, selectedAttributeTypes, serviceAdministration
+ FROM UsefulDefinitions {joint-iso-itu-t ds(5) module(1)
+ usefulDefinitions(0) 6}
+ SearchRule
+ FROM ServiceAdministration serviceAdministration
+ -- from ITU-T Rec. X.511 | ISO/IEC 9594-3
+ TypeAndContextAssertion
+ FROM DirectoryAbstractService directoryAbstractService
+ -- from ITU-T Rec. X.520 | ISO/IEC 9594-6
+ booleanMatch, commonName, generalizedTimeMatch, generalizedTimeOrderingMatch,
+ integerFirstComponentMatch, integerMatch, integerOrderingMatch,
+ objectIdentifierFirstComponentMatch, UnboundedDirectoryString
+ FROM SelectedAttributeTypes selectedAttributeTypes;
+
+-- attribute data types
+Attribute{ATTRIBUTE:SupportedAttributes} ::= SEQUENCE {
+ type ATTRIBUTE.&id({SupportedAttributes}),
+ values
+ SET SIZE (0..MAX) OF ATTRIBUTE.&Type({SupportedAttributes}{@type}),
+ valuesWithContext
+ SET SIZE (1..MAX) OF
+ SEQUENCE {value ATTRIBUTE.&Type({SupportedAttributes}{@type}),
+ contextList SET SIZE (1..MAX) OF Context} OPTIONAL
+}
+
+AttributeType ::= ATTRIBUTE.&id
+
+AttributeValue ::= ATTRIBUTE.&Type
+
+Context ::= SEQUENCE {
+ contextType CONTEXT.&id({SupportedContexts}),
+ contextValues
+ SET SIZE (1..MAX) OF CONTEXT.&Type({SupportedContexts}{@contextType}),
+ fallback BOOLEAN DEFAULT FALSE
+}
+
+AttributeValueAssertion ::= SEQUENCE {
+ type ATTRIBUTE.&id({SupportedAttributes}),
+ assertion
+ ATTRIBUTE.&equality-match.&AssertionType
+ ({SupportedAttributes}{@type}),
+ assertedContexts
+ CHOICE {allContexts [0] NULL,
+ selectedContexts [1] SET SIZE (1..MAX) OF ContextAssertion
+ } OPTIONAL
+}
+
+ContextAssertion ::= SEQUENCE {
+ contextType CONTEXT.&id({SupportedContexts}),
+ contextValues
+ SET SIZE (1..MAX) OF
+ CONTEXT.&Assertion({SupportedContexts}{@contextType})
+}
+
+AttributeTypeAssertion ::= SEQUENCE {
+ type ATTRIBUTE.&id({SupportedAttributes}),
+ assertedContexts SEQUENCE SIZE (1..MAX) OF ContextAssertion OPTIONAL
+}
+
+-- Definition of the following information object set is deferred, perhaps to standardized
+-- profiles or to protocol implementation conformance statements. The set is required to
+-- specify a table constraint on the values component of Attribute, the value component
+-- of AttributeTypeAndValue, and the assertion component of AttributeValueAssertion.
+SupportedAttributes ATTRIBUTE ::=
+ {objectClass | aliasedEntryName, ...}
+
+-- Definition of the following information object set is deferred, perhaps to standardized
+-- profiles or to protocol implementation conformance statements. The set is required to
+-- specify a table constraint on the context specifications
+SupportedContexts CONTEXT ::=
+ {...}
+
+-- naming data types
+Name ::= CHOICE { -- only one possibility for now --rdnSequence RDNSequence
+}
+
+RDNSequence ::= SEQUENCE OF RelativeDistinguishedName
+
+DistinguishedName ::= RDNSequence
+
+RelativeDistinguishedName ::=
+ SET SIZE (1..MAX) OF AttributeTypeAndDistinguishedValue
+
+AttributeTypeAndDistinguishedValue ::= SEQUENCE {
+ type ATTRIBUTE.&id({SupportedAttributes}),
+ value ATTRIBUTE.&Type({SupportedAttributes}{@type}),
+ primaryDistinguished BOOLEAN DEFAULT TRUE,
+ valuesWithContext
+ SET SIZE (1..MAX) OF
+ SEQUENCE {distingAttrValue
+ [0] ATTRIBUTE.&Type({SupportedAttributes}{@type})
+ OPTIONAL,
+ contextList SET SIZE (1..MAX) OF Context} OPTIONAL
+}
+
+-- subtree data types
+SubtreeSpecification ::= SEQUENCE {
+ base [0] LocalName DEFAULT {},
+ COMPONENTS OF ChopSpecification,
+ specificationFilter [4] Refinement OPTIONAL
+}
+
+-- empty sequence specifies whole administrative area
+LocalName ::= RDNSequence
+
+ChopSpecification ::= SEQUENCE {
+ specificExclusions
+ [1] SET SIZE (1..MAX) OF
+ CHOICE {chopBefore [0] LocalName,
+ chopAfter [1] LocalName} OPTIONAL,
+ minimum [2] BaseDistance DEFAULT 0,
+ maximum [3] BaseDistance OPTIONAL
+}
+
+BaseDistance ::= INTEGER(0..MAX)
+
+Refinement ::= CHOICE {
+ item [0] OBJECT-CLASS.&id,
+ and [1] SET SIZE (1..MAX) OF Refinement,
+ or [2] SET SIZE (1..MAX) OF Refinement,
+ not [3] Refinement
+}
+
+-- OBJECT-CLASS information object class specification
+OBJECT-CLASS ::= CLASS {
+ &Superclasses OBJECT-CLASS OPTIONAL,
+ &kind ObjectClassKind DEFAULT structural,
+ &MandatoryAttributes ATTRIBUTE OPTIONAL,
+ &OptionalAttributes ATTRIBUTE OPTIONAL,
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ [SUBCLASS OF &Superclasses]
+ [KIND &kind]
+ [MUST CONTAIN &MandatoryAttributes]
+ [MAY CONTAIN &OptionalAttributes]
+ ID &id
+}
+
+ObjectClassKind ::= ENUMERATED {abstract(0), structural(1), auxiliary(2)}
+
+-- object classes
+top OBJECT-CLASS ::= {
+ KIND abstract
+ MUST CONTAIN {objectClass}
+ ID id-oc-top
+}
+
+alias OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ MUST CONTAIN {aliasedEntryName}
+ ID id-oc-alias
+}
+
+parent OBJECT-CLASS ::= {KIND abstract
+ ID id-oc-parent
+}
+
+child OBJECT-CLASS ::= {KIND auxiliary
+ ID id-oc-child
+}
+
+-- ATTRIBUTE information object class specification
+ATTRIBUTE ::= CLASS {
+ &derivation ATTRIBUTE OPTIONAL,
+ &Type OPTIONAL, -- either &Type or &derivation required
+ &equality-match MATCHING-RULE OPTIONAL,
+ &ordering-match MATCHING-RULE OPTIONAL,
+ &substrings-match MATCHING-RULE OPTIONAL,
+ &single-valued BOOLEAN DEFAULT FALSE,
+ &collective BOOLEAN DEFAULT FALSE,
+ &dummy BOOLEAN DEFAULT FALSE,
+ -- operational extensions
+ &no-user-modification BOOLEAN DEFAULT FALSE,
+ &usage AttributeUsage DEFAULT userApplications,
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ [SUBTYPE OF &derivation]
+ [WITH SYNTAX &Type]
+ [EQUALITY MATCHING RULE &equality-match]
+ [ORDERING MATCHING RULE &ordering-match]
+ [SUBSTRINGS MATCHING RULE &substrings-match]
+ [SINGLE VALUE &single-valued]
+ [COLLECTIVE &collective]
+ [DUMMY &dummy]
+ [NO USER MODIFICATION &no-user-modification]
+ [USAGE &usage]
+ ID &id
+}
+
+AttributeUsage ::= ENUMERATED {
+ userApplications(0), directoryOperation(1), distributedOperation(2),
+ dSAOperation(3)}
+
+-- attributes
+objectClass ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-at-objectClass
+}
+
+aliasedEntryName ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ ID id-at-aliasedEntryName
+}
+
+-- MATCHING-RULE information object class specification
+MATCHING-RULE ::= CLASS {
+ &ParentMatchingRules MATCHING-RULE OPTIONAL,
+ &AssertionType OPTIONAL,
+ &uniqueMatchIndicator ATTRIBUTE OPTIONAL,
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ [PARENT &ParentMatchingRules]
+ [SYNTAX &AssertionType]
+ [UNIQUE-MATCH-INDICATOR &uniqueMatchIndicator]
+ ID &id
+}
+
+-- matching rules
+objectIdentifierMatch MATCHING-RULE ::= {
+ SYNTAX OBJECT IDENTIFIER
+ ID id-mr-objectIdentifierMatch
+}
+
+distinguishedNameMatch MATCHING-RULE ::= {
+ SYNTAX DistinguishedName
+ ID id-mr-distinguishedNameMatch
+}
+
+MAPPING-BASED-MATCHING{SelectedBy, BOOLEAN:combinable, MappingResult,
+ OBJECT IDENTIFIER:matchingRule} ::= CLASS {
+ &selectBy SelectedBy OPTIONAL,
+ &ApplicableTo ATTRIBUTE,
+ &subtypesIncluded BOOLEAN DEFAULT TRUE,
+ &combinable BOOLEAN(combinable),
+ &mappingResults MappingResult OPTIONAL,
+ &userControl BOOLEAN DEFAULT FALSE,
+ &exclusive BOOLEAN DEFAULT TRUE,
+ &matching-rule MATCHING-RULE.&id(matchingRule),
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ [SELECT BY &selectBy]
+ APPLICABLE TO &ApplicableTo
+ [SUBTYPES INCLUDED &subtypesIncluded]
+ COMBINABLE &combinable
+ [MAPPING RESULTS &mappingResults]
+ [USER CONTROL &userControl]
+ [EXCLUSIVE &exclusive]
+ MATCHING RULE &matching-rule
+ ID &id
+}
+
+-- NAME-FORM information object class specification
+NAME-FORM ::= CLASS {
+ &namedObjectClass OBJECT-CLASS,
+ &MandatoryAttributes ATTRIBUTE,
+ &OptionalAttributes ATTRIBUTE OPTIONAL,
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ NAMES &namedObjectClass
+ WITH ATTRIBUTES &MandatoryAttributes
+ [AND OPTIONALLY &OptionalAttributes]
+ ID &id
+}
+
+-- STRUCTURE-RULE class and DIT structure rule data types
+DITStructureRule ::= SEQUENCE {
+ ruleIdentifier RuleIdentifier,
+ -- shall be unique within the scope of the subschema
+ nameForm NAME-FORM.&id,
+ superiorStructureRules SET SIZE (1..MAX) OF RuleIdentifier OPTIONAL
+}
+
+RuleIdentifier ::= INTEGER
+
+STRUCTURE-RULE ::= CLASS {
+ &nameForm NAME-FORM,
+ &SuperiorStructureRules STRUCTURE-RULE OPTIONAL,
+ &id RuleIdentifier
+}
+WITH SYNTAX {
+ NAME FORM &nameForm
+ [SUPERIOR RULES &SuperiorStructureRules]
+ ID &id
+}
+
+-- DIT content rule data type and CONTENT-RULE class
+DITContentRule ::= SEQUENCE {
+ structuralObjectClass OBJECT-CLASS.&id,
+ auxiliaries SET SIZE (1..MAX) OF OBJECT-CLASS.&id OPTIONAL,
+ mandatory [1] SET SIZE (1..MAX) OF ATTRIBUTE.&id OPTIONAL,
+ optional [2] SET SIZE (1..MAX) OF ATTRIBUTE.&id OPTIONAL,
+ precluded [3] SET SIZE (1..MAX) OF ATTRIBUTE.&id OPTIONAL
+}
+
+CONTENT-RULE ::= CLASS {
+ &structuralClass OBJECT-CLASS.&id UNIQUE,
+ &Auxiliaries OBJECT-CLASS OPTIONAL,
+ &Mandatory ATTRIBUTE OPTIONAL,
+ &Optional ATTRIBUTE OPTIONAL,
+ &Precluded ATTRIBUTE OPTIONAL
+}
+WITH SYNTAX {
+ STRUCTURAL OBJECT-CLASS &structuralClass
+ [AUXILIARY OBJECT-CLASSES &Auxiliaries]
+ [MUST CONTAIN &Mandatory]
+ [MAY CONTAIN &Optional]
+ [MUST-NOT CONTAIN &Precluded]
+}
+
+CONTEXT ::= CLASS {
+ &Type ,
+ &DefaultValue OPTIONAL,
+ &Assertion OPTIONAL,
+ &absentMatch BOOLEAN DEFAULT TRUE,
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ WITH SYNTAX &Type
+ [DEFAULT-VALUE &DefaultValue]
+ [ASSERTED AS &Assertion]
+ [ABSENT-MATCH &absentMatch]
+ ID &id
+}
+
+DITContextUse ::= SEQUENCE {
+ attributeType ATTRIBUTE.&id,
+ mandatoryContexts [1] SET SIZE (1..MAX) OF CONTEXT.&id OPTIONAL,
+ optionalContexts [2] SET SIZE (1..MAX) OF CONTEXT.&id OPTIONAL
+}
+
+DIT-CONTEXT-USE-RULE ::= CLASS {
+ &attributeType ATTRIBUTE.&id UNIQUE,
+ &Mandatory CONTEXT OPTIONAL,
+ &Optional CONTEXT OPTIONAL
+}
+WITH SYNTAX {
+ ATTRIBUTE TYPE &attributeType
+ [MANDATORY CONTEXTS &Mandatory]
+ [OPTIONAL CONTEXTS &Optional]
+}
+
+FRIENDS ::= CLASS {
+ &anchor ATTRIBUTE.&id UNIQUE,
+ &Friends ATTRIBUTE
+}WITH SYNTAX {ANCHOR &anchor
+ FRIENDS &Friends
+}
+
+-- system schema information objects
+-- object classes
+subentry OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND structural
+ MUST CONTAIN {commonName | subtreeSpecification}
+ ID id-sc-subentry
+}
+
+subentryNameForm NAME-FORM ::= {
+ NAMES subentry
+ WITH ATTRIBUTES {commonName}
+ ID id-nf-subentryNameForm
+}
+
+subtreeSpecification ATTRIBUTE ::= {
+ WITH SYNTAX SubtreeSpecification
+ USAGE directoryOperation
+ ID id-oa-subtreeSpecification
+}
+
+administrativeRole ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT-CLASS.&id
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ USAGE directoryOperation
+ ID id-oa-administrativeRole
+}
+
+createTimestamp ATTRIBUTE ::= {
+ WITH SYNTAX GeneralizedTime
+ -- as per 46.3 b) or c) of ITU-T Rec. X.680 | ISO/IEC 8824-1
+ EQUALITY MATCHING RULE generalizedTimeMatch
+ ORDERING MATCHING RULE generalizedTimeOrderingMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-createTimestamp
+}
+
+modifyTimestamp ATTRIBUTE ::= {
+ WITH SYNTAX GeneralizedTime
+ -- as per 46.3 b) or c) of ITU-T Rec. X.680 | ISO/IEC 8824-1
+ EQUALITY MATCHING RULE generalizedTimeMatch
+ ORDERING MATCHING RULE generalizedTimeOrderingMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-modifyTimestamp
+}
+
+subschemaTimestamp ATTRIBUTE ::= {
+ WITH SYNTAX GeneralizedTime
+ -- as per 46.3 b) or c) of ITU-T Rec. X.680 | ISO/IEC 8824-1
+ EQUALITY MATCHING RULE generalizedTimeMatch
+ ORDERING MATCHING RULE generalizedTimeOrderingMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-subschemaTimestamp
+}
+
+creatorsName ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-creatorsName
+}
+
+modifiersName ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-modifiersName
+}
+
+subschemaSubentryList ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-subschemaSubentryList
+}
+
+accessControlSubentryList ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-accessControlSubentryList
+}
+
+collectiveAttributeSubentryList ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-collectiveAttributeSubentryList
+}
+
+contextDefaultSubentryList ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-contextDefaultSubentryList
+}
+
+serviceAdminSubentryList ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-serviceAdminSubentryList
+}
+
+hasSubordinates ATTRIBUTE ::= {
+ WITH SYNTAX BOOLEAN
+ EQUALITY MATCHING RULE booleanMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-hasSubordinates
+}
+
+accessControlSubentry OBJECT-CLASS ::= {
+ KIND auxiliary
+ ID id-sc-accessControlSubentry
+}
+
+collectiveAttributeSubentry OBJECT-CLASS ::= {
+ KIND auxiliary
+ ID id-sc-collectiveAttributeSubentry
+}
+
+collectiveExclusions ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ USAGE directoryOperation
+ ID id-oa-collectiveExclusions
+}
+
+contextAssertionSubentry OBJECT-CLASS ::= {
+ KIND auxiliary
+ MUST CONTAIN {contextAssertionDefaults}
+ ID id-sc-contextAssertionSubentry
+}
+
+contextAssertionDefaults ATTRIBUTE ::= {
+ WITH SYNTAX TypeAndContextAssertion
+ EQUALITY MATCHING RULE objectIdentifierFirstComponentMatch
+ USAGE directoryOperation
+ ID id-oa-contextAssertionDefault
+}
+
+serviceAdminSubentry OBJECT-CLASS ::= {
+ KIND auxiliary
+ MUST CONTAIN {searchRules}
+ ID id-sc-serviceAdminSubentry
+}
+
+searchRules ATTRIBUTE ::= {
+ WITH SYNTAX SearchRuleDescription
+ EQUALITY MATCHING RULE integerFirstComponentMatch
+ USAGE directoryOperation
+ ID id-oa-searchRules
+}
+
+SearchRuleDescription ::= SEQUENCE {
+ COMPONENTS OF SearchRule,
+ name [28] SET SIZE (1..MAX) OF UnboundedDirectoryString OPTIONAL,
+ description [29] UnboundedDirectoryString OPTIONAL
+}
+
+hierarchyLevel ATTRIBUTE ::= {
+ WITH SYNTAX HierarchyLevel
+ EQUALITY MATCHING RULE integerMatch
+ ORDERING MATCHING RULE integerOrderingMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-hierarchyLevel
+}
+
+HierarchyLevel ::= INTEGER
+
+hierarchyBelow ATTRIBUTE ::= {
+ WITH SYNTAX HierarchyBelow
+ EQUALITY MATCHING RULE booleanMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-hierarchyBelow
+}
+
+HierarchyBelow ::= BOOLEAN
+
+hierarchyParent ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ USAGE directoryOperation
+ ID id-oa-hierarchyParent
+}
+
+hierarchyTop ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ USAGE directoryOperation
+ ID id-oa-hierarchyTop
+}
+
+-- object identifier assignments
+-- object classes
+id-oc-top OBJECT IDENTIFIER ::=
+ {id-oc 0}
+
+id-oc-alias OBJECT IDENTIFIER ::= {id-oc 1}
+
+id-oc-parent OBJECT IDENTIFIER ::= {id-oc 28}
+
+id-oc-child OBJECT IDENTIFIER ::= {id-oc 29}
+
+-- attributes
+id-at-objectClass OBJECT IDENTIFIER ::= {id-at 0}
+
+id-at-aliasedEntryName OBJECT IDENTIFIER ::= {id-at 1}
+
+-- matching rules
+id-mr-objectIdentifierMatch OBJECT IDENTIFIER ::= {id-mr 0}
+
+id-mr-distinguishedNameMatch OBJECT IDENTIFIER ::= {id-mr 1}
+
+-- operational attributes
+id-oa-excludeAllCollectiveAttributes OBJECT IDENTIFIER ::=
+ {id-oa 0}
+
+id-oa-createTimestamp OBJECT IDENTIFIER ::= {id-oa 1}
+
+id-oa-modifyTimestamp OBJECT IDENTIFIER ::= {id-oa 2}
+
+id-oa-creatorsName OBJECT IDENTIFIER ::= {id-oa 3}
+
+id-oa-modifiersName OBJECT IDENTIFIER ::= {id-oa 4}
+
+id-oa-administrativeRole OBJECT IDENTIFIER ::= {id-oa 5}
+
+id-oa-subtreeSpecification OBJECT IDENTIFIER ::= {id-oa 6}
+
+id-oa-collectiveExclusions OBJECT IDENTIFIER ::= {id-oa 7}
+
+id-oa-subschemaTimestamp OBJECT IDENTIFIER ::= {id-oa 8}
+
+id-oa-hasSubordinates OBJECT IDENTIFIER ::= {id-oa 9}
+
+id-oa-subschemaSubentryList OBJECT IDENTIFIER ::= {id-oa 10}
+
+id-oa-accessControlSubentryList OBJECT IDENTIFIER ::= {id-oa 11}
+
+id-oa-collectiveAttributeSubentryList OBJECT IDENTIFIER ::= {id-oa 12}
+
+id-oa-contextDefaultSubentryList OBJECT IDENTIFIER ::= {id-oa 13}
+
+id-oa-contextAssertionDefault OBJECT IDENTIFIER ::= {id-oa 14}
+
+id-oa-serviceAdminSubentryList OBJECT IDENTIFIER ::= {id-oa 15}
+
+id-oa-searchRules OBJECT IDENTIFIER ::= {id-oa 16}
+
+id-oa-hierarchyLevel OBJECT IDENTIFIER ::= {id-oa 17}
+
+id-oa-hierarchyBelow OBJECT IDENTIFIER ::= {id-oa 18}
+
+id-oa-hierarchyParent OBJECT IDENTIFIER ::= {id-oa 19}
+
+id-oa-hierarchyTop OBJECT IDENTIFIER ::= {id-oa 20}
+
+-- subentry classes
+id-sc-subentry OBJECT IDENTIFIER ::= {id-sc 0}
+
+id-sc-accessControlSubentry OBJECT IDENTIFIER ::= {id-sc 1}
+
+id-sc-collectiveAttributeSubentry OBJECT IDENTIFIER ::= {id-sc 2}
+
+id-sc-contextAssertionSubentry OBJECT IDENTIFIER ::= {id-sc 3}
+
+id-sc-serviceAdminSubentry OBJECT IDENTIFIER ::= {id-sc 4}
+
+-- Name forms
+id-nf-subentryNameForm OBJECT IDENTIFIER ::= {id-nf 16}
+
+-- administrative roles
+id-ar-autonomousArea OBJECT IDENTIFIER ::= {id-ar 1}
+
+id-ar-accessControlSpecificArea OBJECT IDENTIFIER ::= {id-ar 2}
+
+id-ar-accessControlInnerArea OBJECT IDENTIFIER ::= {id-ar 3}
+
+id-ar-subschemaAdminSpecificArea OBJECT IDENTIFIER ::= {id-ar 4}
+
+id-ar-collectiveAttributeSpecificArea OBJECT IDENTIFIER ::= {id-ar 5}
+
+id-ar-collectiveAttributeInnerArea OBJECT IDENTIFIER ::= {id-ar 6}
+
+id-ar-contextDefaultSpecificArea OBJECT IDENTIFIER ::= {id-ar 7}
+
+id-ar-serviceSpecificArea OBJECT IDENTIFIER ::= {id-ar 8}
+
+END -- InformationFramework
diff --git a/lib/public_key/asn1/Makefile b/lib/public_key/asn1/Makefile
index 4bd043ee5d..957c332cad 100644
--- a/lib/public_key/asn1/Makefile
+++ b/lib/public_key/asn1/Makefile
@@ -40,7 +40,8 @@ RELSYSDIR = $(RELEASE_PATH)/lib/public_key-$(VSN)
ASN_TOP = OTP-PUB-KEY PKCS-FRAME
ASN_MODULES = PKIX1Explicit88 PKIX1Implicit88 PKIX1Algorithms88 \
- PKIXAttributeCertificate PKCS-1 PKCS-3 PKCS-8 PKCS5v2-0 OTP-PKIX
+ PKIXAttributeCertificate PKCS-1 PKCS-3 PKCS-7 PKCS-8 PKCS-10 PKCS5v2-0 OTP-PKIX \
+ InformationFramework
ASN_ASNS = $(ASN_MODULES:%=%.asn1)
ASN_ERLS = $(ASN_TOP:%=%.erl)
ASN_HRLS = $(ASN_TOP:%=%.hrl)
@@ -112,9 +113,12 @@ OTP-PUB-KEY.asn1db: PKIX1Algorithms88.asn1 \
PKIXAttributeCertificate.asn1 \
PKCS-1.asn1\
PKCS-3.asn1\
+ PKCS-7.asn1\
+ PKCS-10.asn1\
+ InformationFramework.asn1\
OTP-PKIX.asn1
$(EBIN)/PKCS-FRAME.beam: PKCS-FRAME.erl PKCS-FRAME.hrl
-PKCS-FRAME.erl PKCS-FRAME.hrl: PKCS-FRAME.asn1db
-PKCS-FRAME.asn1db: PKCS-8.asn1\
- PKCS5v2-0.asn1
\ No newline at end of file
+PKCS-FRAME.erl PKCS-FRAME.hrl: PKCS-FRAME.asn1db
+PKCS-FRAME.asn1db: PKCS5v2-0.asn1\
+ PKCS-8.asn1\
diff --git a/lib/public_key/asn1/OTP-PUB-KEY.set.asn b/lib/public_key/asn1/OTP-PUB-KEY.set.asn
index 5c76d13115..f8fb318c93 100644
--- a/lib/public_key/asn1/OTP-PUB-KEY.set.asn
+++ b/lib/public_key/asn1/OTP-PUB-KEY.set.asn
@@ -6,3 +6,5 @@ PKIX1Algorithms88.asn1
PKCS-1.asn1
PKCS-3.asn1
DSS.asn1
+PKCS-7.asn1
+PKCS-10.asn1
diff --git a/lib/public_key/asn1/PKCS-10.asn1 b/lib/public_key/asn1/PKCS-10.asn1
new file mode 100644
index 0000000000..333104d230
--- /dev/null
+++ b/lib/public_key/asn1/PKCS-10.asn1
@@ -0,0 +1,70 @@
+PKCS-10 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
+pkcs-10(10) modules(1) pkcs-10(1)}
+
+-- $Revision: 1.3 $ --
+
+DEFINITIONS IMPLICIT TAGS ::=
+
+BEGIN
+
+-- EXPORTS All --
+-- All types and values defined in this module are exported for use
+-- in other ASN.1 modules.
+
+IMPORTS
+
+--informationFramework, authenticationFramework
+-- FROM UsefulDefinitions {joint-iso-itu-t(2) ds(5) module(1)
+-- usefulDefinitions(0) 3}
+
+ ATTRIBUTE
+ FROM InformationFramework informationFramework
+
+ Name
+ FROM PKIX1Explicit88 --InformationFramework informationFramework
+
+ ALGORITHM
+ FROM PKCS-7; --AuthenticationFramework authenticationFramework;
+
+-- Certificate requests
+
+CertificationRequestInfo ::= SEQUENCE {
+ version INTEGER { v1(0) } (v1,...),
+ subject Name,
+ subjectPKInfo SubjectPublicKeyInfo-PKCS-10{{ PKInfoAlgorithms }},
+ attributes [0] Attributes{{ CRIAttributes }}
+}
+
+SubjectPublicKeyInfo-PKCS-10 {ALGORITHM: IOSet} ::= SEQUENCE {
+ algorithm AlgorithmIdentifierPKCS-10{{IOSet}},
+ subjectPublicKey BIT STRING
+}
+
+PKInfoAlgorithms ALGORITHM ::= {
+ ... -- add any locally defined algorithms here -- }
+
+Attributes { ATTRIBUTE:IOSet } ::= SET OF AttributePKCS-10{{ IOSet }}
+
+CRIAttributes ATTRIBUTE ::= {
+... -- add any locally defined attributes here -- }
+
+AttributePKCS-10 { ATTRIBUTE:IOSet } ::= SEQUENCE {
+ type ATTRIBUTE.&id({IOSet}),
+ values SET SIZE(1..MAX) OF ATTRIBUTE.&Type({IOSet}{@type})
+}
+
+CertificationRequest ::= SEQUENCE {
+ certificationRequestInfo CertificationRequestInfo,
+ signatureAlgorithm AlgorithmIdentifierPKCS-10{{ SignatureAlgorithms }},
+ signature BIT STRING
+}
+
+AlgorithmIdentifierPKCS-10 {ALGORITHM:IOSet } ::= SEQUENCE {
+ algorithm ALGORITHM.&id({IOSet}),
+ parameters ALGORITHM.&Type({IOSet}{@algorithm}) OPTIONAL
+}
+
+SignatureAlgorithms ALGORITHM ::= {
+ ... -- add any locally defined algorithms here -- }
+
+END
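
With PKCS-10 added to the OTP-PUB-KEY set above, the generated decoder also understands DER-encoded certification requests. A minimal usage sketch, not part of this patch: it assumes the generated 'OTP-PUB-KEY' module and its records (pulled in via public_key.hrl) are available, and that decode/2 follows the usual {ok, Value} convention of asn1-generated modules.

-module(pkcs10_sketch).            %% hypothetical helper, illustration only
-export([csr_subject/1]).

%% Record definitions for 'CertificationRequest' et al. come from the
%% generated OTP-PUB-KEY.hrl, assumed to be included by public_key.hrl.
-include_lib("public_key/include/public_key.hrl").

%% Read a DER-encoded PKCS#10 request and return its subject Name.
csr_subject(File) ->
    {ok, Der} = file:read_file(File),
    {ok, #'CertificationRequest'{certificationRequestInfo = Info}} =
        'OTP-PUB-KEY':decode('CertificationRequest', Der),
    Info#'CertificationRequestInfo'.subject.
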
diff --git a/lib/public_key/asn1/PKCS-7.asn1 b/lib/public_key/asn1/PKCS-7.asn1
new file mode 100644
index 0000000000..a6dfd57d80
--- /dev/null
+++ b/lib/public_key/asn1/PKCS-7.asn1
@@ -0,0 +1,387 @@
+PKCS-7 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-7(7)
+ modules(0) pkcs-7(1)}
+
+DEFINITIONS EXPLICIT TAGS ::=
+BEGIN
+
+--
+-- 3. Definitions
+--
+
+-- EXPORTS All;
+
+IMPORTS
+
+informationFramework, authenticationFramework
+ FROM UsefulDefinitions {joint-iso-itu-t ds(5) module(1)
+ usefulDefinitions(0) 3}
+
+ ATTRIBUTE
+ FROM InformationFramework informationFramework
+
+ Name, Certificate, CertificateSerialNumber,
+ CertificateList, Time
+ FROM PKIX1Explicit88; -- AuthenticationFramework authenticationFramework;
+
+-- contentType, messageDigest, signingTime
+-- , counterSignature
+-- FROM PKCS-9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
+-- pkcs-9(9) modules(0) pkcs-9(1)};
+--
+-- 6. Useful types
+--
+
+-- inlined from AuthenticationFramework
+
+ALGORITHM ::= CLASS {&Type OPTIONAL,
+ &id OBJECT IDENTIFIER UNIQUE
+}WITH SYNTAX {[&Type]
+ IDENTIFIED BY &id
+}
+
+-- inlined from PKCS-9
+
+pkcs-9 OBJECT IDENTIFIER ::= {iso(1) member-body(2) us(840)
+ rsadsi(113549) pkcs(1) 9}
+
+contentType ATTRIBUTE ::= {
+ WITH SYNTAX ContentType
+-- EQUALITY MATCHING RULE objectIdentifierMatch
+ SINGLE VALUE TRUE
+ ID pkcs-9-at-contentType
+}
+
+pkcs-9-at-contentType OBJECT IDENTIFIER ::= {pkcs-9 3}
+pkcs-9-at-messageDigest OBJECT IDENTIFIER ::= {pkcs-9 4}
+pkcs-9-at-signingTime OBJECT IDENTIFIER ::= {pkcs-9 5}
+pkcs-9-at-counterSignature OBJECT IDENTIFIER ::= {pkcs-9 6}
+
+counterSignature ATTRIBUTE ::= {
+ WITH SYNTAX SignerInfo
+ ID pkcs-9-at-counterSignature
+}
+messageDigest ATTRIBUTE ::= {
+ WITH SYNTAX MessageDigest
+-- EQUALITY MATCHING RULE octetStringMatch
+ SINGLE VALUE TRUE
+ ID pkcs-9-at-messageDigest
+}
+
+MessageDigest ::= OCTET STRING
+
+signingTime ATTRIBUTE ::= {
+ WITH SYNTAX SigningTime
+-- EQUALITY MATCHING RULE signingTimeMatch
+ SINGLE VALUE TRUE
+ ID pkcs-9-at-signingTime
+}
+
+SigningTime ::= Time -- imported from ISO/IEC 9594-8
+
+
+-- Also defined in X.509
+-- Redeclared here as a parameterized type
+AlgorithmIdentifierPKSC-7 {ALGORITHM:IOSet} ::= SEQUENCE {
+ algorithm ALGORITHM.&id({IOSet}),
+ parameters ALGORITHM.&Type({IOSet}{@algorithm}) OPTIONAL
+}
+
+-- Also defined in X.501
+-- Redeclared here as a parameterized type
+AttributePKCS-7 { ATTRIBUTE:IOSet } ::= SEQUENCE {
+ type ATTRIBUTE.&id({IOSet}),
+ values SET SIZE (1..MAX) OF ATTRIBUTE.&Type({IOSet}{@type})
+}
+
+CertificateRevocationLists ::=
+ SET OF CertificateList
+
+Certificates ::=
+ SEQUENCE OF Certificate
+
+CRLSequence ::=
+ SEQUENCE OF CertificateList
+
+ContentEncryptionAlgorithmIdentifier ::=
+ AlgorithmIdentifierPKSC-7 {{ContentEncryptionAlgorithms}}
+
+ContentEncryptionAlgorithms ALGORITHM ::= {
+ ... -- add any application-specific algorithms here
+}
+
+DigestAlgorithmIdentifier ::=
+ AlgorithmIdentifierPKSC-7 {{DigestAlgorithms}}
+
+DigestAlgorithms ALGORITHM ::= {
+ ... -- add any application-specific algorithms here
+}
+
+DigestEncryptionAlgorithmIdentifier ::=
+ AlgorithmIdentifierPKSC-7 {{DigestEncryptionAlgorithms}}
+
+DigestEncryptionAlgorithms ALGORITHM ::= {
+ ... -- add any application-specific algorithms here
+}
+
+ExtendedCertificateOrCertificate ::= CHOICE {
+ certificate Certificate, -- X.509
+ extendedCertificate [0] IMPLICIT ExtendedCertificate -- PKCS#6
+}
+
+ExtendedCertificate ::= Certificate -- cheating
+
+ExtendedCertificatesAndCertificates ::=
+ SET OF ExtendedCertificateOrCertificate
+
+IssuerAndSerialNumber ::= SEQUENCE {
+ issuer Name,
+ serialNumber CertificateSerialNumber
+}
+
+KeyEncryptionAlgorithmIdentifier ::=
+ AlgorithmIdentifierPKSC-7 {{KeyEncryptionAlgorithms}}
+
+KeyEncryptionAlgorithms ALGORITHM ::= {
+ ... -- add any application-specific algorithms here
+}
+
+--
+-- 7. General syntax
+--
+
+ContentInfo ::= SEQUENCE {
+-- contentType ContentType,
+ contentType CONTENTS.&id({Contents}),
+ content [0] EXPLICIT CONTENTS.&Type({Contents}{@contentType})
+OPTIONAL
+}
+
+CONTENTS ::= TYPE-IDENTIFIER
+
+Contents CONTENTS ::= {
+ {Data IDENTIFIED BY data} |
+ {SignedData IDENTIFIED BY signedData} |
+ {EnvelopedData IDENTIFIED BY envelopedData} |
+ {SignedAndEnvelopedData IDENTIFIED BY signedAndEnvelopedData} |
+ {DigestedData IDENTIFIED BY digestedData} |
+ {EncryptedData IDENTIFIED BY encryptedData},
+ ... -- add any application-specific types/contents here
+}
+
+ContentType ::= CONTENTS.&id({Contents})
+
+--
+-- 8. Data content type
+--
+
+Data ::= OCTET STRING
+
+--
+-- 9. Signed-data content type
+--
+
+SignedData ::= SEQUENCE {
+-- version INTEGER {sdVer1(1), sdVer2(2)} (sdVer1 | sdVer2),
+ version INTEGER {sdVer1(1), sdVer2(2)},
+ digestAlgorithms
+ DigestAlgorithmIdentifiers,
+ contentInfo ContentInfo,
+ certificates CHOICE {
+ certSet [0] IMPLICIT ExtendedCertificatesAndCertificates,
+ certSequence [2] IMPLICIT Certificates
+ } OPTIONAL,
+ crls CHOICE {
+ crlSet [1] IMPLICIT CertificateRevocationLists,
+ crlSequence [3] IMPLICIT CRLSequence
+ } OPTIONAL,
+ signerInfos SignerInfos
+} (WITH COMPONENTS { ..., version (sdVer1),
+ digestAlgorithms (WITH COMPONENTS { ..., daSet PRESENT }),
+ certificates (WITH COMPONENTS { ..., certSequence ABSENT }),
+ crls (WITH COMPONENTS { ..., crlSequence ABSENT }),
+ signerInfos (WITH COMPONENTS { ..., siSet PRESENT })
+ } |
+ WITH COMPONENTS { ..., version (sdVer2),
+ digestAlgorithms (WITH COMPONENTS { ..., daSequence PRESENT }),
+ certificates (WITH COMPONENTS { ..., certSet ABSENT }),
+ crls (WITH COMPONENTS { ..., crlSet ABSENT }),
+ signerInfos (WITH COMPONENTS { ..., siSequence PRESENT })
+})
+
+SignerInfos ::= CHOICE {
+ siSet SET OF SignerInfo,
+ siSequence SEQUENCE OF SignerInfo
+}
+
+DigestAlgorithmIdentifiers ::= CHOICE {
+ daSet SET OF DigestAlgorithmIdentifier,
+ daSequence SEQUENCE OF DigestAlgorithmIdentifier
+}
+
+SignerInfo ::= SEQUENCE {
+-- version INTEGER {siVer1(1), siVer2(2)} (siVer1 | siVer2),
+ version INTEGER {siVer1(1), siVer2(2)},
+ issuerAndSerialNumber
+ IssuerAndSerialNumber,
+ digestAlgorithm DigestAlgorithmIdentifier,
+ authenticatedAttributes CHOICE {
+ aaSet [0] IMPLICIT SET OF AttributePKCS-7 {{Authenticated}},
+ aaSequence [2] EXPLICIT SEQUENCE OF AttributePKCS-7 {{Authenticated}}
+ -- Explicit because easier to compute digest on sequence of attributes and then reuse
+ -- encoded sequence in aaSequence.
+ } OPTIONAL,
+ digestEncryptionAlgorithm
+ DigestEncryptionAlgorithmIdentifier,
+ encryptedDigest EncryptedDigest,
+ unauthenticatedAttributes CHOICE {
+ uaSet [1] IMPLICIT SET OF AttributePKCS-7 {{Unauthenticated}},
+ uaSequence [3] IMPLICIT SEQUENCE OF AttributePKCS-7 {{Unauthenticated}}
+ } OPTIONAL
+} (WITH COMPONENTS { ..., version (siVer1),
+ authenticatedAttributes (WITH COMPONENTS { ..., aaSequence ABSENT }),
+ unauthenticatedAttributes (WITH COMPONENTS { ..., uaSequence ABSENT })
+} | WITH COMPONENTS { ..., version (siVer2),
+ authenticatedAttributes (WITH COMPONENTS { ..., aaSet ABSENT }),
+ unauthenticatedAttributes (WITH COMPONENTS { ..., uaSet ABSENT })
+})
+
+Authenticated ATTRIBUTE ::= {
+ contentType |
+ messageDigest,
+ ..., -- add application-specific attributes here
+ signingTime
+}
+
+Unauthenticated ATTRIBUTE ::= {
+ contentType |
+ messageDigest,
+ ..., -- add application-specific attributes here
+ counterSignature
+-- ..., add application-specific attributes here
+-- counterSignature
+}
+
+EncryptedDigest ::= OCTET STRING
+
+DigestInfo ::= SEQUENCE {
+ digestAlgorithm DigestAlgorithmIdentifier,
+ digest Digest
+}
+
+Digest ::= OCTET STRING
+
+--
+-- 10. Enveloped-data content type
+--
+
+EnvelopedData ::= SEQUENCE {
+-- version INTEGER {edVer0(0), edVer1(1)} (edVer0 | edVer1),
+ version INTEGER {edVer0(0), edVer1(1)},
+ recipientInfos RecipientInfos,
+ encryptedContentInfo
+ EncryptedContentInfo
+} (WITH COMPONENTS { ..., version (edVer0),
+ recipientInfos (WITH COMPONENTS { ..., riSet PRESENT })
+} | WITH COMPONENTS { ..., version (edVer1),
+ recipientInfos (WITH COMPONENTS { ..., riSequence PRESENT })
+})
+
+RecipientInfos ::= CHOICE {
+ riSet SET OF RecipientInfo,
+ riSequence SEQUENCE OF RecipientInfo
+}
+
+EncryptedContentInfo ::= SEQUENCE {
+ contentType ContentType,
+ contentEncryptionAlgorithm
+ ContentEncryptionAlgorithmIdentifier,
+ encryptedContent
+ [0] IMPLICIT EncryptedContent OPTIONAL
+}
+
+EncryptedContent ::= OCTET STRING
+
+RecipientInfo ::= SEQUENCE {
+-- version INTEGER {riVer0(0)} (riVer0),
+ version INTEGER {riVer0(0)},
+ issuerAndSerialNumber
+ IssuerAndSerialNumber,
+ keyEncryptionAlgorithm
+ KeyEncryptionAlgorithmIdentifier,
+ encryptedKey EncryptedKey
+}
+
+EncryptedKey ::= OCTET STRING
+
+--
+-- 11. Signed-and-enveloped-data content type
+--
+
+SignedAndEnvelopedData ::= SEQUENCE {
+-- version INTEGER {seVer1(1), seVer2(2)} (seVer1 | seVer2),
+ version INTEGER {seVer1(1), seVer2(2)},
+ recipientInfos RecipientInfos,
+ digestAlgorithms
+ DigestAlgorithmIdentifiers,
+ encryptedContentInfo
+ EncryptedContentInfo,
+ certificates CHOICE {
+ certSet [0] IMPLICIT ExtendedCertificatesAndCertificates,
+ certSequence [2] IMPLICIT Certificates
+ } OPTIONAL,
+ crls CHOICE {
+ crlSet [1] IMPLICIT CertificateRevocationLists,
+ crlSequence [3] IMPLICIT CRLSequence
+ } OPTIONAL,
+ signerInfos SignerInfos
+} (WITH COMPONENTS { ..., version (seVer1),
+ recipientInfos (WITH COMPONENTS { ..., riSet PRESENT }),
+ digestAlgorithms (WITH COMPONENTS { ..., daSet PRESENT }),
+ certificates (WITH COMPONENTS { ..., certSequence ABSENT }),
+ crls (WITH COMPONENTS { ..., crlSequence ABSENT }),
+ signerInfos (WITH COMPONENTS { ..., siSet PRESENT })
+} |
+ WITH COMPONENTS { ..., version (seVer2),
+ recipientInfos (WITH COMPONENTS { ..., riSequence PRESENT }),
+ digestAlgorithms (WITH COMPONENTS { ..., daSequence PRESENT }),
+ certificates (WITH COMPONENTS { ..., certSet ABSENT }),
+ crls (WITH COMPONENTS { ..., crlSet ABSENT }),
+ signerInfos (WITH COMPONENTS { ..., siSequence PRESENT })
+})
+
+--
+-- 12. Digested-data content type
+--
+
+DigestedData ::= SEQUENCE {
+-- version INTEGER {ddVer0(0)} (ddVer0),
+ version INTEGER {ddVer0(0)},
+ digestAlgorithm DigestAlgorithmIdentifier,
+ contentInfo ContentInfo,
+ digest Digest
+}
+
+--
+-- 13. Encrypted-data content type
+--
+
+EncryptedData ::= SEQUENCE {
+-- version INTEGER {edVer0(0)} (edVer0),
+ version INTEGER {edVer0(0)},
+ encryptedContentInfo EncryptedContentInfo
+}
+
+--
+-- 14. Object Identifiers
+--
+
+pkcs-7 OBJECT IDENTIFIER ::=
+ { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 7 }
+data OBJECT IDENTIFIER ::= { pkcs-7 1 }
+signedData OBJECT IDENTIFIER ::= { pkcs-7 2 }
+envelopedData OBJECT IDENTIFIER ::= { pkcs-7 3 }
+signedAndEnvelopedData OBJECT IDENTIFIER ::= { pkcs-7 4 }
+digestedData OBJECT IDENTIFIER ::= { pkcs-7 5 }
+encryptedData OBJECT IDENTIFIER ::= { pkcs-7 6 }
+
+END
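
ContentInfo above is defined against a TYPE-IDENTIFIER object set, so its content field is an open type selected by the contentType OID. A minimal dispatch sketch, not part of this patch: module and record names are assumptions following the usual asn1ct conventions, and depending on compile options the open content field may come back fully decoded or as an opaque binary.

-module(pkcs7_sketch).             %% hypothetical helper, illustration only
-export([classify/1]).
-include_lib("public_key/include/public_key.hrl").

%% OID tuples mirroring the pkcs-7 assignments at the end of the module.
-define(PKCS7_DATA,        {1,2,840,113549,1,7,1}).
-define(PKCS7_SIGNED_DATA, {1,2,840,113549,1,7,2}).

%% Classify a DER-encoded ContentInfo by its contentType.
classify(Der) ->
    case 'OTP-PUB-KEY':decode('ContentInfo', Der) of
        {ok, #'ContentInfo'{contentType = ?PKCS7_SIGNED_DATA, content = SD}} ->
            {signedData, SD};
        {ok, #'ContentInfo'{contentType = ?PKCS7_DATA, content = Data}} ->
            {data, Data};
        {ok, #'ContentInfo'{contentType = Other}} ->
            {unknown, Other};
        {error, Reason} ->
            {error, Reason}
    end.
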
diff --git a/lib/public_key/asn1/PKIX1Explicit88.asn1 b/lib/public_key/asn1/PKIX1Explicit88.asn1
index 03e9da3e05..91758d7269 100644
--- a/lib/public_key/asn1/PKIX1Explicit88.asn1
+++ b/lib/public_key/asn1/PKIX1Explicit88.asn1
@@ -206,13 +206,12 @@ DomainComponent ::= IA5String
-- Legacy attributes
-pkcs-9 OBJECT IDENTIFIER ::=
- { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 9 }
-
id-emailAddress AttributeType ::= { pkcs-9 1 }
EmailAddress ::= IA5String (SIZE (1..ub-emailaddress-length))
+-- Legacy attributes
+
-- naming data types --
Name ::= CHOICE { -- only one possibility for now --
diff --git a/lib/public_key/asn1/SelectedAttributeTypes.asn1 b/lib/public_key/asn1/SelectedAttributeTypes.asn1
new file mode 100644
index 0000000000..3ef7077370
--- /dev/null
+++ b/lib/public_key/asn1/SelectedAttributeTypes.asn1
@@ -0,0 +1,1575 @@
+SelectedAttributeTypes {joint-iso-itu-t ds(5) module(1)
+ selectedAttributeTypes(5) 6} DEFINITIONS ::=
+BEGIN
+
+-- EXPORTS All
+-- The types and values defined in this module are exported for use in the other ASN.1 modules contained
+-- within the Directory Specifications, and for the use of other applications which will use them to access
+-- Directory services. Other applications may use them for their own purposes, but this will not constrain
+-- extensions and modifications needed to maintain or improve the Directory service.
+IMPORTS
+ -- from ITU-T Rec. X.501 | ISO/IEC 9594-2
+ directoryAbstractService, id-at, id-avc, id-cat, id-mr, id-not, id-pr,
+ informationFramework, serviceAdministration
+ FROM UsefulDefinitions {joint-iso-itu-t ds(5) module(1)
+ usefulDefinitions(0) 6}
+ Attribute{}, ATTRIBUTE, AttributeType, AttributeValueAssertion, CONTEXT,
+ ContextAssertion, DistinguishedName, distinguishedNameMatch,
+ MAPPING-BASED-MATCHING{}, MATCHING-RULE, OBJECT-CLASS,
+ objectIdentifierMatch, SupportedAttributes
+ FROM InformationFramework informationFramework
+ AttributeCombination, ContextCombination, MRMapping
+ FROM ServiceAdministration serviceAdministration
+ -- from ITU-T Rec. X.511 | ISO/IEC 9594-3
+ FilterItem, HierarchySelections, SearchControlOptions, ServiceControlOptions
+ FROM DirectoryAbstractService directoryAbstractService
+ -- from ITU-T Rec. X.411 | ISO/IEC 10021-4
+ G3FacsimileNonBasicParameters
+ FROM MTSAbstractService {joint-iso-itu-t mhs(6) mts(3) modules(0)
+ mts-abstract-service(1) version-1999(1)};
+
+/*from IETF RFC 3727
+
+The following import is provided for information only (see 7.2.16), it is not referenced by any ASN.1 construct within these Directory Specifications. Note that the ASN.1 module in RFC 3727 imports from the InformationFramework module of edition 4 of ITU-T Rec. X.501 | ISO/IEC 9594-2. A specification importing from both these Directory Specifications and from RFC 3727 should take corrective actions, e.g., by making a copy of the ASN.1 module of
+RFC 3727 and then update the IMPORT statement.
+
+ allComponentsMatch, componentFilterMatch, directoryComponentsMatch, presentMatch, rdnMatch
+ FROM ComponentMatching {iso(1) 2 36 79672281 xed(3) module (0)
+ component-matching(4)} */
+-- Directory string type
+UnboundedDirectoryString ::= CHOICE {
+ teletexString TeletexString(SIZE (1..MAX)),
+ printableString PrintableString(SIZE (1..MAX)),
+ bmpString BMPString(SIZE (1..MAX)),
+ universalString UniversalString(SIZE (1..MAX)),
+ uTF8String UTF8String(SIZE (1..MAX))
+}
+
+DirectoryString{INTEGER:maxSize} ::= CHOICE {
+ teletexString TeletexString(SIZE (1..maxSize)),
+ printableString PrintableString(SIZE (1..maxSize)),
+ bmpString BMPString(SIZE (1..maxSize)),
+ universalString UniversalString(SIZE (1..maxSize)),
+ uTF8String UTF8String(SIZE (1..maxSize))
+}
+
+-- Attribute types
+knowledgeInformation ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ ID id-at-knowledgeInformation
+}
+
+name ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-name
+}
+
+commonName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-commonName
+}
+
+surname ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-surname
+}
+
+givenName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-givenName
+}
+
+initials ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-initials
+}
+
+generationQualifier ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-generationQualifier
+}
+
+uniqueIdentifier ATTRIBUTE ::= {
+ WITH SYNTAX UniqueIdentifier
+ EQUALITY MATCHING RULE bitStringMatch
+ ID id-at-uniqueIdentifier
+}
+
+UniqueIdentifier ::= BIT STRING
+
+dnQualifier ATTRIBUTE ::= {
+ WITH SYNTAX PrintableString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ ORDERING MATCHING RULE caseIgnoreOrderingMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-dnQualifier
+}
+
+serialNumber ATTRIBUTE ::= {
+ WITH SYNTAX PrintableString(SIZE (1..MAX))
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-serialNumber
+}
+
+pseudonym ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-pseudonym
+}
+
+uUIDPair ATTRIBUTE ::= {
+ WITH SYNTAX UUIDPair
+ EQUALITY MATCHING RULE uUIDPairMatch
+ ID id-at-uuidpair
+}
+
+UUIDPair ::= SEQUENCE {issuerUUID UUID,
+ subjectUUID UUID
+}
+
+UUID ::= OCTET STRING(SIZE (16)) -- UUID format only
+
+
+countryName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX CountryName
+ SINGLE VALUE TRUE
+ ID id-at-countryName
+}
+
+CountryName ::= PrintableString(SIZE (2)) -- ISO 3166 codes only
+
+
+localityName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-localityName
+}
+
+collectiveLocalityName ATTRIBUTE ::= {
+ SUBTYPE OF localityName
+ COLLECTIVE TRUE
+ ID id-at-collectiveLocalityName
+}
+
+stateOrProvinceName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-stateOrProvinceName
+}
+
+collectiveStateOrProvinceName ATTRIBUTE ::= {
+ SUBTYPE OF stateOrProvinceName
+ COLLECTIVE TRUE
+ ID id-at-collectiveStateOrProvinceName
+}
+
+streetAddress ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-streetAddress
+}
+
+collectiveStreetAddress ATTRIBUTE ::= {
+ SUBTYPE OF streetAddress
+ COLLECTIVE TRUE
+ ID id-at-collectiveStreetAddress
+}
+
+houseIdentifier ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-houseIdentifier
+}
+
+organizationName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-organizationName
+}
+
+collectiveOrganizationName ATTRIBUTE ::= {
+ SUBTYPE OF organizationName
+ COLLECTIVE TRUE
+ ID id-at-collectiveOrganizationName
+}
+
+organizationalUnitName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-organizationalUnitName
+}
+
+collectiveOrganizationalUnitName ATTRIBUTE ::= {
+ SUBTYPE OF organizationalUnitName
+ COLLECTIVE TRUE
+ ID id-at-collectiveOrganizationalUnitName
+}
+
+title ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-title
+}
+
+description ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-description
+}
+
+searchGuide ATTRIBUTE ::= {WITH SYNTAX Guide
+ ID id-at-searchGuide
+}
+
+Guide ::= SET {
+ objectClass [0] OBJECT-CLASS.&id OPTIONAL,
+ criteria [1] Criteria
+}
+
+Criteria ::= CHOICE {
+ type [0] CriteriaItem,
+ and [1] SET OF Criteria,
+ or [2] SET OF Criteria,
+ not [3] Criteria
+}
+
+CriteriaItem ::= CHOICE {
+ equality [0] AttributeType,
+ substrings [1] AttributeType,
+ greaterOrEqual [2] AttributeType,
+ lessOrEqual [3] AttributeType,
+ approximateMatch [4] AttributeType
+}
+
+enhancedSearchGuide ATTRIBUTE ::= {
+ WITH SYNTAX EnhancedGuide
+ ID id-at-enhancedSearchGuide
+}
+
+EnhancedGuide ::= SEQUENCE {
+ objectClass [0] OBJECT-CLASS.&id,
+ criteria [1] Criteria,
+ subset
+ [2] INTEGER {baseObject(0), oneLevel(1), wholeSubtree(2)} DEFAULT oneLevel
+}
+
+businessCategory ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-businessCategory
+}
+
+postalAddress ATTRIBUTE ::= {
+ WITH SYNTAX PostalAddress
+ EQUALITY MATCHING RULE caseIgnoreListMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreListSubstringsMatch
+ ID id-at-postalAddress
+}
+
+PostalAddress ::= SEQUENCE SIZE (1..MAX) OF UnboundedDirectoryString
+
+collectivePostalAddress ATTRIBUTE ::= {
+ SUBTYPE OF postalAddress
+ COLLECTIVE TRUE
+ ID id-at-collectivePostalAddress
+}
+
+postalCode ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-postalCode
+}
+
+collectivePostalCode ATTRIBUTE ::= {
+ SUBTYPE OF postalCode
+ COLLECTIVE TRUE
+ ID id-at-collectivePostalCode
+}
+
+postOfficeBox ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-postOfficeBox
+}
+
+collectivePostOfficeBox ATTRIBUTE ::= {
+ SUBTYPE OF postOfficeBox
+ COLLECTIVE TRUE
+ ID id-at-collectivePostOfficeBox
+}
+
+physicalDeliveryOfficeName ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-physicalDeliveryOfficeName
+}
+
+collectivePhysicalDeliveryOfficeName ATTRIBUTE ::= {
+ SUBTYPE OF physicalDeliveryOfficeName
+ COLLECTIVE TRUE
+ ID id-at-collectivePhysicalDeliveryOfficeName
+}
+
+telephoneNumber ATTRIBUTE ::= {
+ WITH SYNTAX TelephoneNumber
+ EQUALITY MATCHING RULE telephoneNumberMatch
+ SUBSTRINGS MATCHING RULE telephoneNumberSubstringsMatch
+ ID id-at-telephoneNumber
+}
+
+TelephoneNumber ::= PrintableString(SIZE (1..ub-telephone-number))
+
+-- String complying with ITU-T Rec. E.123 only
+ub-telephone-number INTEGER ::=
+ 32
+
+collectiveTelephoneNumber ATTRIBUTE ::= {
+ SUBTYPE OF telephoneNumber
+ COLLECTIVE TRUE
+ ID id-at-collectiveTelephoneNumber
+}
+
+telexNumber ATTRIBUTE ::= {
+ WITH SYNTAX TelexNumber
+ ID id-at-telexNumber
+}
+
+TelexNumber ::= SEQUENCE {
+ telexNumber PrintableString(SIZE (1..ub-telex-number)),
+ countryCode PrintableString(SIZE (1..ub-country-code)),
+ answerback PrintableString(SIZE (1..ub-answerback))
+}
+
+ub-telex-number INTEGER ::= 14
+
+ub-country-code INTEGER ::= 4
+
+ub-answerback INTEGER ::= 8
+
+collectiveTelexNumber ATTRIBUTE ::= {
+ SUBTYPE OF telexNumber
+ COLLECTIVE TRUE
+ ID id-at-collectiveTelexNumber
+}
+
+facsimileTelephoneNumber ATTRIBUTE ::= {
+ WITH SYNTAX FacsimileTelephoneNumber
+ EQUALITY MATCHING RULE facsimileNumberMatch
+ SUBSTRINGS MATCHING RULE facsimileNumberSubstringsMatch
+ ID id-at-facsimileTelephoneNumber
+}
+
+FacsimileTelephoneNumber ::= SEQUENCE {
+ telephoneNumber TelephoneNumber,
+ parameters G3FacsimileNonBasicParameters OPTIONAL
+}
+
+collectiveFacsimileTelephoneNumber ATTRIBUTE ::= {
+ SUBTYPE OF facsimileTelephoneNumber
+ COLLECTIVE TRUE
+ ID id-at-collectiveFacsimileTelephoneNumber
+}
+
+x121Address ATTRIBUTE ::= {
+ WITH SYNTAX X121Address
+ EQUALITY MATCHING RULE numericStringMatch
+ SUBSTRINGS MATCHING RULE numericStringSubstringsMatch
+ ID id-at-x121Address
+}
+
+X121Address ::= NumericString(SIZE (1..ub-x121-address))
+
+-- String as defined by ITU-T Rec. X.121
+ub-x121-address INTEGER ::= 15
+
+internationalISDNNumber ATTRIBUTE ::= {
+ WITH SYNTAX InternationalISDNNumber
+ EQUALITY MATCHING RULE numericStringMatch
+ SUBSTRINGS MATCHING RULE numericStringSubstringsMatch
+ ID id-at-internationalISDNNumber
+}
+
+InternationalISDNNumber ::=
+ NumericString(SIZE (1..ub-international-isdn-number))
+
+-- String complying with ITU-T Rec. E.164 only
+ub-international-isdn-number INTEGER ::=
+ 16
+
+collectiveInternationalISDNNumber ATTRIBUTE ::= {
+ SUBTYPE OF internationalISDNNumber
+ COLLECTIVE TRUE
+ ID id-at-collectiveInternationalISDNNumber
+}
+
+registeredAddress ATTRIBUTE ::= {
+ SUBTYPE OF postalAddress
+ WITH SYNTAX PostalAddress
+ ID id-at-registeredAddress
+}
+
+destinationIndicator ATTRIBUTE ::= {
+ WITH SYNTAX DestinationIndicator
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-destinationIndicator
+}
+
+DestinationIndicator ::= PrintableString(SIZE (1..MAX))
+
+-- alphabetical characters only
+communicationsService ATTRIBUTE ::= {
+ WITH SYNTAX CommunicationsService
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-at-communicationsService
+}
+
+CommunicationsService ::= OBJECT IDENTIFIER
+
+communicationsNetwork ATTRIBUTE ::= {
+ WITH SYNTAX CommunicationsNetwork
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ SINGLE VALUE TRUE
+ ID id-at-communicationsNetwork
+}
+
+CommunicationsNetwork ::= OBJECT IDENTIFIER
+
+preferredDeliveryMethod ATTRIBUTE ::= {
+ WITH SYNTAX PreferredDeliveryMethod
+ SINGLE VALUE TRUE
+ ID id-at-preferredDeliveryMethod
+}
+
+PreferredDeliveryMethod ::=
+ SEQUENCE OF
+ INTEGER {any-delivery-method(0), mhs-delivery(1), physical-delivery(2),
+ telex-delivery(3), teletex-delivery(4), g3-facsimile-delivery(5),
+ g4-facsimile-delivery(6), ia5-terminal-delivery(7),
+ videotex-delivery(8), telephone-delivery(9)}
+
+presentationAddress ATTRIBUTE ::= {
+ WITH SYNTAX PresentationAddress
+ EQUALITY MATCHING RULE presentationAddressMatch
+ SINGLE VALUE TRUE
+ ID id-at-presentationAddress
+}
+
+PresentationAddress ::= SEQUENCE {
+ pSelector [0] OCTET STRING OPTIONAL,
+ sSelector [1] OCTET STRING OPTIONAL,
+ tSelector [2] OCTET STRING OPTIONAL,
+ nAddresses [3] SET SIZE (1..MAX) OF OCTET STRING
+}
+
+supportedApplicationContext ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-at-supportedApplicationContext
+}
+
+protocolInformation ATTRIBUTE ::= {
+ WITH SYNTAX ProtocolInformation
+ EQUALITY MATCHING RULE protocolInformationMatch
+ ID id-at-protocolInformation
+}
+
+ProtocolInformation ::= SEQUENCE {
+ nAddress OCTET STRING,
+ profiles SET OF OBJECT IDENTIFIER
+}
+
+distinguishedName ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ ID id-at-distinguishedName
+}
+
+member ATTRIBUTE ::= {SUBTYPE OF distinguishedName
+ ID id-at-member
+}
+
+uniqueMember ATTRIBUTE ::= {
+ WITH SYNTAX NameAndOptionalUID
+ EQUALITY MATCHING RULE uniqueMemberMatch
+ ID id-at-uniqueMember
+}
+
+NameAndOptionalUID ::= SEQUENCE {
+ dn DistinguishedName,
+ uid UniqueIdentifier OPTIONAL
+}
+
+owner ATTRIBUTE ::= {SUBTYPE OF distinguishedName
+ ID id-at-owner
+}
+
+roleOccupant ATTRIBUTE ::= {
+ SUBTYPE OF distinguishedName
+ ID id-at-roleOccupant
+}
+
+seeAlso ATTRIBUTE ::= {SUBTYPE OF distinguishedName
+ ID id-at-seeAlso
+}
+
+dmdName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-dmdName
+}
+
+-- Attributes for tag-based identification
+tagOid ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ SINGLE VALUE TRUE
+ ID id-at-tagOid
+}
+
+uiiFormat ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ SINGLE VALUE TRUE
+ ID id-at-uiiFormat
+}
+
+uiiInUrn ATTRIBUTE ::= {
+ WITH SYNTAX UTF8String
+ EQUALITY MATCHING RULE caseExactMatch
+ SINGLE VALUE TRUE
+ ID id-at-uiiInUrn
+}
+
+contentUri ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-contentUri
+}
+
+-- Notification attributes
+dSAProblem ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-not-dSAProblem
+}
+
+searchServiceProblem ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ SINGLE VALUE TRUE
+ ID id-not-searchServiceProblem
+}
+
+serviceType ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ SINGLE VALUE TRUE
+ ID id-not-serviceType
+}
+
+attributeTypeList ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-not-attributeTypeList
+}
+
+matchingRuleList ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-not-matchingRuleList
+}
+
+filterItem ATTRIBUTE ::= {
+ WITH SYNTAX FilterItem
+ ID id-not-filterItem
+}
+
+attributeCombinations ATTRIBUTE ::= {
+ WITH SYNTAX AttributeCombination
+ ID id-not-attributeCombinations
+}
+
+contextTypeList ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-not-contextTypeList
+}
+
+contextList ATTRIBUTE ::= {
+ WITH SYNTAX ContextAssertion
+ ID id-not-contextList
+}
+
+contextCombinations ATTRIBUTE ::= {
+ WITH SYNTAX ContextCombination
+ ID id-not-contextCombinations
+}
+
+hierarchySelectList ATTRIBUTE ::= {
+ WITH SYNTAX HierarchySelections
+ SINGLE VALUE TRUE
+ ID id-not-hierarchySelectList
+}
+
+searchControlOptionsList ATTRIBUTE ::= {
+ WITH SYNTAX SearchControlOptions
+ SINGLE VALUE TRUE
+ ID id-not-searchControlOptionsList
+}
+
+serviceControlOptionsList ATTRIBUTE ::= {
+ WITH SYNTAX ServiceControlOptions
+ SINGLE VALUE TRUE
+ ID id-not-serviceControlOptionsList
+}
+
+multipleMatchingLocalities ATTRIBUTE ::= {
+ WITH SYNTAX MultipleMatchingLocalities
+ ID id-not-multipleMatchingLocalities
+}
+
+MultipleMatchingLocalities ::= SEQUENCE {
+ matchingRuleUsed MATCHING-RULE.&id OPTIONAL,
+ attributeList SEQUENCE OF AttributeValueAssertion
+}
+
+proposedRelaxation ATTRIBUTE ::= {
+ WITH SYNTAX MRMappings
+ ID id-not-proposedRelaxation
+}
+
+MRMappings ::= SEQUENCE OF MRMapping
+
+appliedRelaxation ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-not-appliedRelaxation
+}
+
+-- Matching rules
+caseExactMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-caseExactMatch
+}
+
+caseIgnoreMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-caseIgnoreMatch
+}
+
+caseExactOrderingMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-caseExactOrderingMatch
+}
+
+caseIgnoreOrderingMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-caseIgnoreOrderingMatch
+}
+
+caseExactSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion -- only the PrintableString choice
+ ID id-mr-caseExactSubstringsMatch
+}
+
+caseIgnoreSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-caseIgnoreSubstringsMatch
+}
+
+SubstringAssertion ::=
+ SEQUENCE OF
+ CHOICE {initial [0] UnboundedDirectoryString,
+ any [1] UnboundedDirectoryString,
+ final [2] UnboundedDirectoryString,
+ control Attribute{{SupportedAttributes}}
+ } -- Used to specify interpretation of the following items
+
+-- at most one initial and one final component
+numericStringMatch MATCHING-RULE ::= {
+ SYNTAX NumericString
+ ID id-mr-numericStringMatch
+}
+
+numericStringOrderingMatch MATCHING-RULE ::= {
+ SYNTAX NumericString
+ ID id-mr-numericStringOrderingMatch
+}
+
+numericStringSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-numericStringSubstringsMatch
+}
+
+caseIgnoreListMatch MATCHING-RULE ::= {
+ SYNTAX CaseIgnoreList
+ ID id-mr-caseIgnoreListMatch
+}
+
+CaseIgnoreList ::= SEQUENCE OF UnboundedDirectoryString
+
+caseIgnoreListSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-caseIgnoreListSubstringsMatch
+}
+
+storedPrefixMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-storedPrefixMatch
+}
+
+booleanMatch MATCHING-RULE ::= {SYNTAX BOOLEAN
+ ID id-mr-booleanMatch
+}
+
+integerMatch MATCHING-RULE ::= {SYNTAX INTEGER
+ ID id-mr-integerMatch
+}
+
+integerOrderingMatch MATCHING-RULE ::= {
+ SYNTAX INTEGER
+ ID id-mr-integerOrderingMatch
+}
+
+bitStringMatch MATCHING-RULE ::= {
+ SYNTAX BIT STRING
+ ID id-mr-bitStringMatch
+}
+
+octetStringMatch MATCHING-RULE ::= {
+ SYNTAX OCTET STRING
+ ID id-mr-octetStringMatch
+}
+
+octetStringOrderingMatch MATCHING-RULE ::= {
+ SYNTAX OCTET STRING
+ ID id-mr-octetStringOrderingMatch
+}
+
+octetStringSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX OctetSubstringAssertion
+ ID id-mr-octetStringSubstringsMatch
+}
+
+OctetSubstringAssertion ::=
+ SEQUENCE OF
+ CHOICE {initial [0] OCTET STRING,
+ any [1] OCTET STRING,
+ final [2] OCTET STRING}
+
+-- at most one initial and one final component
+telephoneNumberMatch MATCHING-RULE ::= {
+ SYNTAX TelephoneNumber
+ ID id-mr-telephoneNumberMatch
+}
+
+telephoneNumberSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-telephoneNumberSubstringsMatch
+}
+
+presentationAddressMatch MATCHING-RULE ::= {
+ SYNTAX PresentationAddress
+ ID id-mr-presentationAddressMatch
+}
+
+uniqueMemberMatch MATCHING-RULE ::= {
+ SYNTAX NameAndOptionalUID
+ ID id-mr-uniqueMemberMatch
+}
+
+protocolInformationMatch MATCHING-RULE ::= {
+ SYNTAX OCTET STRING
+ ID id-mr-protocolInformationMatch
+}
+
+facsimileNumberMatch MATCHING-RULE ::= {
+ SYNTAX TelephoneNumber
+ ID id-mr-facsimileNumberMatch
+}
+
+facsimileNumberSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-facsimileNumberSubstringsMatch
+}
+
+uUIDPairMatch MATCHING-RULE ::= {SYNTAX UUIDPair
+ ID id-mr-uuidpairmatch
+}
+
+uTCTimeMatch MATCHING-RULE ::= {SYNTAX UTCTime
+ ID id-mr-uTCTimeMatch
+}
+
+uTCTimeOrderingMatch MATCHING-RULE ::= {
+ SYNTAX UTCTime
+ ID id-mr-uTCTimeOrderingMatch
+}
+
+generalizedTimeMatch MATCHING-RULE ::= {
+ SYNTAX GeneralizedTime
+ -- as per 46.3 b) or c) of ITU-T Rec. X.680 | ISO/IEC 8824-1
+ ID id-mr-generalizedTimeMatch
+}
+
+generalizedTimeOrderingMatch MATCHING-RULE ::= {
+ SYNTAX GeneralizedTime
+ -- as per 46.3 b) or c) of ITU-T Rec. X.680 | ISO/IEC 8824-1
+ ID id-mr-generalizedTimeOrderingMatch
+}
+
+systemProposedMatch MATCHING-RULE ::= {ID id-mr-systemProposedMatch
+}
+
+integerFirstComponentMatch MATCHING-RULE ::= {
+ SYNTAX INTEGER
+ ID id-mr-integerFirstComponentMatch
+}
+
+objectIdentifierFirstComponentMatch MATCHING-RULE ::= {
+ SYNTAX OBJECT IDENTIFIER
+ ID id-mr-objectIdentifierFirstComponentMatch
+}
+
+directoryStringFirstComponentMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-directoryStringFirstComponentMatch
+}
+
+wordMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-wordMatch
+}
+
+keywordMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-keywordMatch
+}
+
+generalWordMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-generalWordMatch
+}
+
+sequenceMatchType ATTRIBUTE ::= {
+ WITH SYNTAX SequenceMatchType
+ SINGLE VALUE TRUE
+ ID id-cat-sequenceMatchType
+} -- defaulting to sequenceExact
+
+SequenceMatchType ::= ENUMERATED {
+ sequenceExact(0), sequenceDeletion(1), sequenceRestrictedDeletion(2),
+ sequencePermutation(3), sequencePermutationAndDeletion(4),
+ sequenceProviderDefined(5)}
+
+wordMatchTypes ATTRIBUTE ::= {
+ WITH SYNTAX WordMatchTypes
+ SINGLE VALUE TRUE
+ ID id-cat-wordMatchType
+} -- defaulting to wordExact
+
+WordMatchTypes ::= ENUMERATED {
+ wordExact(0), wordTruncated(1), wordPhonetic(2), wordProviderDefined(3)
+}
+
+characterMatchTypes ATTRIBUTE ::= {
+ WITH SYNTAX CharacterMatchTypes
+ SINGLE VALUE TRUE
+ ID id-cat-characterMatchTypes
+}
+
+CharacterMatchTypes ::= ENUMERATED {
+ characterExact(0), characterCaseIgnore(1), characterMapped(2)}
+
+selectedContexts ATTRIBUTE ::= {
+ WITH SYNTAX ContextAssertion
+ ID id-cat-selectedContexts
+}
+
+approximateStringMatch MATCHING-RULE ::= {ID id-mr-approximateStringMatch
+}
+
+ignoreIfAbsentMatch MATCHING-RULE ::= {ID id-mr-ignoreIfAbsentMatch
+}
+
+nullMatch MATCHING-RULE ::= {ID id-mr-nullMatch
+}
+
+ZONAL-MATCHING ::=
+ MAPPING-BASED-MATCHING{ZonalSelect, TRUE, ZonalResult, zonalMatch.&id}
+
+ZonalSelect ::= SEQUENCE OF AttributeType
+
+ZonalResult ::= ENUMERATED {
+ cannot-select-mapping(0), zero-mappings(2), multiple-mappings(3)}
+
+zonalMatch MATCHING-RULE ::= {
+ UNIQUE-MATCH-INDICATOR multipleMatchingLocalities
+ ID id-mr-zonalMatch
+}
+
+-- Contexts
+languageContext CONTEXT ::= {
+ WITH SYNTAX LanguageContextSyntax
+ ID id-avc-language
+}
+
+LanguageContextSyntax ::= PrintableString(SIZE (2..3)) -- ISO 639-2 codes only
+
+
+temporalContext CONTEXT ::= {
+ WITH SYNTAX TimeSpecification
+ ASSERTED AS TimeAssertion
+ ID id-avc-temporal
+}
+
+TimeSpecification ::= SEQUENCE {
+ time
+ CHOICE {absolute
+ SEQUENCE {startTime [0] GeneralizedTime OPTIONAL,
+ endTime [1] GeneralizedTime OPTIONAL},
+ periodic SET SIZE (1..MAX) OF Period},
+ notThisTime BOOLEAN DEFAULT FALSE,
+ timeZone TimeZone OPTIONAL
+}
+
+Period ::= SEQUENCE {
+ timesOfDay [0] SET SIZE (1..MAX) OF DayTimeBand OPTIONAL,
+ days
+ [1] CHOICE {intDay SET OF INTEGER,
+ bitDay
+ BIT STRING {sunday(0), monday(1), tuesday(2), wednesday(3),
+ thursday(4), friday(5), saturday(6)},
+ dayOf XDayOf} OPTIONAL,
+ weeks
+ [2] CHOICE {allWeeks NULL,
+ intWeek SET OF INTEGER,
+ bitWeek
+ BIT STRING {week1(0), week2(1), week3(2), week4(3), week5(4)}
+ } OPTIONAL,
+ months
+ [3] CHOICE {allMonths NULL,
+ intMonth SET OF INTEGER,
+ bitMonth
+ BIT STRING {january(0), february(1), march(2), april(3),
+ may(4), june(5), july(6), august(7),
+ september(8), october(9), november(10),
+ december(11)}} OPTIONAL,
+ years [4] SET OF INTEGER(1000..MAX) OPTIONAL
+}
+
+XDayOf ::= CHOICE {
+ first [1] NamedDay,
+ second [2] NamedDay,
+ third [3] NamedDay,
+ fourth [4] NamedDay,
+ fifth [5] NamedDay
+}
+
+NamedDay ::= CHOICE {
+ intNamedDays
+ ENUMERATED {sunday(1), monday(2), tuesday(3), wednesday(4), thursday(5),
+ friday(6), saturday(7)},
+ bitNamedDays
+ BIT STRING {sunday(0), monday(1), tuesday(2), wednesday(3), thursday(4),
+ friday(5), saturday(6)}
+}
+
+DayTimeBand ::= SEQUENCE {
+ startDayTime [0] DayTime DEFAULT {hour 0},
+ endDayTime [1] DayTime DEFAULT {hour 23, minute 59, second 59}
+}
+
+DayTime ::= SEQUENCE {
+ hour [0] INTEGER(0..23),
+ minute [1] INTEGER(0..59) DEFAULT 0,
+ second [2] INTEGER(0..59) DEFAULT 0
+}
+
+TimeZone ::= INTEGER(-12..12)
+
+TimeAssertion ::= CHOICE {
+ now NULL,
+ at GeneralizedTime,
+ between
+ SEQUENCE {startTime [0] GeneralizedTime,
+ endTime [1] GeneralizedTime OPTIONAL,
+ entirely BOOLEAN DEFAULT FALSE}
+}
+
+localeContext CONTEXT ::= {
+ WITH SYNTAX LocaleContextSyntax
+ ID id-avc-locale
+}
+
+LocaleContextSyntax ::= CHOICE {
+ localeID1 OBJECT IDENTIFIER,
+ localeID2 UnboundedDirectoryString
+}
+
+ldapAttributeOptionContext CONTEXT ::= {
+ WITH SYNTAX AttributeOptionList
+ ASSERTED AS AttributeOptionList
+ ABSENT-MATCH FALSE
+ ID id-avc-ldapAttributeOption
+}
+
+AttributeOptionList ::= SEQUENCE OF UTF8String
+
+-- Object identifier assignments
+-- object identifiers assigned in other modules are shown in comments
+-- Attributes
+-- id-at-objectClass OBJECT IDENTIFIER ::= {id-at 0}
+-- id-at-aliasedEntryName OBJECT IDENTIFIER ::= {id-at 1}
+-- id-at-encryptedAliasedEntryName OBJECT IDENTIFIER ::= {id-at 1 2}
+id-at-knowledgeInformation OBJECT IDENTIFIER ::=
+ {id-at 2}
+
+id-at-commonName OBJECT IDENTIFIER ::= {id-at 3}
+
+-- id-at-encryptedCommonName OBJECT IDENTIFIER ::= {id-at 3 2}
+id-at-surname OBJECT IDENTIFIER ::=
+ {id-at 4}
+
+-- id-at-encryptedSurname OBJECT IDENTIFIER ::= {id-at 4 2}
+id-at-serialNumber OBJECT IDENTIFIER ::=
+ {id-at 5}
+
+-- id-at-encryptedSerialNumber OBJECT IDENTIFIER ::= {id-at 5 2}
+id-at-countryName OBJECT IDENTIFIER ::=
+ {id-at 6}
+
+-- id-at-encryptedCountryName OBJECT IDENTIFIER ::= {id-at 6 2}
+id-at-localityName OBJECT IDENTIFIER ::=
+ {id-at 7}
+
+-- id-at-encryptedLocalityName OBJECT IDENTIFIER ::= {id-at 7 2}
+id-at-collectiveLocalityName OBJECT IDENTIFIER ::=
+ {id-at 7 1}
+
+-- id-at-encryptedCollectiveLocalityName OBJECT IDENTIFIER ::= {id-at 7 1 2}
+id-at-stateOrProvinceName OBJECT IDENTIFIER ::=
+ {id-at 8}
+
+-- id-at-encryptedStateOrProvinceName OBJECT IDENTIFIER ::= {id-at 8 2}
+id-at-collectiveStateOrProvinceName OBJECT IDENTIFIER ::=
+ {id-at 8 1}
+
+-- id-at-encryptedCollectiveStateOrProvinceName OBJECT IDENTIFIER ::= {id-at 8 1 2}
+id-at-streetAddress OBJECT IDENTIFIER ::=
+ {id-at 9}
+
+-- id-at-encryptedStreetAddress OBJECT IDENTIFIER ::= {id-at 9 2}
+id-at-collectiveStreetAddress OBJECT IDENTIFIER ::=
+ {id-at 9 1}
+
+-- id-at-encryptedCollectiveStreetAddress OBJECT IDENTIFIER ::= {id-at 9 1 2}
+id-at-organizationName OBJECT IDENTIFIER ::=
+ {id-at 10}
+
+-- id-at-encryptedOrganizationName OBJECT IDENTIFIER ::= {id-at 10 2}
+id-at-collectiveOrganizationName OBJECT IDENTIFIER ::=
+ {id-at 10 1}
+
+-- id-at-encryptedCollectiveOrganizationName OBJECT IDENTIFIER ::= {id-at 10 1 2}
+id-at-organizationalUnitName OBJECT IDENTIFIER ::=
+ {id-at 11}
+
+-- id-at-encryptedOrganizationalUnitName OBJECT IDENTIFIER ::= {id-at 11 2}
+id-at-collectiveOrganizationalUnitName OBJECT IDENTIFIER ::=
+ {id-at 11 1}
+
+-- id-at-encryptedCollectiveOrganizationalUnitName OBJECT IDENTIFIER ::= {id-at 11 1 2}
+id-at-title OBJECT IDENTIFIER ::=
+ {id-at 12}
+
+-- id-at-encryptedTitle OBJECT IDENTIFIER ::= {id-at 12 2}
+id-at-description OBJECT IDENTIFIER ::=
+ {id-at 13}
+
+-- id-at-encryptedDescription OBJECT IDENTIFIER ::= {id-at 13 2}
+id-at-searchGuide OBJECT IDENTIFIER ::=
+ {id-at 14}
+
+-- id-at-encryptedSearchGuide OBJECT IDENTIFIER ::= {id-at 14 2}
+id-at-businessCategory OBJECT IDENTIFIER ::=
+ {id-at 15}
+
+-- id-at-encryptedBusinessCategory OBJECT IDENTIFIER ::= {id-at 15 2}
+id-at-postalAddress OBJECT IDENTIFIER ::=
+ {id-at 16}
+
+-- id-at-encryptedPostalAddress OBJECT IDENTIFIER ::= {id-at 16 2}
+id-at-collectivePostalAddress OBJECT IDENTIFIER ::=
+ {id-at 16 1}
+
+-- id-at-encryptedCollectivePostalAddress OBJECT IDENTIFIER ::= {id-at 16 1 2}
+id-at-postalCode OBJECT IDENTIFIER ::=
+ {id-at 17}
+
+-- id-at-encryptedPostalCode OBJECT IDENTIFIER ::= {id-at 17 2}
+id-at-collectivePostalCode OBJECT IDENTIFIER ::=
+ {id-at 17 1}
+
+-- id-at-encryptedCollectivePostalCode OBJECT IDENTIFIER ::= {id-at 17 1 2}
+id-at-postOfficeBox OBJECT IDENTIFIER ::=
+ {id-at 18}
+
+id-at-collectivePostOfficeBox OBJECT IDENTIFIER ::= {id-at 18 1}
+
+-- id-at-encryptedPostOfficeBox OBJECT IDENTIFIER ::= {id-at 18 2}
+-- id-at-encryptedCollectivePostOfficeBox OBJECT IDENTIFIER ::= {id-at 18 1 2}
+id-at-physicalDeliveryOfficeName OBJECT IDENTIFIER ::=
+ {id-at 19}
+
+id-at-collectivePhysicalDeliveryOfficeName OBJECT IDENTIFIER ::= {id-at 19 1}
+
+-- id-at-encryptedPhysicalDeliveryOfficeName OBJECT IDENTIFIER ::= {id-at 19 2}
+-- id-at-encryptedCollectivePhysicalDeliveryOfficeName OBJECT IDENTIFIER ::= {id-at 19 1 2}
+id-at-telephoneNumber OBJECT IDENTIFIER ::=
+ {id-at 20}
+
+-- id-at-encryptedTelephoneNumber OBJECT IDENTIFIER ::= {id-at 20 2}
+id-at-collectiveTelephoneNumber OBJECT IDENTIFIER ::=
+ {id-at 20 1}
+
+-- id-at-encryptedCollectiveTelephoneNumber OBJECT IDENTIFIER ::= {id-at 20 1 2}
+id-at-telexNumber OBJECT IDENTIFIER ::=
+ {id-at 21}
+
+-- id-at-encryptedTelexNumber OBJECT IDENTIFIER ::= {id-at 21 2}
+id-at-collectiveTelexNumber OBJECT IDENTIFIER ::=
+ {id-at 21 1}
+
+-- id-at-encryptedCollectiveTelexNumber OBJECT IDENTIFIER ::= {id-at 21 1 2}
+-- id-at-teletexTerminalIdentifier OBJECT IDENTIFIER ::= {id-at 22}
+-- id-at-encryptedTeletexTerminalIdentifier OBJECT IDENTIFIER ::= {id-at 22 2}
+-- id-at-collectiveTeletexTerminalIdentifier OBJECT IDENTIFIER ::= {id-at 22 1}
+-- id-at-encryptedCollectiveTeletexTerminalIdentifier OBJECT IDENTIFIER ::= {id-at 22 1 2}
+id-at-facsimileTelephoneNumber OBJECT IDENTIFIER ::=
+ {id-at 23}
+
+-- id-at-encryptedFacsimileTelephoneNumber OBJECT IDENTIFIER ::= {id-at 23 2}
+id-at-collectiveFacsimileTelephoneNumber OBJECT IDENTIFIER ::=
+ {id-at 23 1}
+
+-- id-at-encryptedCollectiveFacsimileTelephoneNumber OBJECT IDENTIFIER ::= {id-at 23 1 2}
+id-at-x121Address OBJECT IDENTIFIER ::=
+ {id-at 24}
+
+-- id-at-encryptedX121Address OBJECT IDENTIFIER ::= {id-at 24 2}
+id-at-internationalISDNNumber OBJECT IDENTIFIER ::=
+ {id-at 25}
+
+-- id-at-encryptedInternationalISDNNumber OBJECT IDENTIFIER ::= {id-at 25 2}
+id-at-collectiveInternationalISDNNumber OBJECT IDENTIFIER ::=
+ {id-at 25 1}
+
+-- id-at-encryptedCollectiveInternationalISDNNumber OBJECT IDENTIFIER ::= {id-at 25 1 2}
+id-at-registeredAddress OBJECT IDENTIFIER ::=
+ {id-at 26}
+
+-- id-at-encryptedRegisteredAddress OBJECT IDENTIFIER ::= {id-at 26 2}
+id-at-destinationIndicator OBJECT IDENTIFIER ::=
+ {id-at 27}
+
+-- id-at-encryptedDestinationIndicator OBJECT IDENTIFIER ::= {id-at 27 2}
+id-at-preferredDeliveryMethod OBJECT IDENTIFIER ::=
+ {id-at 28}
+
+-- id-at-encryptedPreferredDeliveryMethod OBJECT IDENTIFIER ::= {id-at 28 2}
+id-at-presentationAddress OBJECT IDENTIFIER ::=
+ {id-at 29}
+
+-- id-at-encryptedPresentationAddress OBJECT IDENTIFIER ::= {id-at 29 2}
+id-at-supportedApplicationContext OBJECT IDENTIFIER ::=
+ {id-at 30}
+
+-- id-at-encryptedSupportedApplicationContext OBJECT IDENTIFIER ::= {id-at 30 2}
+id-at-member OBJECT IDENTIFIER ::=
+ {id-at 31}
+
+-- id-at-encryptedMember OBJECT IDENTIFIER ::= {id-at 31 2}
+id-at-owner OBJECT IDENTIFIER ::=
+ {id-at 32}
+
+-- id-at-encryptedOwner OBJECT IDENTIFIER ::= {id-at 32 2}
+id-at-roleOccupant OBJECT IDENTIFIER ::=
+ {id-at 33}
+
+-- id-at-encryptedRoleOccupant OBJECT IDENTIFIER ::= {id-at 33 2}
+id-at-seeAlso OBJECT IDENTIFIER ::=
+ {id-at 34}
+
+-- id-at-encryptedSeeAlso OBJECT IDENTIFIER ::= {id-at 34 2}
+-- id-at-userPassword OBJECT IDENTIFIER ::= {id-at 35} X.509|Part8
+-- id-at-encryptedUserPassword OBJECT IDENTIFIER ::= {id-at 35 2}
+-- id-at-userCertificate OBJECT IDENTIFIER ::= {id-at 36} X.509|Part8
+-- id-at-encryptedUserCertificate OBJECT IDENTIFIER ::= {id-at 36 2}
+-- id-at-cACertificate OBJECT IDENTIFIER ::= {id-at 37} X.509|Part8
+-- id-at-encryptedCACertificate OBJECT IDENTIFIER ::= {id-at 37 2}
+-- id-at-authorityRevocationList OBJECT IDENTIFIER ::= {id-at 38} X.509|Part8
+-- id-at-encryptedAuthorityRevocationList OBJECT IDENTIFIER ::= {id-at 38 2}
+-- id-at-certificateRevocationList OBJECT IDENTIFIER ::= {id-at 39} X.509|Part8
+-- id-at-encryptedCertificateRevocationList OBJECT IDENTIFIER ::= {id-at 39 2}
+-- id-at-crossCertificatePair OBJECT IDENTIFIER ::= {id-at 40} X.509|Part8
+-- id-at-encryptedCrossCertificatePair OBJECT IDENTIFIER ::= {id-at 40 2}
+id-at-name OBJECT IDENTIFIER ::=
+ {id-at 41}
+
+id-at-givenName OBJECT IDENTIFIER ::= {id-at 42}
+
+-- id-at-encryptedGivenName OBJECT IDENTIFIER ::= {id-at 42 2}
+id-at-initials OBJECT IDENTIFIER ::=
+ {id-at 43}
+
+-- id-at-encryptedInitials OBJECT IDENTIFIER ::= {id-at 43 2}
+id-at-generationQualifier OBJECT IDENTIFIER ::=
+ {id-at 44}
+
+-- id-at-encryptedGenerationQualifier OBJECT IDENTIFIER ::= {id-at 44 2}
+id-at-uniqueIdentifier OBJECT IDENTIFIER ::=
+ {id-at 45}
+
+-- id-at-encryptedUniqueIdentifier OBJECT IDENTIFIER ::= {id-at 45 2}
+id-at-dnQualifier OBJECT IDENTIFIER ::=
+ {id-at 46}
+
+-- id-at-encryptedDnQualifier OBJECT IDENTIFIER ::= {id-at 46 2}
+id-at-enhancedSearchGuide OBJECT IDENTIFIER ::=
+ {id-at 47}
+
+-- id-at-encryptedEnhancedSearchGuide OBJECT IDENTIFIER ::= {id-at 47 2}
+id-at-protocolInformation OBJECT IDENTIFIER ::=
+ {id-at 48}
+
+-- id-at-encryptedProtocolInformation OBJECT IDENTIFIER ::= {id-at 48 2}
+id-at-distinguishedName OBJECT IDENTIFIER ::=
+ {id-at 49}
+
+-- id-at-encryptedDistinguishedName OBJECT IDENTIFIER ::= {id-at 49 2}
+id-at-uniqueMember OBJECT IDENTIFIER ::=
+ {id-at 50}
+
+-- id-at-encryptedUniqueMember OBJECT IDENTIFIER ::= {id-at 50 2}
+id-at-houseIdentifier OBJECT IDENTIFIER ::=
+ {id-at 51}
+
+-- id-at-encryptedHouseIdentifier OBJECT IDENTIFIER ::= {id-at 51 2}
+-- id-at-supportedAlgorithms OBJECT IDENTIFIER ::= {id-at 52} X.509|Part8
+-- id-at-encryptedSupportedAlgorithms OBJECT IDENTIFIER ::= {id-at 52 2}
+-- id-at-deltaRevocationList OBJECT IDENTIFIER ::= {id-at 53} X.509|Part8
+-- id-at-encryptedDeltaRevocationList OBJECT IDENTIFIER ::= {id-at 53 2}
+id-at-dmdName OBJECT IDENTIFIER ::=
+ {id-at 54}
+
+-- id-at-encryptedDmdName OBJECT IDENTIFIER ::= {id-at 54 2}
+-- id-at-clearance OBJECT IDENTIFIER ::= {id-at 55}
+-- id-at-encryptedClearance OBJECT IDENTIFIER ::= {id-at 55 2}
+-- id-at-defaultDirQop OBJECT IDENTIFIER ::= {id-at 56}
+-- id-at-encryptedDefaultDirQop OBJECT IDENTIFIER ::= {id-at 56 2}
+-- id-at-attributeIntegrityInfo OBJECT IDENTIFIER ::= {id-at 57}
+-- id-at-encryptedAttributeIntegrityInfo OBJECT IDENTIFIER ::= {id-at 57 2}
+-- id-at-attributeCertificate OBJECT IDENTIFIER ::= {id-at 58} X.509|Part8
+-- id-at-encryptedAttributeCertificate OBJECT IDENTIFIER ::= {id-at 58 2}
+-- id-at-attributeCertificateRevocationList OBJECT IDENTIFIER ::= {id-at 59} X.509|Part8
+-- id-at-encryptedAttributeCertificateRevocationList OBJECT IDENTIFIER ::= {id-at 59 2}
+-- id-at-confKeyInfo OBJECT IDENTIFIER ::= {id-at 60}
+-- id-at-encryptedConfKeyInfo OBJECT IDENTIFIER ::= {id-at 60 2}
+-- id-at-aACertificate OBJECT IDENTIFIER ::= {id-at 61} X.509|Part8
+-- id-at-attributeDescriptorCertificate OBJECT IDENTIFIER ::= {id-at 62} X.509|Part8
+-- id-at-attributeAuthorityRevocationList OBJECT IDENTIFIER ::= {id-at 63} X.509|Part8
+-- id-at-family-information OBJECT IDENTIFIER ::= {id-at 64}
+id-at-pseudonym OBJECT IDENTIFIER ::=
+ {id-at 65}
+
+id-at-communicationsService OBJECT IDENTIFIER ::= {id-at 66}
+
+id-at-communicationsNetwork OBJECT IDENTIFIER ::= {id-at 67}
+
+-- id-at-certificationPracticeStmt OBJECT IDENTIFIER ::= {id-at 68} X.509|Part8
+-- id-at-certificatePolicy OBJECT IDENTIFIER ::= {id-at 69} X.509|Part8
+-- id-at-pkiPath OBJECT IDENTIFIER ::= {id-at 70} X.509|Part8
+-- id-at-privPolicy OBJECT IDENTIFIER ::= {id-at 71} X.509|Part8
+-- id-at-role OBJECT IDENTIFIER ::= {id-at 72} X.509|Part8
+-- id-at-delegationPath OBJECT IDENTIFIER ::= {id-at 73} X.509|Part8
+-- id-at-protPrivPolicy OBJECT IDENTIFIER ::= {id-at 74} X.509|Part8
+-- id-at-xMLPrivilegeInfo OBJECT IDENTIFIER ::= {id-at 75} X.509|Part8
+-- id-at-xmlPrivPolicy OBJECT IDENTIFIER ::= {id-at 76} X.509|Part8
+id-at-uuidpair OBJECT IDENTIFIER ::=
+ {id-at 77}
+
+id-at-tagOid OBJECT IDENTIFIER ::= {id-at 78}
+
+id-at-uiiFormat OBJECT IDENTIFIER ::= {id-at 79}
+
+id-at-uiiInUrn OBJECT IDENTIFIER ::= {id-at 80}
+
+id-at-contentUri OBJECT IDENTIFIER ::= {id-at 81}
+
+-- id-at-permission OBJECT IDENTIFIER ::= {id-at 82} X.509|Part8
+-- Control attributes
+id-cat-sequenceMatchType OBJECT IDENTIFIER ::=
+ {id-cat 1}
+
+id-cat-wordMatchType OBJECT IDENTIFIER ::= {id-cat 2}
+
+id-cat-characterMatchTypes OBJECT IDENTIFIER ::= {id-cat 3}
+
+id-cat-selectedContexts OBJECT IDENTIFIER ::= {id-cat 4}
+
+-- Notification attributes
+id-not-dSAProblem OBJECT IDENTIFIER ::= {id-not 0}
+
+id-not-searchServiceProblem OBJECT IDENTIFIER ::= {id-not 1}
+
+id-not-serviceType OBJECT IDENTIFIER ::= {id-not 2}
+
+id-not-attributeTypeList OBJECT IDENTIFIER ::= {id-not 3}
+
+id-not-matchingRuleList OBJECT IDENTIFIER ::= {id-not 4}
+
+id-not-filterItem OBJECT IDENTIFIER ::= {id-not 5}
+
+id-not-attributeCombinations OBJECT IDENTIFIER ::= {id-not 6}
+
+id-not-contextTypeList OBJECT IDENTIFIER ::= {id-not 7}
+
+id-not-contextList OBJECT IDENTIFIER ::= {id-not 8}
+
+id-not-contextCombinations OBJECT IDENTIFIER ::= {id-not 9}
+
+id-not-hierarchySelectList OBJECT IDENTIFIER ::= {id-not 10}
+
+id-not-searchControlOptionsList OBJECT IDENTIFIER ::= {id-not 11}
+
+id-not-serviceControlOptionsList OBJECT IDENTIFIER ::= {id-not 12}
+
+id-not-multipleMatchingLocalities OBJECT IDENTIFIER ::= {id-not 13}
+
+id-not-proposedRelaxation OBJECT IDENTIFIER ::= {id-not 14}
+
+id-not-appliedRelaxation OBJECT IDENTIFIER ::= {id-not 15}
+
+-- Problem definitions
+id-pr-targetDsaUnavailable OBJECT IDENTIFIER ::=
+ {id-pr 1}
+
+id-pr-dataSourceUnavailable OBJECT IDENTIFIER ::= {id-pr 2}
+
+id-pr-unidentifiedOperation OBJECT IDENTIFIER ::= {id-pr 3}
+
+id-pr-unavailableOperation OBJECT IDENTIFIER ::= {id-pr 4}
+
+id-pr-searchAttributeViolation OBJECT IDENTIFIER ::= {id-pr 5}
+
+id-pr-searchAttributeCombinationViolation OBJECT IDENTIFIER ::= {id-pr 6}
+
+id-pr-searchValueNotAllowed OBJECT IDENTIFIER ::= {id-pr 7}
+
+id-pr-missingSearchAttribute OBJECT IDENTIFIER ::= {id-pr 8}
+
+id-pr-searchValueViolation OBJECT IDENTIFIER ::= {id-pr 9}
+
+id-pr-attributeNegationViolation OBJECT IDENTIFIER ::= {id-pr 10}
+
+id-pr-searchValueRequired OBJECT IDENTIFIER ::= {id-pr 11}
+
+id-pr-invalidSearchValue OBJECT IDENTIFIER ::= {id-pr 12}
+
+id-pr-searchContextViolation OBJECT IDENTIFIER ::= {id-pr 13}
+
+id-pr-searchContextCombinationViolation OBJECT IDENTIFIER ::= {id-pr 14}
+
+id-pr-missingSearchContext OBJECT IDENTIFIER ::= {id-pr 15}
+
+id-pr-searchContextValueViolation OBJECT IDENTIFIER ::= {id-pr 16}
+
+id-pr-searchContextValueRequired OBJECT IDENTIFIER ::= {id-pr 17}
+
+id-pr-invalidContextSearchValue OBJECT IDENTIFIER ::= {id-pr 18}
+
+id-pr-unsupportedMatchingRule OBJECT IDENTIFIER ::= {id-pr 19}
+
+id-pr-attributeMatchingViolation OBJECT IDENTIFIER ::= {id-pr 20}
+
+id-pr-unsupportedMatchingUse OBJECT IDENTIFIER ::= {id-pr 21}
+
+id-pr-matchingUseViolation OBJECT IDENTIFIER ::= {id-pr 22}
+
+id-pr-hierarchySelectForbidden OBJECT IDENTIFIER ::= {id-pr 23}
+
+id-pr-invalidHierarchySelect OBJECT IDENTIFIER ::= {id-pr 24}
+
+id-pr-unavailableHierarchySelect OBJECT IDENTIFIER ::= {id-pr 25}
+
+id-pr-invalidSearchControlOptions OBJECT IDENTIFIER ::= {id-pr 26}
+
+id-pr-invalidServiceControlOptions OBJECT IDENTIFIER ::= {id-pr 27}
+
+id-pr-searchSubsetViolation OBJECT IDENTIFIER ::= {id-pr 28}
+
+id-pr-unmatchedKeyAttributes OBJECT IDENTIFIER ::= {id-pr 29}
+
+id-pr-ambiguousKeyAttributes OBJECT IDENTIFIER ::= {id-pr 30}
+
+id-pr-unavailableRelaxationLevel OBJECT IDENTIFIER ::= {id-pr 31}
+
+id-pr-emptyHierarchySelection OBJECT IDENTIFIER ::= {id-pr 32}
+
+id-pr-administratorImposedLimit OBJECT IDENTIFIER ::= {id-pr 33}
+
+id-pr-permanentRestriction OBJECT IDENTIFIER ::= {id-pr 34}
+
+id-pr-temporaryRestriction OBJECT IDENTIFIER ::= {id-pr 35}
+
+id-pr-relaxationNotSupported OBJECT IDENTIFIER ::= {id-pr 36}
+
+-- Matching rules
+-- id-mr-objectIdentifierMatch OBJECT IDENTIFIER ::= {id-mr 0} X.501|Part2
+-- id-mr-distinguishedNameMatch OBJECT IDENTIFIER ::= {id-mr 1} X.501|Part2
+id-mr-caseIgnoreMatch OBJECT IDENTIFIER ::=
+ {id-mr 2}
+
+id-mr-caseIgnoreOrderingMatch OBJECT IDENTIFIER ::= {id-mr 3}
+
+id-mr-caseIgnoreSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 4}
+
+id-mr-caseExactMatch OBJECT IDENTIFIER ::= {id-mr 5}
+
+id-mr-caseExactOrderingMatch OBJECT IDENTIFIER ::= {id-mr 6}
+
+id-mr-caseExactSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 7}
+
+id-mr-numericStringMatch OBJECT IDENTIFIER ::= {id-mr 8}
+
+id-mr-numericStringOrderingMatch OBJECT IDENTIFIER ::= {id-mr 9}
+
+id-mr-numericStringSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 10}
+
+id-mr-caseIgnoreListMatch OBJECT IDENTIFIER ::= {id-mr 11}
+
+id-mr-caseIgnoreListSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 12}
+
+id-mr-booleanMatch OBJECT IDENTIFIER ::= {id-mr 13}
+
+id-mr-integerMatch OBJECT IDENTIFIER ::= {id-mr 14}
+
+id-mr-integerOrderingMatch OBJECT IDENTIFIER ::= {id-mr 15}
+
+id-mr-bitStringMatch OBJECT IDENTIFIER ::= {id-mr 16}
+
+id-mr-octetStringMatch OBJECT IDENTIFIER ::= {id-mr 17}
+
+id-mr-octetStringOrderingMatch OBJECT IDENTIFIER ::= {id-mr 18}
+
+id-mr-octetStringSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 19}
+
+id-mr-telephoneNumberMatch OBJECT IDENTIFIER ::= {id-mr 20}
+
+id-mr-telephoneNumberSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 21}
+
+id-mr-presentationAddressMatch OBJECT IDENTIFIER ::= {id-mr 22}
+
+id-mr-uniqueMemberMatch OBJECT IDENTIFIER ::= {id-mr 23}
+
+id-mr-protocolInformationMatch OBJECT IDENTIFIER ::= {id-mr 24}
+
+id-mr-uTCTimeMatch OBJECT IDENTIFIER ::= {id-mr 25}
+
+id-mr-uTCTimeOrderingMatch OBJECT IDENTIFIER ::= {id-mr 26}
+
+id-mr-generalizedTimeMatch OBJECT IDENTIFIER ::= {id-mr 27}
+
+id-mr-generalizedTimeOrderingMatch OBJECT IDENTIFIER ::= {id-mr 28}
+
+id-mr-integerFirstComponentMatch OBJECT IDENTIFIER ::= {id-mr 29}
+
+id-mr-objectIdentifierFirstComponentMatch OBJECT IDENTIFIER ::= {id-mr 30}
+
+id-mr-directoryStringFirstComponentMatch OBJECT IDENTIFIER ::= {id-mr 31}
+
+id-mr-wordMatch OBJECT IDENTIFIER ::= {id-mr 32}
+
+id-mr-keywordMatch OBJECT IDENTIFIER ::= {id-mr 33}
+
+-- id-mr-certificateExactMatch OBJECT IDENTIFIER ::= {id-mr 34} X.509|Part8
+-- id-mr-certificateMatch OBJECT IDENTIFIER ::= {id-mr 35} X.509|Part8
+-- id-mr-certificatePairExactMatch OBJECT IDENTIFIER ::= {id-mr 36} X.509|Part8
+-- id-mr-certificatePairMatch OBJECT IDENTIFIER ::= {id-mr 37} X.509|Part8
+-- id-mr-certificateListExactMatch OBJECT IDENTIFIER ::= {id-mr 38} X.509|Part8
+-- id-mr-certificateListMatch OBJECT IDENTIFIER ::= {id-mr 39} X.509|Part8
+-- id-mr-algorithmIdentifierMatch OBJECT IDENTIFIER ::= {id-mr 40} X.509|Part8
+id-mr-storedPrefixMatch OBJECT IDENTIFIER ::=
+ {id-mr 41}
+
+-- id-mr-attributeCertificateMatch OBJECT IDENTIFIER ::= {id-mr 42} X.509|Part8
+-- id-mr-readerAndKeyIDMatch OBJECT IDENTIFIER ::= {id-mr 43}
+-- id-mr-attributeIntegrityMatch OBJECT IDENTIFIER ::= {id-mr 44}
+-- id-mr-attributeCertificateExactMatch OBJECT IDENTIFIER ::= {id-mr 45} X.509|Part8
+-- id-mr-holderIssuerMatch OBJECT IDENTIFIER ::= {id-mr 46} X.509|Part8
+id-mr-systemProposedMatch OBJECT IDENTIFIER ::=
+ {id-mr 47}
+
+id-mr-generalWordMatch OBJECT IDENTIFIER ::= {id-mr 48}
+
+id-mr-approximateStringMatch OBJECT IDENTIFIER ::= {id-mr 49}
+
+id-mr-ignoreIfAbsentMatch OBJECT IDENTIFIER ::= {id-mr 50}
+
+id-mr-nullMatch OBJECT IDENTIFIER ::= {id-mr 51}
+
+id-mr-zonalMatch OBJECT IDENTIFIER ::= {id-mr 52}
+
+-- id-mr-authAttIdMatch OBJECT IDENTIFIER ::= {id-mr 53} X.509|Part8
+-- id-mr-roleSpecCertIdMatch OBJECT IDENTIFIER ::= {id-mr 54} X.509|Part8
+-- id-mr-basicAttConstraintsMatch OBJECT IDENTIFIER ::= {id-mr 55} X.509|Part8
+-- id-mr-delegatedNameConstraintsMatch OBJECT IDENTIFIER ::= {id-mr 56} X.509|Part8
+-- id-mr-timeSpecMatch OBJECT IDENTIFIER ::= {id-mr 57} X.509|Part8
+-- id-mr-attDescriptorMatch OBJECT IDENTIFIER ::= {id-mr 58} X.509|Part8
+-- id-mr-acceptableCertPoliciesMatch OBJECT IDENTIFIER ::= {id-mr 59} X.509|Part8
+-- id-mr-policyMatch OBJECT IDENTIFIER ::= {id-mr 60} X.509|Part8
+-- id-mr-delegationPathMatch OBJECT IDENTIFIER ::= {id-mr 61} X.509|Part8
+-- id-mr-pkiPathMatch OBJECT IDENTIFIER ::= {id-mr 62} X.509|Part8
+id-mr-facsimileNumberMatch OBJECT IDENTIFIER ::=
+ {id-mr 63}
+
+id-mr-facsimileNumberSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 64}
+
+-- id-mr-enhancedCertificateMatch OBJECT IDENTIFIER ::= {id-mr 65} X.509|Part8
+-- id-mr-sOAIdentifierMatch OBJECT IDENTIFIER ::= {id-mr 66} X.509|Part8
+-- id-mr-extensionPresenceMatch OBJECT IDENTIFIER ::= {id-mr 67} X.509|Part8
+id-mr-uuidpairmatch OBJECT IDENTIFIER ::=
+ {id-mr 68}
+
+-- id-mr-dualStringMatch OBJECT IDENTIFIER ::= {id-mr 69} X.509|Part8
+-- contexts
+id-avc-language OBJECT IDENTIFIER ::=
+ {id-avc 0}
+
+id-avc-temporal OBJECT IDENTIFIER ::= {id-avc 1}
+
+id-avc-locale OBJECT IDENTIFIER ::= {id-avc 2}
+
+-- id-avc-attributeValueSecurityLabelContext OBJECT IDENTIFIER ::= {id-avc 3}
+-- id-avc-attributeValueIntegrityInfoContext OBJECT IDENTIFIER ::= {id-avc 4}
+id-avc-ldapAttributeOption OBJECT IDENTIFIER ::=
+ {id-avc 5}
+
+END -- SelectedAttributeTypes
diff --git a/lib/public_key/asn1/UsefulDefinitions.asn1 b/lib/public_key/asn1/UsefulDefinitions.asn1
new file mode 100644
index 0000000000..a200aac6e2
--- /dev/null
+++ b/lib/public_key/asn1/UsefulDefinitions.asn1
@@ -0,0 +1,234 @@
+UsefulDefinitions {joint-iso-itu-t ds(5) module(1) usefulDefinitions(0) 3}
+DEFINITIONS ::=
+BEGIN
+
+-- EXPORTS All -
+-- The types and values defined in this module are exported for use in the other ASN.1 modules contained
+-- within the Directory Specifications, and for the use of other applications which will use them to access
+-- Directory services. Other applications may use them for their own purposes, but this will not constrain
+-- extensions and modifications needed to maintain or improve the Directory service.
+ID ::= OBJECT IDENTIFIER
+
+ds ID ::= {joint-iso-itu-t ds(5)}
+
+-- categories of information object
+module ID ::= {ds 1}
+
+serviceElement ID ::= {ds 2}
+
+applicationContext ID ::= {ds 3}
+
+attributeType ID ::= {ds 4}
+
+attributeSyntax ID ::= {ds 5}
+
+objectClass ID ::= {ds 6}
+
+-- attributeSet ID ::= {ds 7}
+algorithm ID ::= {ds 8}
+
+abstractSyntax ID ::= {ds 9}
+
+-- object ID ::= {ds 10}
+-- port ID ::= {ds 11}
+dsaOperationalAttribute ID ::=
+ {ds 12}
+
+matchingRule ID ::= {ds 13}
+
+knowledgeMatchingRule ID ::= {ds 14}
+
+nameForm ID ::= {ds 15}
+
+group ID ::= {ds 16}
+
+subentry ID ::= {ds 17}
+
+operationalAttributeType ID ::= {ds 18}
+
+operationalBinding ID ::= {ds 19}
+
+schemaObjectClass ID ::= {ds 20}
+
+schemaOperationalAttribute ID ::= {ds 21}
+
+administrativeRoles ID ::= {ds 23}
+
+accessControlAttribute ID ::= {ds 24}
+
+rosObject ID ::= {ds 25}
+
+contract ID ::= {ds 26}
+
+package ID ::= {ds 27}
+
+accessControlSchemes ID ::= {ds 28}
+
+certificateExtension ID ::= {ds 29}
+
+managementObject ID ::= {ds 30}
+
+attributeValueContext ID ::= {ds 31}
+
+-- securityExchange ID ::= {ds 32}
+idmProtocol ID ::= {ds 33}
+
+problem ID ::= {ds 34}
+
+notification ID ::= {ds 35}
+
+matchingRestriction ID ::=
+ {ds 36} -- None are currently defined by this specification
+
+controlAttributeType ID ::= {ds 37}
+
+-- modules
+usefulDefinitions ID ::= {module usefulDefinitions(0) 3}
+
+informationFramework ID ::= {module informationFramework(1) 3}
+
+directoryAbstractService ID ::= {module directoryAbstractService(2) 3}
+
+distributedOperations ID ::= {module distributedOperations(3) 3}
+
+protocolObjectIdentifiers ID ::= {module protocolObjectIdentifiers(4) 3}
+
+selectedAttributeTypes ID ::= {module selectedAttributeTypes(5) 3}
+
+selectedObjectClasses ID ::= {module selectedObjectClasses(6) 3}
+
+authenticationFramework ID ::= {module authenticationFramework(7) 3}
+
+algorithmObjectIdentifiers ID ::= {module algorithmObjectIdentifiers(8) 3}
+
+directoryObjectIdentifiers ID ::= {module directoryObjectIdentifiers(9) 3}
+
+upperBounds ID ::= {module upperBounds(10) 3}
+
+dap ID ::= {module dap(11) 3}
+
+dsp ID ::= {module dsp(12) 3}
+
+distributedDirectoryOIDs ID ::= {module distributedDirectoryOIDs(13) 3}
+
+directoryShadowOIDs ID ::= {module directoryShadowOIDs(14) 3}
+
+directoryShadowAbstractService ID ::=
+ {module directoryShadowAbstractService(15) 3}
+
+disp ID ::= {module disp(16) 3}
+
+dop ID ::= {module dop(17) 3}
+
+opBindingManagement ID ::= {module opBindingManagement(18) 3}
+
+opBindingOIDs ID ::= {module opBindingOIDs(19) 3}
+
+hierarchicalOperationalBindings ID ::=
+ {module hierarchicalOperationalBindings(20) 3}
+
+dsaOperationalAttributeTypes ID ::= {module dsaOperationalAttributeTypes(22) 3}
+
+schemaAdministration ID ::= {module schemaAdministration(23) 3}
+
+basicAccessControl ID ::= {module basicAccessControl(24) 3}
+
+directoryOperationalBindingTypes ID ::=
+ {module directoryOperationalBindingTypes(25) 3}
+
+certificateExtensions ID ::= {module certificateExtensions(26) 0}
+
+directoryManagement ID ::= {module directoryManagement(27) 1}
+
+enhancedSecurity ID ::= {module enhancedSecurity(28) 1}
+
+iDMProtocolSpecification ID ::= {module iDMProtocolSpecification(30) 4}
+
+directoryIDMProtocols ID ::= {module directoryIDMProtocols(31) 4}
+
+-- directorySecurityExchanges ID ::= {module directorySecurityExchanges (29) 1}
+-- synonyms
+id-oc ID ::=
+ objectClass
+
+id-at ID ::= attributeType
+
+id-as ID ::= abstractSyntax
+
+id-mr ID ::= matchingRule
+
+id-nf ID ::= nameForm
+
+id-sc ID ::= subentry
+
+id-oa ID ::= operationalAttributeType
+
+id-ob ID ::= operationalBinding
+
+id-doa ID ::= dsaOperationalAttribute
+
+id-kmr ID ::= knowledgeMatchingRule
+
+id-soc ID ::= schemaObjectClass
+
+id-soa ID ::= schemaOperationalAttribute
+
+id-ar ID ::= administrativeRoles
+
+id-aca ID ::= accessControlAttribute
+
+id-ac ID ::= applicationContext
+
+id-rosObject ID ::= rosObject
+
+id-contract ID ::= contract
+
+id-package ID ::= package
+
+id-acScheme ID ::= accessControlSchemes
+
+id-ce ID ::= certificateExtension
+
+id-mgt ID ::= managementObject
+
+id-idm ID ::= idmProtocol
+
+id-avc ID ::= attributeValueContext
+
+-- id-se ID ::= securityExchange
+id-pr ID ::= problem
+
+id-not ID ::= notification
+
+id-mre ID ::= matchingRestriction
+
+id-cat ID ::= controlAttributeType
+
+-- obsolete module identifiers
+-- usefulDefinition ID ::= {module 0}
+-- informationFramework ID ::= {module 1}
+-- directoryAbstractService ID ::= {module 2}
+-- distributedOperations ID ::= {module 3}
+-- protocolObjectIdentifiers ID ::= {module 4}
+-- selectedAttributeTypes ID ::= {module 5}
+-- selectedObjectClasses ID ::= {module 6}
+-- authenticationFramework ID ::= {module 7}
+-- algorithmObjectIdentifiers ID ::= {module 8}
+-- directoryObjectIdentifiers ID ::= {module 9}
+-- upperBounds ID ::= {module 10}
+-- dap ID ::= {module 11}
+-- dsp ID ::= {module 12}
+-- distributedDirectoryObjectIdentifiers ID ::= {module 13}
+-- unused module identifiers
+-- directoryShadowOIDs ID ::= {module 14}
+-- directoryShadowAbstractService ID ::= {module 15}
+-- disp ID ::= {module 16}
+-- dop ID ::= {module 17}
+-- opBindingManagement ID ::= {module 18}
+-- opBindingOIDs ID ::= {module 19}
+-- hierarchicalOperationalBindings ID ::= {module 20}
+-- dsaOperationalAttributeTypes ID ::= {module 22}
+-- schemaAdministration ID ::= {module 23}
+-- basicAccessControl ID ::= {module 24}
+-- operationalBindingOIDs ID ::= {module 25}
+END -- UsefulDefinitions
diff --git a/lib/public_key/doc/src/cert_records.xml b/lib/public_key/doc/src/cert_records.xml
index ad4f5812cb..edef664245 100644
--- a/lib/public_key/doc/src/cert_records.xml
+++ b/lib/public_key/doc/src/cert_records.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>2008</year>
- <year>2011</year>
+ <year>2012</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -37,7 +37,10 @@
<p>This chapter briefly describes erlang records derived from asn1
specifications used to handle X509 certificates. The intent is to
describe the data types and not to specify the meaning of each
- component for this we refer you to RFC 5280.
+ component; for this we refer you to <url
+ href="http://www.ietf.org/rfc/rfc5280.txt">RFC 5280</url>. Also
+ described is <c>CertificationRequest</c>, which is defined by <url
+ href="http://www.rsa.com/rsalabs/node.asp?id=2124">PKCS-10</url>.
</p>
<p>Use the following include directive to get access to the
@@ -630,6 +633,40 @@ oid names see table below. Ex: ?'id-dsa-with-sha1'</p>
aACompromise
</c></p>
</section>
-
+
+ <section>
+ <marker id="PKCS10"></marker>
+ <title>PKCS#10 Certification Request</title>
+ <code>
+#'CertificationRequest'{
+ certificationRequestInfo #'CertificationRequestInfo'{},
+ signatureAlgorithm #'CertificationRequest_signatureAlgorithm'{},
+ signature {0, binary()} - asn1 compact bitstring
+ }
+
+#'CertificationRequestInfo'{
+ version atom(),
+ subject {rdnSequence, [#'AttributeTypeAndValue'{}]},
+ subjectPKInfo #'CertificationRequestInfo_subjectPKInfo'{},
+ attributes [#'AttributeTypeAndValue'{}]
+ }
+
+#'CertificationRequestInfo_subjectPKInfo'{
+ algorithm #'CertificationRequestInfo_subjectPKInfo_algorithm'{},
+ subjectPublicKey {0, binary()} - asn1 compact bitstring
+ }
+
+#'CertificationRequestInfo_subjectPKInfo_algorithm'{
+ algorithm = oid(),
+ parameters = asn1_der_encoded()
+}
+
+#'CertificationRequest_signatureAlgorithm'{
+ algorithm = oid(),
+ parameters = asn1_der_encoded()
+ }
+ </code>
+ </section>
+
</section>
</chapter>
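
The record layout above maps directly onto the public_key decode API. A minimal
sketch, assuming the generated #'CertificationRequest'{} record is available
through public_key.hrl after this change and using a hypothetical "req.pem"
file (the pkcs10_pem test case further below exercises the same calls):

    -module(pkcs10_example).
    -include_lib("public_key/include/public_key.hrl").
    -export([read_csr/1]).

    %% Read a PEM-encoded PKCS#10 request and decode it into the records
    %% documented above. File is e.g. "req.pem" (hypothetical path).
    read_csr(File) ->
        {ok, PemBin} = file:read_file(File),
        [{'CertificationRequest', Der, not_encrypted} = Entry] =
            public_key:pem_decode(PemBin),
        %% pem_entry_decode/1 and der_decode/2 yield the same record.
        #'CertificationRequest'{certificationRequestInfo = Info} =
            public_key:pem_entry_decode(Entry),
        #'CertificationRequest'{} =
            public_key:der_decode('CertificationRequest', Der),
        Info.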
diff --git a/lib/public_key/doc/src/introduction.xml b/lib/public_key/doc/src/introduction.xml
index a21fcf3576..b1d1114a6c 100644
--- a/lib/public_key/doc/src/introduction.xml
+++ b/lib/public_key/doc/src/introduction.xml
@@ -1,11 +1,11 @@
-<?xml version="1.0" encoding="latin1" ?>
+<?xml version="1.0" encoding="iso-8859-1" ?>
<!DOCTYPE chapter SYSTEM "chapter.dtd">
<chapter>
<header>
<copyright>
<year>2008</year>
- <year>2011</year>
+ <year>2012</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -37,15 +37,15 @@
<section>
<title>Purpose</title>
<p> This application provides an API to public key infrastructure
- from RFC 3280 (X.509 certificates) and public key formats defined
- by the PKCS-standard.</p>
+ from <url href="http://www.ietf.org/rfc/rfc5280.txt">RFC
+ 5280</url> (X.509 certificates) and public key formats defined by
+ the <url href="http://www.rsa.com/rsalabs/node.asp?id=2124">PKCS-standard</url>.</p>
</section>
<section>
<title>Prerequisites</title>
- <p>It is assumed that the reader is familiar with the Erlang
- programming language, concepts of OTP and has a basic understanding
- of the concepts of using public keys.</p>
+ <p>It is assumed that the reader has a basic understanding
+ of the concepts of using public keys and digital certificates.</p>
</section>
<section>
diff --git a/lib/public_key/doc/src/public_key.xml b/lib/public_key/doc/src/public_key.xml
index 5c227557f2..2ec1fcff9d 100644
--- a/lib/public_key/doc/src/public_key.xml
+++ b/lib/public_key/doc/src/public_key.xml
@@ -34,7 +34,7 @@
<modulesummary> API module for public key infrastructure.</modulesummary>
<description>
<p>This module provides functions to handle public key infrastructure
- from RFC 5280 - X.509 certificates and some parts of the PKCS-standard.
+ from <url href="http://www.ietf.org/rfc/rfc5280.txt">RFC 5280</url> - X.509 certificates and some parts of the PKCS-standard.
</p>
</description>
@@ -61,7 +61,7 @@
<p><code>string = [bytes()]</code></p>
<p><code>pki_asn1_type() = 'Certificate' | 'RSAPrivateKey'| 'RSAPublicKey'
- 'DSAPrivateKey' | 'DSAPublicKey' | 'DHParameter' | 'SubjectPublicKeyInfo'| 'PrivateKeyInfo'</code></p>
+ 'DSAPrivateKey' | 'DSAPublicKey' | 'DHParameter' | 'SubjectPublicKeyInfo'| 'PrivateKeyInfo' | 'CertificationRequest'</code></p>
<p><code>pem_entry () = {pki_asn1_type(), binary(), %% DER or encrypted DER
not_encrypted | cipher_info()} </code></p>
diff --git a/lib/public_key/include/public_key.hrl b/lib/public_key/include/public_key.hrl
index 2475295974..2dfdbbb8f3 100644
--- a/lib/public_key/include/public_key.hrl
+++ b/lib/public_key/include/public_key.hrl
@@ -78,7 +78,7 @@
-type dsa_public_key() :: {integer(), #'Dss-Parms'{}}.
-type pki_asn1_type() :: 'Certificate' | 'RSAPrivateKey' | 'RSAPublicKey'
| 'DSAPrivateKey' | 'DSAPublicKey' | 'DHParameter'
- | 'SubjectPublicKeyInfo'.
+ | 'SubjectPublicKeyInfo' | 'CertificationRequest'.
-type pem_entry() :: {pki_asn1_type(), binary(), %% DER or Encrypted DER
not_encrypted | {Cipher :: string(), Salt :: binary()}}.
-type asn1_type() :: atom(). %% see "OTP-PUB-KEY.hrl
diff --git a/lib/public_key/src/pubkey_pem.erl b/lib/public_key/src/pubkey_pem.erl
index 910473d629..4012825f20 100644
--- a/lib/public_key/src/pubkey_pem.erl
+++ b/lib/public_key/src/pubkey_pem.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -194,7 +194,12 @@ pem_start('SubjectPublicKeyInfo') ->
pem_start('DSAPrivateKey') ->
<<"-----BEGIN DSA PRIVATE KEY-----">>;
pem_start('DHParameter') ->
- <<"-----BEGIN DH PARAMETERS-----">>.
+ <<"-----BEGIN DH PARAMETERS-----">>;
+pem_start('CertificationRequest') ->
+ <<"-----BEGIN CERTIFICATE REQUEST-----">>;
+pem_start('ContentInfo') ->
+ <<"-----BEGIN PKCS7-----">>.
+
pem_end(<<"-----BEGIN CERTIFICATE-----">>) ->
<<"-----END CERTIFICATE-----">>;
pem_end(<<"-----BEGIN RSA PRIVATE KEY-----">>) ->
@@ -211,6 +216,10 @@ pem_end(<<"-----BEGIN PRIVATE KEY-----">>) ->
<<"-----END PRIVATE KEY-----">>;
pem_end(<<"-----BEGIN ENCRYPTED PRIVATE KEY-----">>) ->
<<"-----END ENCRYPTED PRIVATE KEY-----">>;
+pem_end(<<"-----BEGIN CERTIFICATE REQUEST-----">>) ->
+ <<"-----END CERTIFICATE REQUEST-----">>;
+pem_end(<<"-----BEGIN PKCS7-----">>) ->
+ <<"-----END PKCS7-----">>;
pem_end(_) ->
undefined.
@@ -229,7 +238,11 @@ asn1_type(<<"-----BEGIN DH PARAMETERS-----">>) ->
asn1_type(<<"-----BEGIN PRIVATE KEY-----">>) ->
'PrivateKeyInfo';
asn1_type(<<"-----BEGIN ENCRYPTED PRIVATE KEY-----">>) ->
- 'EncryptedPrivateKeyInfo'.
+ 'EncryptedPrivateKeyInfo';
+asn1_type(<<"-----BEGIN CERTIFICATE REQUEST-----">>) ->
+ 'CertificationRequest';
+asn1_type(<<"-----BEGIN PKCS7-----">>) ->
+ 'ContentInfo'.
pem_decrypt() ->
<<"Proc-Type: 4,ENCRYPTED">>.
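
With the new PKCS7 marker, a PEM file holding a ContentInfo structure now
decodes to a typed entry as well. A small sketch, evaluated in an Erlang shell
and assuming a file named "pkcs7_cert.pem" like the test data added further
below:

    %% Split the PEM file; the new marker maps the block to 'ContentInfo'.
    {ok, Bin} = file:read_file("pkcs7_cert.pem").
    [{'ContentInfo', Der, not_encrypted}] = public_key:pem_decode(Bin).
    %% The DER payload decodes with the ContentInfo ASN.1 type.
    ContentInfo = public_key:der_decode('ContentInfo', Der).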
diff --git a/lib/public_key/test/public_key_SUITE.erl b/lib/public_key/test/public_key_SUITE.erl
index f2f30dad6e..2b83bc0a5c 100644
--- a/lib/public_key/test/public_key_SUITE.erl
+++ b/lib/public_key/test/public_key_SUITE.erl
@@ -111,7 +111,7 @@ all() ->
groups() ->
[{pem_decode_encode, [], [dsa_pem, rsa_pem, encrypted_pem,
- dh_pem, cert_pem]},
+ dh_pem, cert_pem, pkcs10_pem]},
{ssh_public_key_decode_encode, [],
[ssh_rsa_public_key, ssh_dsa_public_key, ssh_rfc4716_rsa_comment,
ssh_rfc4716_dsa_comment, ssh_rfc4716_rsa_subject, ssh_known_hosts,
@@ -249,7 +249,42 @@ dh_pem(Config) when is_list(Config) ->
DHParameter = public_key:pem_entry_decode(Entry),
Entry = public_key:pem_entry_encode('DHParameter', DHParameter).
-
+
+%%--------------------------------------------------------------------
+
+pkcs10_pem(doc) ->
+ [""];
+pkcs10_pem(suite) ->
+ [];
+pkcs10_pem(Config) when is_list(Config) ->
+ Datadir = ?config(data_dir, Config),
+ [{'CertificationRequest', DerPKCS10, not_encrypted} = Entry] =
+ erl_make_certs:pem_to_der(filename:join(Datadir, "req.pem")),
+
+ erl_make_certs:der_to_pem(filename:join(Datadir, "new_req.pem"), [Entry]),
+
+ PKCS10 = public_key:der_decode('CertificationRequest', DerPKCS10),
+ PKCS10 = public_key:pem_entry_decode(Entry),
+
+ Entry = public_key:pem_entry_encode('CertificationRequest', PKCS10).
+
+%%--------------------------------------------------------------------
+pkcs7_pem(doc) ->
+ [""];
+pkcs7_pem(suite) ->
+ [];
+pkcs7_pem(Config) when is_list(Config) ->
+ Datadir = ?config(data_dir, Config),
+ [{'ContentInfo', DerPKCS7, not_encrypted} = Entry] =
+ erl_make_certs:pem_to_der(filename:join(Datadir, "pkcs7_cert.pem")),
+
+ erl_make_certs:der_to_pem(filename:join(Datadir, "new_pkcs7_cert.pem"), [Entry]),
+
+ PKCS7 = public_key:der_decode('ContentInfo', DerPKCS7),
+ PKCS7 = public_key:pem_entry_decode(Entry),
+
+ Entry = public_key:pem_entry_encode('ContentInfo', PKCS7).
+
%%--------------------------------------------------------------------
cert_pem(doc) ->
[""];
diff --git a/lib/public_key/test/public_key_SUITE_data/pkcs7_cert.pem b/lib/public_key/test/public_key_SUITE_data/pkcs7_cert.pem
new file mode 100644
index 0000000000..9b450a22c5
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/pkcs7_cert.pem
@@ -0,0 +1,23 @@
+-----BEGIN PKCS7-----
+MIID6QYJKoZIhvcNAQcCoIID2jCCA9YCAQExADALBgkqhkiG9w0BBwGgggO8MIID
+uDCCAyGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBgzEOMAwGA1UEAxMFb3RwQ0Ex
+EzARBgNVBAsTCkVybGFuZyBPVFAxFDASBgNVBAoTC0VyaWNzc29uIEFCMQswCQYD
+VQQGEwJTRTESMBAGA1UEBxMJU3RvY2tob2xtMSUwIwYJKoZIhvcNAQkBFhZwZXRl
+ckBlcml4LmVyaWNzc29uLnNlMB4XDTA4MDEwOTA4MjkzMFoXDTE3MTExNzA4Mjkz
+MFowgYQxDzANBgNVBAMTBnNlcnZlcjETMBEGA1UECxMKRXJsYW5nIE9UUDEUMBIG
+A1UEChMLRXJpY3Nzb24gQUIxCzAJBgNVBAYTAlNFMRIwEAYDVQQHEwlTdG9ja2hv
+bG0xJTAjBgkqhkiG9w0BCQEWFnBldGVyQGVyaXguZXJpY3Nzb24uc2UwgZ8wDQYJ
+KoZIhvcNAQEBBQADgY0AMIGJAoGBAKR20HPrkDGdiavHUyWwFEQwta2dmtF2eQZZ
+i9Xk68UJYbuU7CikHs2srkrwzj0OPIqbp/xOBNzJ7Kch0o4yO6vcEAiSCJ6AB4uS
+M742hrYW4qXgc18K6PqTwSuKr94sn3qQuo4hF/ymCxLrnSicrNpzGOz9A0Lf2+Vk
+6hV0BtdHAgMBAAGjggE3MIIBMzAJBgNVHRMEAjAAMAsGA1UdDwQEAwIF4DAdBgNV
+HQ4EFgQUi19l/qhEwHP/CUeaEjWy4GhOBRIwgbMGA1UdIwSBqzCBqIAUBquANDqk
+uHayvZ0uKOVtkd59AZuhgYykgYkwgYYxETAPBgNVBAMTCGVybGFuZ0NBMRMwEQYD
+VQQLEwpFcmxhbmcgT1RQMRQwEgYDVQQKEwtFcmljc3NvbiBBQjESMBAGA1UEBxMJ
+U3RvY2tob2xtMQswCQYDVQQGEwJTRTElMCMGCSqGSIb3DQEJARYWcGV0ZXJAZXJp
+eC5lcmljc3Nvbi5zZYIBATAhBgNVHREEGjAYgRZwZXRlckBlcml4LmVyaWNzc29u
+LnNlMCEGA1UdEgQaMBiBFnBldGVyQGVyaXguZXJpY3Nzb24uc2UwDQYJKoZIhvcN
+AQEFBQADgYEAzHGutrGMSeC3Di7Z8d65SM7jZLrkkusmL+D2oPVIOGrfZbVuyfDK
+U/nImm99z+lhC/N3JEEpB6PgAYSskfVdBL3LoxbUTaCn/+G3A/G8NfRVIYyANTBe
+NW6ueNpjnauLzcwpyXpu3vp1VBg8wBePtGTBIbRHRgtwwHRXAddE/WuhADEA
+-----END PKCS7-----
diff --git a/lib/public_key/vsn.mk b/lib/public_key/vsn.mk
index c8165fa247..b8af89d040 100644
--- a/lib/public_key/vsn.mk
+++ b/lib/public_key/vsn.mk
@@ -1 +1 @@
-PUBLIC_KEY_VSN = 0.16
+PUBLIC_KEY_VSN = 0.17
diff --git a/lib/runtime_tools/src/dyntrace.erl b/lib/runtime_tools/src/dyntrace.erl
index b4579fd5ce..f7dbef6929 100644
--- a/lib/runtime_tools/src/dyntrace.erl
+++ b/lib/runtime_tools/src/dyntrace.erl
@@ -105,7 +105,7 @@ available() ->
user_trace_s1(_Message) ->
erlang:nif_error(nif_not_loaded).
--spec user_trace_i4s4(iolist(),
+-spec user_trace_i4s4(binary() | undefined,
integer_maybe(), integer_maybe(),
integer_maybe(), integer_maybe(),
iolist_maybe(), iolist_maybe(),
@@ -115,7 +115,7 @@ user_trace_s1(_Message) ->
user_trace_i4s4(_, _, _, _, _, _, _, _, _) ->
erlang:nif_error(nif_not_loaded).
--spec user_trace_n(n_probe_label(), iolist(),
+-spec user_trace_n(n_probe_label(), binary() | undefined,
integer_maybe(), integer_maybe(),
integer_maybe(), integer_maybe(),
iolist_maybe(), iolist_maybe(),
diff --git a/lib/runtime_tools/test/dbg_SUITE.erl b/lib/runtime_tools/test/dbg_SUITE.erl
index 4071b159a1..dfae52ed1d 100644
--- a/lib/runtime_tools/test/dbg_SUITE.erl
+++ b/lib/runtime_tools/test/dbg_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -514,7 +514,7 @@ file_port_schedfix1(Config) when is_list(Config) ->
%% Cleanup
%%
?line ToBeDeleted = filelib:wildcard(FName++"*"++".wraplog"),
- ?line lists:map({file, delete}, ToBeDeleted),
+ ?line lists:map(fun file:delete/1, ToBeDeleted),
% io:format("ToBeDeleted=~p", [ToBeDeleted]),
%%
%% Present the result
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index 0133250979..aac4b462a2 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -142,6 +142,11 @@
some reason, the other algorithm is tried. The default is
to try <c><![CDATA[ssh_rsa]]></c> first.</p>
</item>
+ <tag><c><![CDATA[{pref_public_key_algs, list()}]]></c></tag>
+ <item>
+ <p>List of public key algorithms to try to use; ssh_rsa and ssh_dsa are available.
+ This option overrides <c><![CDATA[{public_key_alg, ssh_rsa | ssh_dsa}]]></c>.</p>
+ </item>
<tag><c><![CDATA[{connect_timeout, timeout()}]]></c></tag>
<item>
<p>Sets a timeout on the transport layer connection. Defaults to infinity.</p>
@@ -178,13 +183,20 @@
<c><![CDATA[add_host_key/3]]></c>. This is considered
somewhat experimental and will be better documented later on.</p>
</item>
+ <tag><c><![CDATA[{quiet_mode, atom() = boolean()}]]></c></tag>
+ <item>
+ <p>If true, the client does not print anything during authorization.</p>
+ </item>
<tag><c><![CDATA[{fd, file_descriptor()}]]></c></tag>
<item>
<p>Allow an existing file-descriptor to be used
(simply passed on to the transport protocol).</p></item>
<tag><c><![CDATA[{ip_v6_disabled, boolean()}]]></c></tag>
<item>
- <p>Determines if SSH shall use IPv6 or not.</p></item>
+ <p>Determines if SSH shall use IPv6 or not.</p></item>
+ <tag><c><![CDATA[{idle_time, timeout()}]]></c></tag>
+ <item>
+ <p>Sets a timeout on the connection when no channels are active. Defaults to infinity.</p></item>
</taglist>
</desc>
</func>
diff --git a/lib/ssh/src/ssh.appup.src b/lib/ssh/src/ssh.appup.src
index d08dbafc32..6ba32e018f 100644
--- a/lib/ssh/src/ssh.appup.src
+++ b/lib/ssh/src/ssh.appup.src
@@ -19,10 +19,12 @@
{"%VSN%",
[
+ {<<"2.1.1">>, [{restart_application, ssh}]},
{<<"2.1">>, [{load_module, ssh_sftpd_file_api, soft_purge, soft_purge, []},
{load_module, ssh_connection, soft_purge, soft_purge, []},
{load_module, ssh_connection_manager, soft_purge, soft_purge, []},
{load_module, ssh_auth, soft_purge, soft_purge, []},
+ {load_module, ssh_connection_handler, soft_purge, soft_purge, []},
{load_module, ssh_channel, soft_purge, soft_purge, []},
{load_module, ssh_file, soft_purge, soft_purge, []}]},
{load_module, ssh, soft_purge, soft_purge, []}]},
@@ -30,14 +32,16 @@
{<<"1\\.*">>, [{restart_application, ssh}]}
],
[
+ {<<"2.1.1">>, [{restart_application, ssh}]},
{<<"2.1">>,[{load_module, ssh_sftpd_file_api, soft_purge, soft_purge, []},
{load_module, ssh_connection, soft_purge, soft_purge, []},
{load_module, ssh_connection_manager, soft_purge, soft_purge, []},
{load_module, ssh_auth, soft_purge, soft_purge, []},
+ {load_module, ssh_connection_handler, soft_purge, soft_purge, []},
{load_module, ssh_channel, soft_purge, soft_purge, []},
{load_module, ssh_file, soft_purge, soft_purge, []}]},
{load_module, ssh, soft_purge, soft_purge, []}]},
{<<"2.0\\.*">>, [{restart_application, ssh}]},
{<<"1\\.*">>, [{restart_application, ssh}]}
]
-}.
+}. \ No newline at end of file
diff --git a/lib/ssh/src/ssh.erl b/lib/ssh/src/ssh.erl
index 3395f73884..a569298056 100644
--- a/lib/ssh/src/ssh.erl
+++ b/lib/ssh/src/ssh.erl
@@ -79,7 +79,7 @@ connect(Host, Port, Options, Timeout) ->
DisableIpv6 = proplists:get_value(ip_v6_disabled, SshOptions, false),
Inet = inetopt(DisableIpv6),
do_connect(Host, Port, [Inet | SocketOptions],
- [{host, Host} | SshOptions], Timeout, DisableIpv6)
+ [{user_pid, self()}, {host, Host} | fix_idle_time(SshOptions)], Timeout, DisableIpv6)
end.
do_connect(Host, Port, SocketOptions, SshOptions, Timeout, DisableIpv6) ->
@@ -91,30 +91,39 @@ do_connect(Host, Port, SocketOptions, SshOptions, Timeout, DisableIpv6) ->
{ok, ConnectionSup} ->
{ok, Manager} =
ssh_connection_sup:connection_manager(ConnectionSup),
- receive
- {Manager, is_connected} ->
- {ok, Manager};
- %% When the connection fails
- %% ssh_connection_sup:connection_manager
- %% might return undefined as the connection manager
- %% could allready have terminated, so we will not
- %% match the Manager in this case
- {_, not_connected, {error, econnrefused}} when DisableIpv6 == false ->
- do_connect(Host, Port, proplists:delete(inet6, SocketOptions),
- SshOptions, Timeout, true);
- {_, not_connected, {error, Reason}} ->
- {error, Reason};
- {_, not_connected, Other} ->
- {error, Other}
- after Timeout ->
- ssh_connection_manager:stop(Manager),
- {error, timeout}
- end
+ msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout)
catch
exit:{noproc, _} ->
{error, ssh_not_started}
end.
-
+msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout) ->
+ receive
+ {Manager, is_connected} ->
+ {ok, Manager};
+ %% When the connection fails
+ %% ssh_connection_sup:connection_manager
+ %% might return undefined as the connection manager
+ %% could already have terminated, so we will not
+ %% match the Manager in this case
+ {_, not_connected, {error, econnrefused}} when DisableIpv6 == false ->
+ do_connect(Host, Port, proplists:delete(inet6, SocketOptions),
+ SshOptions, Timeout, true);
+ {_, not_connected, {error, Reason}} ->
+ {error, Reason};
+ {_, not_connected, Other} ->
+ {error, Other};
+ {From, user_password} ->
+ Pass = io:get_password(),
+ From ! Pass,
+ msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout);
+ {From, question} ->
+ Answer = io:get_line(""),
+ From ! Answer,
+ msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout)
+ after Timeout ->
+ ssh_connection_manager:stop(Manager),
+ {error, timeout}
+ end.
%%--------------------------------------------------------------------
%% Function: close(ConnectionRef) -> ok
%%
@@ -237,6 +246,13 @@ shell(Host, Port, Options) ->
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
+fix_idle_time(SshOptions) ->
+ case proplists:get_value(idle_time, SshOptions) of
+ undefined ->
+ [{idle_time, infinity}|SshOptions];
+ _ ->
+ SshOptions
+ end.
start_daemon(Host, Port, Options, Inet) ->
case handle_options(Options) of
{error, _Reason} = Error ->
@@ -342,6 +358,12 @@ handle_option([{exec, _} = Opt | Rest], SocketOptions, SshOptions) ->
handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
handle_option([{auth_methods, _} = Opt | Rest], SocketOptions, SshOptions) ->
handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
+handle_option([{pref_public_key_algs, _} = Opt | Rest], SocketOptions, SshOptions) ->
+ handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
+handle_option([{quiet_mode, _} = Opt|Rest], SocketOptions, SshOptions) ->
+ handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
+handle_option([{idle_time, _} = Opt | Rest], SocketOptions, SshOptions) ->
+ handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
handle_option([Opt | Rest], SocketOptions, SshOptions) ->
handle_option(Rest, [handle_inet_option(Opt) | SocketOptions], SshOptions).
@@ -357,6 +379,13 @@ handle_ssh_option({user_interaction, Value} = Opt) when Value == true; Value ==
Opt;
handle_ssh_option({public_key_alg, Value} = Opt) when Value == ssh_rsa; Value == ssh_dsa ->
Opt;
+handle_ssh_option({pref_public_key_algs, Value} = Opt) when is_list(Value), length(Value) >= 1 ->
+ case check_pref_algs(Value) of
+ true ->
+ Opt;
+ _ ->
+ throw({error, {eoptions, Opt}})
+ end;
handle_ssh_option({connect_timeout, Value} = Opt) when is_integer(Value); Value == infinity ->
Opt;
handle_ssh_option({user, Value} = Opt) when is_list(Value) ->
@@ -407,6 +436,11 @@ handle_ssh_option({shell, {Module, Function, _}} = Opt) when is_atom(Module),
Opt;
handle_ssh_option({shell, Value} = Opt) when is_function(Value) ->
Opt;
+handle_ssh_option({quiet_mode, Value} = Opt) when Value == true;
+ Value == false ->
+ Opt;
+handle_ssh_option({idle_time, Value} = Opt) when is_integer(Value), Value > 0 ->
+ Opt;
handle_ssh_option(Opt) ->
throw({error, {eoptions, Opt}}).
@@ -424,7 +458,18 @@ handle_inet_option({reuseaddr, _} = Opt) ->
%% Option verified by inet
handle_inet_option(Opt) ->
Opt.
-
+%% Check preferred algs
+check_pref_algs([]) ->
+ true;
+check_pref_algs([H|T]) ->
+ case H of
+ ssh_dsa ->
+ check_pref_algs(T);
+ ssh_rsa ->
+ check_pref_algs(T);
+ _ ->
+ false
+ end.
%% Has IPv6 been disabled?
inetopt(true) ->
inet;
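
check_pref_algs/1 accepts only ssh_rsa and ssh_dsa, so any other entry in
pref_public_key_algs makes handle_ssh_option/1 throw {error, {eoptions, Opt}}
before a connection is attempted. Roughly, the caller should see something
like the following (hypothetical host name, and assuming handle_options/1
surfaces the thrown error as the return value of ssh:connect):

    %% ssh_ecdsa is not a supported preference here, so option validation
    %% is expected to reject it up front.
    {error, {eoptions, {pref_public_key_algs, [ssh_ecdsa]}}} =
        ssh:connect("server.example.com", 22,
                    [{pref_public_key_algs, [ssh_ecdsa]}]).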
diff --git a/lib/ssh/src/ssh_auth.erl b/lib/ssh/src/ssh_auth.erl
index aa452a8e09..c436793dc4 100644
--- a/lib/ssh/src/ssh_auth.erl
+++ b/lib/ssh/src/ssh_auth.erl
@@ -71,7 +71,7 @@ password_msg([#ssh{opts = Opts, io_cb = IoCb,
ssh_bits:install_messages(userauth_passwd_messages()),
Password = case proplists:get_value(password, Opts) of
undefined ->
- user_interaction(IoCb);
+ user_interaction(IoCb, Ssh);
PW ->
PW
end,
@@ -89,10 +89,10 @@ password_msg([#ssh{opts = Opts, io_cb = IoCb,
Ssh)
end.
-user_interaction(ssh_no_io) ->
+user_interaction(ssh_no_io, _) ->
not_ok;
-user_interaction(IoCb) ->
- IoCb:read_password("ssh password: ").
+user_interaction(IoCb, Ssh) ->
+ IoCb:read_password("ssh password: ", Ssh).
%% See RFC 4256 for info on keyboard-interactive
@@ -118,15 +118,37 @@ init_userauth_request_msg(#ssh{opts = Opts} = Ssh) ->
service = "ssh-connection",
method = "none",
data = <<>>},
- FirstAlg = algorithm(proplists:get_value(public_key_alg, Opts,
- ?PREFERRED_PK_ALG)),
- SecondAlg = other_alg(FirstAlg),
- AllowUserInt = proplists:get_value(user_interaction, Opts, true),
- Prefs = method_preference(FirstAlg, SecondAlg, AllowUserInt),
- ssh_transport:ssh_packet(Msg, Ssh#ssh{user = User,
- userauth_preference = Prefs,
- userauth_methods = none,
- service = "ssh-connection"});
+ case proplists:get_value(pref_public_key_algs, Opts, false) of
+ false ->
+ FirstAlg = algorithm(proplists:get_value(public_key_alg, Opts,
+ ?PREFERRED_PK_ALG)),
+ SecondAlg = other_alg(FirstAlg),
+ AllowUserInt = proplists:get_value(user_interaction, Opts, true),
+ Prefs = method_preference(FirstAlg, SecondAlg, AllowUserInt),
+ ssh_transport:ssh_packet(Msg, Ssh#ssh{user = User,
+ userauth_preference = Prefs,
+ userauth_methods = none,
+ service = "ssh-connection"});
+ Algs ->
+ FirstAlg = algorithm(lists:nth(1, Algs)),
+ case length(Algs) =:= 2 of
+ true ->
+ SecondAlg = other_alg(FirstAlg),
+ AllowUserInt = proplists:get_value(user_interaction, Opts, true),
+ Prefs = method_preference(FirstAlg, SecondAlg, AllowUserInt),
+ ssh_transport:ssh_packet(Msg, Ssh#ssh{user = User,
+ userauth_preference = Prefs,
+ userauth_methods = none,
+ service = "ssh-connection"});
+ _ ->
+ AllowUserInt = proplists:get_value(user_interaction, Opts, true),
+ Prefs = method_preference(FirstAlg, AllowUserInt),
+ ssh_transport:ssh_packet(Msg, Ssh#ssh{user = User,
+ userauth_preference = Prefs,
+ userauth_methods = none,
+ service = "ssh-connection"})
+ end
+ end;
{error, no_user} ->
ErrStr = "Could not determine the users name",
throw(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_ILLEGAL_USER_NAME,
@@ -287,6 +309,15 @@ method_preference(Alg1, Alg2, false) ->
{"publickey", ?MODULE, publickey_msg,[Alg2]},
{"password", ?MODULE, password_msg, []}
].
+method_preference(Alg1, true) ->
+ [{"publickey", ?MODULE, publickey_msg, [Alg1]},
+ {"password", ?MODULE, password_msg, []},
+ {"keyboard-interactive", ?MODULE, keyboard_interactive_msg, []}
+ ];
+method_preference(Alg1, false) ->
+ [{"publickey", ?MODULE, publickey_msg, [Alg1]},
+ {"password", ?MODULE, password_msg, []}
+ ].
user_name(Opts) ->
Env = case os:type() of
@@ -370,11 +401,11 @@ keyboard_interact_get_responses(IoCb, Opts, Name, Instr, PromptInfos) ->
%% Special case/fallback for just one prompt
%% (assumed to be the password prompt)
case proplists:get_value(password, Opts) of
- undefined -> keyboard_interact(IoCb, Name, Instr, PromptInfos);
+ undefined -> keyboard_interact(IoCb, Name, Instr, PromptInfos, Opts);
PW -> [PW]
end;
undefined ->
- keyboard_interact(IoCb, Name, Instr, PromptInfos);
+ keyboard_interact(IoCb, Name, Instr, PromptInfos, Opts);
KbdInteractFun ->
Prompts = lists:map(fun({Prompt, _Echo}) -> Prompt end,
PromptInfos),
@@ -388,15 +419,15 @@ keyboard_interact_get_responses(IoCb, Opts, Name, Instr, PromptInfos) ->
end
end.
-keyboard_interact(IoCb, Name, Instr, Prompts) ->
+keyboard_interact(IoCb, Name, Instr, Prompts, Opts) ->
if Name /= "" -> IoCb:format("~s", [Name]);
true -> ok
end,
if Instr /= "" -> IoCb:format("~s", [Instr]);
true -> ok
end,
- lists:map(fun({Prompt, true}) -> IoCb:read_line(Prompt);
- ({Prompt, false}) -> IoCb:read_password(Prompt)
+ lists:map(fun({Prompt, true}) -> IoCb:read_line(Prompt, Opts);
+ ({Prompt, false}) -> IoCb:read_password(Prompt, Opts)
end,
Prompts).
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 5b3d1b8a1b..d8950a7b67 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -718,8 +718,18 @@ init_ssh(server = Role, Vsn, Version, Options, Socket) ->
available_host_keys = supported_host_keys(Role, KeyCb, Options)
}.
-supported_host_keys(client, _, _) ->
- ["ssh-rsa", "ssh-dss"];
+supported_host_keys(client, _, Options) ->
+ try
+ case extract_algs(proplists:get_value(pref_public_key_algs, Options, false), []) of
+ false ->
+ ["ssh-rsa", "ssh-dss"];
+ Algs ->
+ Algs
+ end
+ catch
+ exit:Reason ->
+ {stop, {shutdown, Reason}}
+ end;
supported_host_keys(server, KeyCb, Options) ->
lists:foldl(fun(Type, Acc) ->
case available_host_key(KeyCb, Type, Options) of
@@ -731,7 +741,19 @@ supported_host_keys(server, KeyCb, Options) ->
end, [],
%% Prefered alg last so no need to reverse
["ssh-dss", "ssh-rsa"]).
-
+extract_algs(false, _) ->
+ false;
+extract_algs([],[]) ->
+ false;
+extract_algs([], NewList) ->
+ lists:reverse(NewList);
+extract_algs([H|T], NewList) ->
+ case H of
+ ssh_dsa ->
+ extract_algs(T, ["ssh-dss"|NewList]);
+ ssh_rsa ->
+ extract_algs(T, ["ssh-rsa"|NewList])
+ end.
available_host_key(KeyCb, "ssh-dss"= Alg, Opts) ->
case KeyCb:host_key('ssh-dss', Opts) of
{ok, _} ->
diff --git a/lib/ssh/src/ssh_connection_manager.erl b/lib/ssh/src/ssh_connection_manager.erl
index 422d9356d5..0c1eee5186 100644
--- a/lib/ssh/src/ssh_connection_manager.erl
+++ b/lib/ssh/src/ssh_connection_manager.erl
@@ -62,6 +62,7 @@
latest_channel_id = 0,
opts,
channel_args,
+ idle_timer_ref, % timerref
connected
}).
@@ -203,6 +204,8 @@ init([client, Opts]) ->
ChannelPid = proplists:get_value(channel_pid, Opts),
self() !
{start_connection, client, [Parent, Address, Port, SocketOpts, Options]},
+ TimerRef = get_idle_time(Options),
+
{ok, #state{role = client,
client = ChannelPid,
connection_state = #connection{channel_cache = Cache,
@@ -211,6 +214,7 @@ init([client, Opts]) ->
connection_supervisor = Parent,
requests = []},
opts = Opts,
+ idle_timer_ref = TimerRef,
connected = false}}.
%%--------------------------------------------------------------------
@@ -230,6 +234,13 @@ handle_call({request, ChannelPid, ChannelId, Type, Data}, From, State0) ->
%% channel is sent later when reply arrives from the connection
%% handler.
lists:foreach(fun send_msg/1, Replies),
+ SshOpts = proplists:get_value(ssh_opts, State0#state.opts),
+ case proplists:get_value(idle_time, SshOpts) of
+ infinity ->
+ ok;
+ _IdleTime ->
+ erlang:send_after(5000, self(), {check_cache, [], []})
+ end,
{noreply, State};
handle_call({request, ChannelId, Type, Data}, From, State0) ->
@@ -358,7 +369,7 @@ handle_call({open, ChannelPid, Type, InitialWindowSize, MaxPacketSize, Data},
recv_packet_size = MaxPacketSize},
ssh_channel:cache_update(Cache, Channel),
State = add_request(true, ChannelId, From, State1),
- {noreply, State};
+ {noreply, remove_timer_ref(State)};
handle_call({send_window, ChannelId}, _From,
#state{connection_state =
@@ -403,6 +414,13 @@ handle_call({close, ChannelId}, _,
send_msg({connection_reply, Pid,
ssh_connection:channel_close_msg(Id)}),
ssh_channel:cache_update(Cache, Channel#channel{sent_close = true}),
+ SshOpts = proplists:get_value(ssh_opts, State#state.opts),
+ case proplists:get_value(idle_time, SshOpts) of
+ infinity ->
+ ok;
+ _IdleTime ->
+ erlang:send_after(5000, self(), {check_cache, [], []})
+ end,
{reply, ok, State};
undefined ->
{reply, ok, State}
@@ -523,7 +541,10 @@ handle_info({start_connection, client,
Pid ! {self(), not_connected, Reason},
{stop, {shutdown, normal}, State}
end;
-
+handle_info({check_cache, _ , _},
+ #state{connection_state =
+ #connection{channel_cache = Cache}} = State) ->
+ {noreply, check_cache(State, Cache)};
handle_info({ssh_cm, _Sender, Msg}, State0) ->
%% Backwards compatibility!
State = cm_message(Msg, State0),
@@ -536,7 +557,7 @@ handle_info({same_user, _}, State) ->
handle_info(ssh_connected, #state{role = client, client = Pid}
= State) ->
Pid ! {self(), is_connected},
- {noreply, State#state{connected = true}};
+ {noreply, State#state{connected = true, opts = handle_password(State#state.opts)}};
handle_info(ssh_connected, #state{role = server} = State) ->
{noreply, State#state{connected = true}};
@@ -549,6 +570,47 @@ handle_info({'DOWN', _Ref, process, ChannelPid, _Reason}, State) ->
handle_info({'EXIT', _Sup, Reason}, State) ->
{stop, Reason, State}.
+handle_password(Opts) ->
+ handle_rsa_password(handle_dsa_password(handle_normal_password(Opts))).
+handle_normal_password(Opts) ->
+ case proplists:get_value(ssh_opts, Opts, false) of
+ false ->
+ Opts;
+ SshOpts ->
+ case proplists:get_value(password, SshOpts, false) of
+ false ->
+ Opts;
+ _Password ->
+ NewOpts = [{password, undefined}|lists:keydelete(password, 1, SshOpts)],
+ [{ssh_opts, NewOpts}|lists:keydelete(ssh_opts, 1, Opts)]
+ end
+ end.
+handle_dsa_password(Opts) ->
+ case proplists:get_value(ssh_opts, Opts, false) of
+ false ->
+ Opts;
+ SshOpts ->
+ case proplists:get_value(dsa_pass_phrase, SshOpts, false) of
+ false ->
+ Opts;
+ _Password ->
+ NewOpts = [{dsa_pass_phrase, undefined}|lists:keydelete(dsa_pass_phrase, 1, SshOpts)],
+ [{ssh_opts, NewOpts}|lists:keydelete(ssh_opts, 1, Opts)]
+ end
+ end.
+handle_rsa_password(Opts) ->
+ case proplists:get_value(ssh_opts, Opts, false) of
+ false ->
+ Opts;
+ SshOpts ->
+ case proplists:get_value(rsa_pass_phrase, SshOpts, false) of
+ false ->
+ Opts;
+ _Password ->
+ NewOpts = [{rsa_pass_phrase, undefined}|lists:keydelete(rsa_pass_phrase, 1, SshOpts)],
+ [{ssh_opts, NewOpts}|lists:keydelete(ssh_opts, 1, Opts)]
+ end
+ end.
%%--------------------------------------------------------------------
%% Function: terminate(Reason, State) -> void()
%% Description: This function is called by a gen_server when it is about to
@@ -580,6 +642,45 @@ code_change(_OldVsn, State, _Extra) ->
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
+get_idle_time(SshOptions) ->
+ case proplists:get_value(idle_time, SshOptions) of
+ infinity ->
+ infinity;
+ _IdleTime -> %% We don't want to set the timeout on the first connect
+ undefined
+ end.
+check_cache(State, Cache) ->
+ %% Check the number of entries in Cache
+ case proplists:get_value(size, ets:info(Cache)) of
+ 0 ->
+ Opts = proplists:get_value(ssh_opts, State#state.opts),
+ case proplists:get_value(idle_time, Opts) of
+ infinity ->
+ State;
+ undefined ->
+ State;
+ Time ->
+ case State#state.idle_timer_ref of
+ undefined ->
+ TimerRef = erlang:send_after(Time, self(), {'EXIT', [], "Timeout"}),
+ State#state{idle_timer_ref=TimerRef};
+ _ ->
+ State
+ end
+ end;
+ _ ->
+ State
+ end.
+remove_timer_ref(State) ->
+ case State#state.idle_timer_ref of
+ infinity -> %% If the timer is not activated
+ State;
+ undefined -> %% If we have already cancelled the timer
+ State;
+ TimerRef -> %% Timer is active
+ erlang:cancel_timer(TimerRef),
+ State#state{idle_timer_ref = undefined}
+ end.
channel_data(Id, Type, Data, Connection0, ConnectionPid, From, State) ->
case ssh_connection:channel_data(Id, Type, Data, Connection0,
ConnectionPid, From) of
@@ -677,7 +778,7 @@ handle_channel_down(ChannelPid, #state{connection_state =
(_,Acc) ->
Acc
end, [], Cache),
- {{replies, []}, State}.
+ {{replies, []}, check_cache(State, Cache)}.
update_sys(Cache, Channel, Type, ChannelPid) ->
ssh_channel:cache_update(Cache,
diff --git a/lib/ssh/src/ssh_io.erl b/lib/ssh/src/ssh_io.erl
index 1dbd097423..17a7cebb4a 100644
--- a/lib/ssh/src/ssh_io.erl
+++ b/lib/ssh/src/ssh_io.erl
@@ -23,37 +23,52 @@
-module(ssh_io).
--export([yes_no/1, read_password/1, read_line/1, format/2]).
+-export([yes_no/2, read_password/2, read_line/2, format/2]).
-import(lists, [reverse/1]).
+-include("ssh.hrl").
+read_line(Prompt, Ssh) ->
+ format("~s", [listify(Prompt)]),
+ proplists:get_value(user_pid, Ssh) ! {self(), question},
+ receive
+ Answer ->
+ Answer
+ end.
-read_line(Prompt) when is_list(Prompt) ->
- io:get_line(list_to_atom(Prompt));
-read_line(Prompt) when is_atom(Prompt) ->
- io:get_line(Prompt).
-
-read_ln(Prompt) ->
- trim(read_line(Prompt)).
-
-yes_no(Prompt) ->
+yes_no(Prompt, Ssh) ->
io:format("~s [y/n]?", [Prompt]),
- case read_ln('') of
- "y" -> yes;
- "n" -> no;
- "Y" -> yes;
- "N" -> no;
- _ ->
- io:format("please answer y or n\n"),
- yes_no(Prompt)
+ proplists:get_value(user_pid, Ssh#ssh.opts) ! {self(), question},
+ receive
+ Answer ->
+ case trim(Answer) of
+ "y" -> yes;
+ "n" -> no;
+ "Y" -> yes;
+ "N" -> no;
+ y -> yes;
+ n -> no;
+ _ ->
+ io:format("please answer y or n\n"),
+ yes_no(Prompt, Ssh)
+ end
end.
-read_password(Prompt) ->
+read_password(Prompt, Ssh) ->
format("~s", [listify(Prompt)]),
- case io:get_password() of
- "" ->
- read_password(Prompt);
- Pass -> Pass
+ case is_list(Ssh) of
+ false ->
+ proplists:get_value(user_pid, Ssh#ssh.opts) ! {self(), user_password};
+ _ ->
+ proplists:get_value(user_pid, Ssh) ! {self(), user_password}
+ end,
+ receive
+ Answer ->
+ case Answer of
+ "" ->
+ read_password(Prompt, Ssh);
+ Pass -> Pass
+ end
end.
listify(A) when is_atom(A) ->
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index 1f912c9bdf..7f6e7d9946 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -133,7 +133,7 @@ kex_dh_gex_messages() ->
].
yes_no(Ssh, Prompt) ->
- (Ssh#ssh.io_cb):yes_no(Prompt).
+ (Ssh#ssh.io_cb):yes_no(Prompt, Ssh).
connect(ConnectionSup, Address, Port, SocketOpts, Opts) ->
Timeout = proplists:get_value(connect_timeout, Opts, infinity),
diff --git a/lib/ssh/test/ssh_basic_SUITE.erl b/lib/ssh/test/ssh_basic_SUITE.erl
index 7a641c92c1..5fec7f0cd7 100644
--- a/lib/ssh/test/ssh_basic_SUITE.erl
+++ b/lib/ssh/test/ssh_basic_SUITE.erl
@@ -42,14 +42,15 @@ all() ->
{group, dsa_pass_key},
{group, rsa_pass_key},
{group, internal_error},
+ {group, idle_time},
daemon_already_started,
server_password_option,
server_userpassword_option,
close].
groups() ->
- [{dsa_key, [], [send, exec, exec_compressed, shell, known_hosts]},
- {rsa_key, [], [send, exec, exec_compressed, shell, known_hosts]},
+ [{dsa_key, [], [send, exec, exec_compressed, shell, known_hosts, idle_time]},
+ {rsa_key, [], [send, exec, exec_compressed, shell, known_hosts, idle_time]},
{dsa_pass_key, [], [pass_phrase]},
{rsa_pass_key, [], [pass_phrase]},
{internal_error, [], [internal_error]}
@@ -234,7 +235,27 @@ exec_compressed(Config) when is_list(Config) ->
ssh:stop_daemon(Pid).
%%--------------------------------------------------------------------
+idle_time(doc) ->
+ ["Idle timeout test"];
+idle_time(Config) ->
+ SystemDir = filename:join(?config(priv_dir, Config), system),
+ UserDir = ?config(priv_dir, Config),
+ {Pid, Host, Port} = ssh_test_lib:daemon([{system_dir, SystemDir},
+ {user_dir, UserDir},
+ {failfun, fun ssh_test_lib:failfun/2}]),
+ ConnectionRef =
+ ssh_test_lib:connect(Host, Port, [{silently_accept_hosts, true},
+ {user_dir, UserDir},
+ {user_interaction, false}]),
+ {ok, Id} = ssh_connection:session_channel(ConnectionRef, 1000),
+ ssh_connection:close(ConnectionRef, Id),
+ receive
+ after 10000 ->
+ {error,channel_closed} = ssh_connection:session_channel(ConnectionRef, 1000)
+ end,
+ ssh:stop_daemon(Pid).
+%%--------------------------------------------------------------------
shell(doc) ->
["Test that ssh:shell/2 works"];
shell(Config) when is_list(Config) ->
diff --git a/lib/ssh/vsn.mk b/lib/ssh/vsn.mk
index defa47f824..921ec2206a 100644
--- a/lib/ssh/vsn.mk
+++ b/lib/ssh/vsn.mk
@@ -1,5 +1,5 @@
#-*-makefile-*- ; force emacs to enter makefile-mode
-SSH_VSN = 2.1.1
+SSH_VSN = 2.1.2
APP_VSN = "ssh-$(SSH_VSN)"
diff --git a/lib/ssl/src/ssl.appup.src b/lib/ssl/src/ssl.appup.src
index 76550fa04b..c118c129e8 100644
--- a/lib/ssl/src/ssl.appup.src
+++ b/lib/ssl/src/ssl.appup.src
@@ -1,14 +1,21 @@
%% -*- erlang -*-
{"%VSN%",
[
- {"5.0.1", [{restart_application, ssl}]},
- {"5.0", [{restart_application, ssl}]},
+ {"5.1", [
+ {load_module, ssl_connection, soft_purge, soft_purge, []}
+ ]
+ },
+ {<<"5.0\\*">>, [{restart_application, ssl}]},
{<<"4\\.*">>, [{restart_application, ssl}]},
{<<"3\\.*">>, [{restart_application, ssl}]}
],
[
- {"5.0.1", [{restart_application, ssl}]},
- {"5.0", [{restart_application, ssl}]},
+ {"5.1", [
+ {load_module, ssl_connection, soft_purge, soft_purge, []}
+ ]
+ },
+ {"5.1", [{restart_application, ssl}]},
+ {<<"5.0\\*">>, [{restart_application, ssl}]},
{<<"4\\.*">>, [{restart_application, ssl}]},
{<<"3\\.*">>, [{restart_application, ssl}]}
]}.
diff --git a/lib/ssl/src/ssl.erl b/lib/ssl/src/ssl.erl
index 7788f758ac..771bfa5739 100644
--- a/lib/ssl/src/ssl.erl
+++ b/lib/ssl/src/ssl.erl
@@ -869,10 +869,10 @@ internal_inet_values() ->
socket_options(InetValues) ->
#socket_options{
- mode = proplists:get_value(mode, InetValues),
- header = proplists:get_value(header, InetValues),
- active = proplists:get_value(active, InetValues),
- packet = proplists:get_value(packet, InetValues),
+ mode = proplists:get_value(mode, InetValues, lists),
+ header = proplists:get_value(header, InetValues, 0),
+ active = proplists:get_value(active, InetValues, active),
+ packet = proplists:get_value(packet, InetValues, 0),
packet_size = proplists:get_value(packet_size, InetValues)
}.
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index 1319b54d6b..87cf49d07d 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -119,7 +119,7 @@ send(Pid, Data) ->
sync_send_all_state_event(Pid, {application_data,
%% iolist_to_binary should really
%% be called iodata_to_binary()
- erlang:iolist_to_binary(Data)}, infinity).
+ erlang:iolist_to_binary(Data)}).
%%--------------------------------------------------------------------
-spec recv(pid(), integer(), timeout()) ->
@@ -128,7 +128,7 @@ send(Pid, Data) ->
%% Description: Receives data when active = false
%%--------------------------------------------------------------------
recv(Pid, Length, Timeout) ->
- sync_send_all_state_event(Pid, {recv, Length}, Timeout).
+ sync_send_all_state_event(Pid, {recv, Length, Timeout}).
%%--------------------------------------------------------------------
-spec connect(host(), inet:port_number(), port(), {#ssl_options{}, #socket_options{}},
pid(), tuple(), timeout()) ->
@@ -165,7 +165,7 @@ ssl_accept(Port, Socket, Opts, User, CbInfo, Timeout) ->
%% Description: Starts ssl handshake.
%%--------------------------------------------------------------------
handshake(#sslsocket{pid = Pid}, Timeout) ->
- case sync_send_all_state_event(Pid, start, Timeout) of
+ case sync_send_all_state_event(Pid, {start, Timeout}) of
connected ->
ok;
Error ->
@@ -331,15 +331,15 @@ init([Role, Host, Port, Socket, {SSLOpts0, _} = Options, User, CbInfo]) ->
#state{}) -> gen_fsm_state_return().
%%--------------------------------------------------------------------
hello(start, #state{host = Host, port = Port, role = client,
- ssl_options = SslOpts,
- session = #session{own_certificate = Cert} = Session0,
- session_cache = Cache, session_cache_cb = CacheCb,
- transport_cb = Transport, socket = Socket,
- connection_states = ConnectionStates0,
- renegotiation = {Renegotiation, _}} = State0) ->
+ ssl_options = SslOpts,
+ session = #session{own_certificate = Cert} = Session0,
+ session_cache = Cache, session_cache_cb = CacheCb,
+ transport_cb = Transport, socket = Socket,
+ connection_states = ConnectionStates0,
+ renegotiation = {Renegotiation, _}} = State0) ->
Hello = ssl_handshake:client_hello(Host, Port, ConnectionStates0, SslOpts,
Cache, CacheCb, Renegotiation, Cert),
-
+
Version = Hello#client_hello.client_version,
Handshake0 = ssl_handshake:init_handshake_history(),
{BinMsg, ConnectionStates, Handshake} =
@@ -370,23 +370,22 @@ hello(#server_hello{cipher_suite = CipherSuite,
renegotiation = {Renegotiation, _},
ssl_options = SslOptions} = State0) ->
case ssl_handshake:hello(Hello, SslOptions, ConnectionStates0, Renegotiation) of
- #alert{} = Alert ->
- handle_own_alert(Alert, ReqVersion, hello, State0),
- {stop, normal, State0};
-
+ #alert{} = Alert ->
+ handle_own_alert(Alert, ReqVersion, hello, State0),
+ {stop, {shutdown, own_alert}, State0};
{Version, NewId, ConnectionStates, NextProtocol} ->
{KeyAlgorithm, _, _, _} =
ssl_cipher:suite_definition(CipherSuite),
PremasterSecret = make_premaster_secret(ReqVersion, KeyAlgorithm),
-
- NewNextProtocol = case NextProtocol of
- undefined ->
- State0#state.next_protocol;
- _ ->
- NextProtocol
- end,
-
+
+ NewNextProtocol = case NextProtocol of
+ undefined ->
+ State0#state.next_protocol;
+ _ ->
+ NextProtocol
+ end,
+
State = State0#state{key_algorithm = KeyAlgorithm,
hashsign_algorithm = default_hashsign(Version, KeyAlgorithm),
negotiated_version = Version,
@@ -394,13 +393,13 @@ hello(#server_hello{cipher_suite = CipherSuite,
premaster_secret = PremasterSecret,
expecting_next_protocol_negotiation = NextProtocol =/= undefined,
next_protocol = NewNextProtocol},
-
+
case ssl_session:is_new(OldId, NewId) of
true ->
handle_new_session(NewId, CipherSuite, Compression,
State#state{connection_states = ConnectionStates});
false ->
- handle_resumed_session(NewId, State#state{connection_states = ConnectionStates})
+ handle_resumed_session(NewId, State#state{connection_states = ConnectionStates})
end
end;
@@ -419,8 +418,7 @@ hello(Hello = #client_hello{client_version = ClientVersion},
negotiated_version = Version,
session = Session});
#alert{} = Alert ->
- handle_own_alert(Alert, ClientVersion, hello, State),
- {stop, normal, State}
+ handle_own_alert(Alert, ClientVersion, hello, State)
end;
hello(timeout, State) ->
@@ -451,8 +449,7 @@ abbreviated(#finished{verify_data = Data} = Finished,
next_state_connection(abbreviated,
ack_connection(State#state{connection_states = ConnectionStates}));
#alert{} = Alert ->
- handle_own_alert(Alert, Version, abbreviated, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, abbreviated, State)
end;
abbreviated(#finished{verify_data = Data} = Finished,
@@ -472,8 +469,7 @@ abbreviated(#finished{verify_data = Data} = Finished,
connection_states =
ConnectionStates}));
#alert{} = Alert ->
- handle_own_alert(Alert, Version, abbreviated, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, abbreviated, State)
end;
abbreviated(timeout, State) ->
@@ -497,8 +493,7 @@ certify(#certificate{asn1_certificates = []},
fail_if_no_peer_cert = true}} =
State) ->
Alert = ?ALERT_REC(?FATAL,?HANDSHAKE_FAILURE),
- handle_own_alert(Alert, Version, certify, State),
- {stop, normal, State};
+ handle_own_alert(Alert, Version, certify, State);
certify(#certificate{asn1_certificates = []},
#state{role = server,
@@ -521,8 +516,7 @@ certify(#certificate{} = Cert,
handle_peer_cert(PeerCert, PublicKeyInfo,
State#state{client_certificate_requested = false});
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, certify, State)
end;
certify(#server_key_exchange{} = KeyExchangeMsg,
@@ -534,8 +528,7 @@ certify(#server_key_exchange{} = KeyExchangeMsg,
{Record, State} = next_record(State1),
next_state(certify, certify, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end;
certify(#server_key_exchange{} = Msg,
@@ -559,8 +552,7 @@ certify(#server_hello_done{},
State = State0#state{connection_states = ConnectionStates},
client_certify_and_key_exchange(State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end;
%% Master secret is calculated from premaster_secret
@@ -578,8 +570,7 @@ certify(#server_hello_done{},
session = Session},
client_certify_and_key_exchange(State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end;
certify(#client_key_exchange{} = Msg,
@@ -595,8 +586,7 @@ certify(#client_key_exchange{exchange_keys = Keys},
certify_client_key_exchange(ssl_handshake:decode_client_key(Keys, KeyAlg, Version), State)
catch
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, certify, State)
end;
@@ -621,8 +611,7 @@ certify_client_key_exchange(#encrypted_premaster_secret{premaster_secret= EncPMS
{Record, State} = next_record(State1),
next_state(certify, cipher, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end;
certify_client_key_exchange(#client_diffie_hellman_public{dh_public = ClientPublicDhKey},
@@ -635,8 +624,7 @@ certify_client_key_exchange(#client_diffie_hellman_public{dh_public = ClientPubl
{Record, State} = next_record(State1),
next_state(certify, cipher, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end.
%%--------------------------------------------------------------------
@@ -665,8 +653,7 @@ cipher(#certificate_verify{signature = Signature, hashsign_algorithm = CertHashS
{Record, State} = next_record(State0),
next_state(cipher, cipher, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, cipher, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, cipher, State0)
end;
% client must send a next protocol message if we are expecting it
@@ -692,8 +679,7 @@ cipher(#finished{verify_data = Data} = Finished,
Session = register_session(Role, Host, Port, Session0),
cipher_role(Role, Data, Session, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, cipher, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, cipher, State)
end;
% only allowed to send next_protocol message after change cipher spec
@@ -788,7 +774,8 @@ handle_sync_event({application_data, Data}, From, StateName,
State#state{send_queue = queue:in({From, Data}, Queue)},
get_timeout(State)};
-handle_sync_event(start, StartFrom, hello, State) ->
+handle_sync_event({start, Timeout}, StartFrom, hello, State) ->
+ start_or_recv_cancel_timer(Timeout, StartFrom),
hello(start, State#state{start_or_recv_from = StartFrom});
%% The two clauses below could happen if a server upgrades a socket in
@@ -798,12 +785,14 @@ handle_sync_event(start, StartFrom, hello, State) ->
%% mode before telling the client that it is willing to upgrade
%% and before calling ssl:ssl_accept/2. These clauses are
%% here to make sure it is the user's problem and not ours if
-%% they upgrade a active socket.
-handle_sync_event(start, _, connection, State) ->
+%% they upgrade an active socket.
+handle_sync_event({start,_}, _, connection, State) ->
{reply, connected, connection, State, get_timeout(State)};
-handle_sync_event(start, _From, error, {Error, State = #state{}}) ->
+handle_sync_event({start,_}, _From, error, {Error, State = #state{}}) ->
{stop, {shutdown, Error}, {error, Error}, State};
-handle_sync_event(start, StartFrom, StateName, State) ->
+
+handle_sync_event({start, Timeout}, StartFrom, StateName, State) ->
+ start_or_recv_cancel_timer(Timeout, StartFrom),
{next_state, StateName, State#state{start_or_recv_from = StartFrom}, get_timeout(State)};
handle_sync_event(close, _, StateName, State) ->
@@ -835,12 +824,14 @@ handle_sync_event({shutdown, How0}, _, StateName,
{stop, normal, Error, State}
end;
-handle_sync_event({recv, N}, RecvFrom, connection = StateName, State0) ->
+handle_sync_event({recv, N, Timeout}, RecvFrom, connection = StateName, State0) ->
+ start_or_recv_cancel_timer(Timeout, RecvFrom),
passive_receive(State0#state{bytes_to_read = N, start_or_recv_from = RecvFrom}, StateName);
%% Doing renegotiate wait with handling request until renegotiate is
%% finished. Will be handled by next_state_is_connection/2.
-handle_sync_event({recv, N}, RecvFrom, StateName, State) ->
+handle_sync_event({recv, N, Timeout}, RecvFrom, StateName, State) ->
+ start_or_recv_cancel_timer(Timeout, RecvFrom),
{next_state, StateName, State#state{bytes_to_read = N, start_or_recv_from = RecvFrom},
get_timeout(State)};
@@ -963,7 +954,7 @@ handle_info({Protocol, _, Data}, StateName,
next_state(StateName, StateName, Record, State);
#alert{} = Alert ->
handle_normal_shutdown(Alert, StateName, State0),
- {stop, normal, State0}
+ {stop, {shutdown, own_alert}, State0}
end;
handle_info({CloseTag, Socket}, StateName,
@@ -984,7 +975,7 @@ handle_info({CloseTag, Socket}, StateName,
ok
end,
handle_normal_shutdown(?ALERT_REC(?FATAL, ?CLOSE_NOTIFY), StateName, State),
- {stop, normal, State};
+ {stop, {shutdown, transport_closed}, State};
handle_info({ErrorTag, Socket, econnaborted}, StateName,
#state{socket = Socket, start_or_recv_from = StartFrom, role = Role,
@@ -1005,7 +996,14 @@ handle_info({'DOWN', MonitorRef, _, _, _}, _,
handle_info(allow_renegotiate, StateName, State) ->
{next_state, StateName, State#state{allow_renegotiate = true}, get_timeout(State)};
-
+
+handle_info({cancel_start_or_recv, RecvFrom}, connection = StateName, #state{start_or_recv_from = RecvFrom} = State) ->
+ gen_fsm:reply(RecvFrom, {error, timeout}),
+ {next_state, StateName, State#state{start_or_recv_from = undefined}, get_timeout(State)};
+
+handle_info({cancel_start_or_recv, _RecvFrom}, StateName, State) ->
+ {next_state, StateName, State, get_timeout(State)};
+
handle_info(Msg, StateName, State) ->
Report = io_lib:format("SSL: Got unexpected info: ~p ~n", [Msg]),
error_logger:info_report(Report),
@@ -1022,6 +1020,20 @@ terminate(_, _, #state{terminated = true}) ->
%% we want to guarantee that Transport:close has been called
%% when ssl:close/1 returns.
ok;
+
+terminate({shutdown, transport_closed}, StateName, #state{send_queue = SendQueue,
+ renegotiation = Renegotiate} = State) ->
+ handle_unrecv_data(StateName, State),
+ handle_trusted_certs_db(State),
+ notify_senders(SendQueue),
+ notify_renegotiater(Renegotiate);
+
+terminate({shutdown, own_alert}, _StateName, #state{send_queue = SendQueue,
+ renegotiation = Renegotiate} = State) ->
+ handle_trusted_certs_db(State),
+ notify_senders(SendQueue),
+ notify_renegotiater(Renegotiate);
+
terminate(Reason, connection, #state{negotiated_version = Version,
connection_states = ConnectionStates,
transport_cb = Transport,
@@ -1032,16 +1044,14 @@ terminate(Reason, connection, #state{negotiated_version = Version,
notify_renegotiater(Renegotiate),
BinAlert = terminate_alert(Reason, Version, ConnectionStates),
Transport:send(Socket, BinAlert),
- workaround_transport_delivery_problems(Socket, Transport, Reason),
- Transport:close(Socket);
+ workaround_transport_delivery_problems(Socket, Transport);
-terminate(Reason, _StateName, #state{transport_cb = Transport,
+terminate(_Reason, _StateName, #state{transport_cb = Transport,
socket = Socket, send_queue = SendQueue,
renegotiation = Renegotiate} = State) ->
handle_trusted_certs_db(State),
notify_senders(SendQueue),
notify_renegotiater(Renegotiate),
- workaround_transport_delivery_problems(Socket, Transport, Reason),
Transport:close(Socket).
%%--------------------------------------------------------------------
@@ -1216,18 +1226,13 @@ init_diffie_hellman(DbHandle,_, DHParamFile, server) ->
end.
sync_send_all_state_event(FsmPid, Event) ->
- sync_send_all_state_event(FsmPid, Event, infinity).
-
-sync_send_all_state_event(FsmPid, Event, Timeout) ->
- try gen_fsm:sync_send_all_state_event(FsmPid, Event, Timeout)
+ try gen_fsm:sync_send_all_state_event(FsmPid, Event, infinity)
catch
exit:{noproc, _} ->
{error, closed};
- exit:{timeout, _} ->
- {error, timeout};
exit:{normal, _} ->
{error, closed};
- exit:{shutdown, _} ->
+ exit:{{shutdown, _},_} ->
{error, closed}
end.
@@ -1324,8 +1329,7 @@ new_server_hello(#server_hello{cipher_suite = CipherSuite,
next_state(hello, certify, Record, State)
catch
#alert{} = Alert ->
- handle_own_alert(Alert, Version, hello, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, hello, State0)
end.
resumed_server_hello(#state{session = Session,
@@ -1345,8 +1349,7 @@ resumed_server_hello(#state{session = Session,
{Record, State} = next_record(State2),
next_state(hello, abbreviated, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, hello, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, hello, State0)
end.
handle_new_session(NewId, CipherSuite, Compression, #state{session = Session0} = State0) ->
@@ -1371,8 +1374,7 @@ handle_resumed_session(SessId, #state{connection_states = ConnectionStates0,
session = Session}),
next_state(hello, abbreviated, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, hello, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, hello, State0)
end.
@@ -1389,8 +1391,7 @@ client_certify_and_key_exchange(#state{negotiated_version = Version} =
next_state(certify, cipher, Record, State)
catch
throw:#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end.
do_client_certify_and_key_exchange(State0) ->
@@ -1962,8 +1963,7 @@ handle_tls_handshake(Handle, StateName, #state{tls_packets = [Packet | Packets]}
end.
next_state(Current,_, #alert{} = Alert, #state{negotiated_version = Version} = State) ->
- handle_own_alert(Alert, Version, Current, State),
- {stop, normal, State};
+ handle_own_alert(Alert, Version, Current, State);
next_state(_,Next, no_record, State) ->
{next_state, Next, State, get_timeout(State)};
@@ -2001,8 +2001,7 @@ next_state(Current, Next, #ssl_tls{type = ?HANDSHAKE, fragment = Data},
State = State0#state{tls_packets = Packets, tls_handshake_buffer = Buf},
handle_tls_handshake(Handle, Next, State)
catch throw:#alert{} = Alert ->
- handle_own_alert(Alert, Version, Current, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, Current, State0)
end;
next_state(_, StateName, #ssl_tls{type = ?APPLICATION_DATA, fragment = Data}, State0) ->
@@ -2258,13 +2257,13 @@ handle_alert(#alert{level = ?FATAL} = Alert, StateName,
handle_alert(#alert{level = ?WARNING, description = ?CLOSE_NOTIFY} = Alert,
StateName, State) ->
handle_normal_shutdown(Alert, StateName, State),
- {stop, normal, State};
+ {stop, {shutdown, peer_close}, State};
handle_alert(#alert{level = ?WARNING, description = ?NO_RENEGOTIATION} = Alert, StateName,
#state{log_alert = Log, renegotiation = {true, internal}} = State) ->
log_alert(Log, StateName, Alert),
handle_normal_shutdown(Alert, StateName, State),
- {stop, normal, State};
+ {stop, {shutdown, peer_close}, State};
handle_alert(#alert{level = ?WARNING, description = ?NO_RENEGOTIATION} = Alert, StateName,
#state{log_alert = Log, renegotiation = {true, From}} = State0) ->
@@ -2317,8 +2316,8 @@ handle_own_alert(Alert, Version, StateName,
try %% Try to tell the other side
{BinMsg, _} =
encode_alert(Alert, Version, ConnectionStates),
- linux_workaround_transport_delivery_problems(Alert, Socket),
- Transport:send(Socket, BinMsg)
+ Transport:send(Socket, BinMsg),
+ workaround_transport_delivery_problems(Socket, Transport)
catch _:_ -> %% Can crash if we are in a uninitialized state
ignore
end,
@@ -2327,7 +2326,8 @@ handle_own_alert(Alert, Version, StateName,
handle_normal_shutdown(Alert,StateName, State)
catch _:_ ->
ok
- end.
+ end,
+ {stop, {shutdown, own_alert}, State}.
handle_normal_shutdown(Alert, _, #state{socket = Socket,
start_or_recv_from = StartFrom,
@@ -2342,8 +2342,7 @@ handle_normal_shutdown(Alert, StateName, #state{socket = Socket,
handle_unexpected_message(Msg, Info, #state{negotiated_version = Version} = State) ->
Alert = ?ALERT_REC(?FATAL,?UNEXPECTED_MESSAGE),
- handle_own_alert(Alert, Version, {Info, Msg}, State),
- {stop, normal, State}.
+ handle_own_alert(Alert, Version, {Info, Msg}, State).
make_premaster_secret({MajVer, MinVer}, rsa) ->
Rand = ssl:random_bytes(?NUM_OF_PREMASTERSECRET_BYTES-2),
@@ -2401,36 +2400,35 @@ notify_renegotiater({true, From}) when not is_atom(From) ->
notify_renegotiater(_) ->
ok.
-terminate_alert(Reason, Version, ConnectionStates) when Reason == normal; Reason == shutdown;
+terminate_alert(Reason, Version, ConnectionStates) when Reason == normal;
Reason == user_close ->
{BinAlert, _} = encode_alert(?ALERT_REC(?WARNING, ?CLOSE_NOTIFY),
Version, ConnectionStates),
BinAlert;
+terminate_alert({shutdown, _}, Version, ConnectionStates) ->
+ {BinAlert, _} = encode_alert(?ALERT_REC(?WARNING, ?CLOSE_NOTIFY),
+ Version, ConnectionStates),
+ BinAlert;
+
terminate_alert(_, Version, ConnectionStates) ->
{BinAlert, _} = encode_alert(?ALERT_REC(?FATAL, ?INTERNAL_ERROR),
Version, ConnectionStates),
BinAlert.
-workaround_transport_delivery_problems(_,_, user_close) ->
- ok;
-workaround_transport_delivery_problems(Socket, Transport, _) ->
+workaround_transport_delivery_problems(Socket, gen_tcp = Transport) ->
%% Standard trick to try to make sure all
- %% data sent to to tcp port is really sent
- %% before tcp port is closed so that the peer will
- %% get a correct error message.
+ %% data sent to the tcp port is really delivered to the
+ %% peer application before the tcp port is closed so that the peer will
+ %% get the correct TLS alert message and not only a transport close.
inet:setopts(Socket, [{active, false}]),
Transport:shutdown(Socket, write),
- Transport:recv(Socket, 0).
-
-linux_workaround_transport_delivery_problems(#alert{level = ?FATAL}, Socket) ->
- case os:type() of
- {unix, linux} ->
- inet:setopts(Socket, [{nodelay, true}]);
- _ ->
- ok
- end;
-linux_workaround_transport_delivery_problems(_, _) ->
- ok.
+ %% Will return when the other side has closed, or after 30 s;
+ %% i.e. we do not want to hang if something goes wrong
+ %% with the network, but we want to maximise the odds that the
+ %% peer application gets all data sent on the tcp connection.
+ Transport:recv(Socket, 0, 30000);
+workaround_transport_delivery_problems(Socket, Transport) ->
+ Transport:close(Socket).
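(For context, not part of the patch: a minimal standalone sketch of the same half-close idiom on a plain gen_tcp socket; the function name is illustrative only.)

    flush_then_close(Socket) ->
        %% Switch to passive mode so the recv/3 below owns incoming data.
        ok = inet:setopts(Socket, [{active, false}]),
        %% Half-close the write side; the peer sees end-of-stream but we
        %% can still read, so our close does not discard data in flight.
        ok = gen_tcp:shutdown(Socket, write),
        %% Wait for the peer to close, giving up after 30 seconds.
        _ = gen_tcp:recv(Socket, 0, 30000),
        gen_tcp:close(Socket).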
get_timeout(#state{ssl_options=#ssl_options{hibernate_after = undefined}}) ->
infinity;
@@ -2503,3 +2501,26 @@ default_hashsign(_Version, KeyExchange)
default_hashsign(_Version, KeyExchange)
when KeyExchange == dh_anon ->
{null, anon}.
+
+start_or_recv_cancel_timer(infinity, _RecvFrom) ->
+ ok;
+start_or_recv_cancel_timer(Timeout, RecvFrom) ->
+ erlang:send_after(Timeout, self(), {cancel_start_or_recv, RecvFrom}).
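(Illustrative only; not part of the patch.) The timer above implements the recv/ssl_accept timeout as a plain message to self(). A minimal sketch of the same pattern outside gen_fsm, with hypothetical names; a timer message that arrives after a successful reply must be ignored or flushed, which is what the catch-all cancel_start_or_recv clause earlier in this module does:

    wait_for_reply(Tag, Timeout) ->
        erlang:send_after(Timeout, self(), {cancel, Tag}),
        receive
            {reply, Tag, Result} ->
                %% A stale {cancel, Tag} may still arrive later and
                %% must be dropped by the caller's receive loop.
                {ok, Result};
            {cancel, Tag} ->
                {error, timeout}
        end.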
+
+handle_unrecv_data(StateName, #state{socket = Socket, transport_cb = Transport} = State) ->
+ inet:setopts(Socket, [{active, false}]),
+ case Transport:recv(Socket, 0, 0) of
+ {error, closed} ->
+ ok;
+ {ok, Data} ->
+ handle_close_alert(Data, StateName, State)
+ end.
+
+handle_close_alert(Data, StateName, State0) ->
+ case next_tls_record(Data, State0) of
+ {#ssl_tls{type = ?ALERT, fragment = EncAlerts}, State} ->
+ [Alert|_] = decode_alerts(EncAlerts),
+ handle_normal_shutdown(Alert, StateName, State);
+ _ ->
+ ok
+ end.
diff --git a/lib/ssl/vsn.mk b/lib/ssl/vsn.mk
index e381b73c27..bc8b8fd039 100644
--- a/lib/ssl/vsn.mk
+++ b/lib/ssl/vsn.mk
@@ -1 +1 @@
-SSL_VSN = 5.1
+SSL_VSN = 5.1.1
diff --git a/lib/stdlib/src/proc_lib.erl b/lib/stdlib/src/proc_lib.erl
index 02bcbb5a60..4bca4c1e6d 100644
--- a/lib/stdlib/src/proc_lib.erl
+++ b/lib/stdlib/src/proc_lib.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -184,6 +184,17 @@ check_for_monitor(SpawnOpts) ->
false
end.
+spawn_mon(M,F,A) ->
+ Parent = get_my_name(),
+ Ancestors = get_ancestors(),
+ erlang:spawn_monitor(?MODULE, init_p, [Parent,Ancestors,M,F,A]).
+
+spawn_opt_mon(M, F, A, Opts) when is_atom(M), is_atom(F), is_list(A) ->
+ Parent = get_my_name(),
+ Ancestors = get_ancestors(),
+ check_for_monitor(Opts),
+ erlang:spawn_opt(?MODULE, init_p, [Parent,Ancestors,M,F,A], [monitor|Opts]).
+
-spec hibernate(Module, Function, Args) -> no_return() when
Module :: module(),
Function :: atom(),
@@ -270,8 +281,8 @@ start(M, F, A) when is_atom(M), is_atom(F), is_list(A) ->
Ret :: term() | {error, Reason :: term()}.
start(M, F, A, Timeout) when is_atom(M), is_atom(F), is_list(A) ->
- Pid = ?MODULE:spawn(M, F, A),
- sync_wait(Pid, Timeout).
+ PidRef = spawn_mon(M, F, A),
+ sync_wait_mon(PidRef, Timeout).
-spec start(Module, Function, Args, Time, SpawnOpts) -> Ret when
Module :: module(),
@@ -282,8 +293,8 @@ start(M, F, A, Timeout) when is_atom(M), is_atom(F), is_list(A) ->
Ret :: term() | {error, Reason :: term()}.
start(M, F, A, Timeout, SpawnOpts) when is_atom(M), is_atom(F), is_list(A) ->
- Pid = ?MODULE:spawn_opt(M, F, A, SpawnOpts),
- sync_wait(Pid, Timeout).
+ PidRef = spawn_opt_mon(M, F, A, SpawnOpts),
+ sync_wait_mon(PidRef, Timeout).
-spec start_link(Module, Function, Args) -> Ret when
Module :: module(),
@@ -330,6 +341,23 @@ sync_wait(Pid, Timeout) ->
{error, timeout}
end.
+sync_wait_mon({Pid, Ref}, Timeout) ->
+ receive
+ {ack, Pid, Return} ->
+ erlang:demonitor(Ref, [flush]),
+ Return;
+ {'DOWN', Ref, _Type, Pid, Reason} ->
+ {error, Reason};
+ {'EXIT', Pid, Reason} -> %% link as spawn_opt?
+ erlang:demonitor(Ref, [flush]),
+ {error, Reason}
+ after Timeout ->
+ erlang:demonitor(Ref, [flush]),
+ exit(Pid, kill),
+ flush(Pid),
+ {error, timeout}
+ end.
+
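(For context, a hedged sketch; my_worker is a hypothetical module, not part of the patch.) This is the start/init_ack handshake that the monitor protects: if init/1 crashes before proc_lib:init_ack/2 is called, the 'DOWN' message now makes proc_lib:start/4 return {error, Reason} promptly instead of hanging until the timeout expires.

    -module(my_worker).
    -export([start/0, init/1]).

    start() ->
        %% Returns {ok, Pid}, or {error, Reason} if init/1 crashes
        %% before it has acknowledged the start.
        proc_lib:start(?MODULE, init, [self()], 5000).

    init(Parent) ->
        register(my_worker, self()),              %% may crash if the name is taken
        proc_lib:init_ack(Parent, {ok, self()}),  %% tell the starter we are up
        loop().

    loop() ->
        receive stop -> ok end.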
-spec flush(pid()) -> 'true'.
flush(Pid) ->
diff --git a/lib/stdlib/test/base64_SUITE.erl b/lib/stdlib/test/base64_SUITE.erl
index c64a961ffa..7b8650f224 100644
--- a/lib/stdlib/test/base64_SUITE.erl
+++ b/lib/stdlib/test/base64_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -20,7 +20,6 @@
-module(base64_SUITE).
-include_lib("common_test/include/ct.hrl").
--include("test_server_line.hrl").
%% Test server specific exports
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -33,7 +32,7 @@
mime_decode_to_string/1, roundtrip/1]).
init_per_testcase(_, Config) ->
- Dog = test_server:timetrap(?t:minutes(2)),
+ Dog = test_server:timetrap(?t:minutes(4)),
NewConfig = lists:keydelete(watchdog, 1, Config),
[{watchdog, Dog} | NewConfig].
diff --git a/lib/stdlib/test/dict_SUITE.erl b/lib/stdlib/test/dict_SUITE.erl
index c46fc47b34..df9c769c67 100644
--- a/lib/stdlib/test/dict_SUITE.erl
+++ b/lib/stdlib/test/dict_SUITE.erl
@@ -53,7 +53,7 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(_Case, Config) ->
- ?line Dog = ?t:timetrap(?t:minutes(5)),
+ Dog = ?t:timetrap(?t:minutes(5)),
[{watchdog,Dog}|Config].
end_per_testcase(_Case, Config) ->
@@ -65,22 +65,22 @@ create(Config) when is_list(Config) ->
test_all(fun create_1/1).
create_1(M) ->
- ?line D0 = M:empty(),
- ?line [] = M:to_list(D0),
- ?line 0 = M:size(D0),
+ D0 = M(empty, []),
+ [] = M(to_list, D0),
+ 0 = M(size, D0),
D0.
store(Config) when is_list(Config) ->
test_all([{0,132},{253,258},{510,514}], fun store_1/2).
store_1(List, M) ->
- ?line D0 = M:from_list(List),
+ D0 = M(from_list, List),
%% Make sure that we get the same result by inserting
%% elements one at the time.
- ?line D1 = foldl(fun({K,V}, Dict) -> M:enter(K, V, Dict) end,
- M:empty(), List),
- ?line true = M:equal(D0, D1),
+ D1 = foldl(fun({K,V}, Dict) -> M(enter, {K,V,Dict}) end,
+ M(empty, []), List),
+ true = M(equal, {D0,D1}),
D0.
%%%
@@ -98,7 +98,7 @@ dict_mods() ->
[Orddict,Dict,Gb].
test_all(Tester) ->
- ?line Pids = [spawn_tester(M, Tester) || M <- dict_mods()],
+ Pids = [spawn_tester(M, Tester) || M <- dict_mods()],
collect_all(Pids, []).
spawn_tester(M, Tester) ->
@@ -106,7 +106,7 @@ spawn_tester(M, Tester) ->
spawn_link(fun() ->
random:seed(1, 2, 42),
S = Tester(M),
- Res = {M:size(S),lists:sort(M:to_list(S))},
+ Res = {M(size, S),lists:sort(M(to_list, S))},
Parent ! {result,self(),Res}
end).
diff --git a/lib/stdlib/test/dict_test_lib.erl b/lib/stdlib/test/dict_test_lib.erl
index 92a75dad89..7167014310 100644
--- a/lib/stdlib/test/dict_test_lib.erl
+++ b/lib/stdlib/test/dict_test_lib.erl
@@ -17,67 +17,48 @@
%% %CopyrightEnd%
%%
--module(dict_test_lib, [Mod,Equal]).
+-module(dict_test_lib).
--export([module/0,equal/2,empty/0,size/1,to_list/1,from_list/1,
- enter/3,delete/2,lookup/2]).
+-export([new/2]).
-module() ->
- Mod.
-
-equal(X, Y) ->
- Equal(X, Y).
+new(Mod, Eq) ->
+ fun (enter, {K,V,D}) -> enter(Mod, K, V, D);
+ (empty, []) -> empty(Mod);
+ (equal, {D1,D2}) -> Eq(D1, D2);
+ (from_list, L) -> from_list(Mod, L);
+ (module, []) -> Mod;
+ (size, D) -> Mod:size(D);
+ (to_list, D) -> to_list(Mod, D)
+ end.
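(A hedged usage sketch; the module choice and equality fun are illustrative.) With the parameterized module gone, a suite builds a dispatcher fun with new/2 and calls it with an operation name and its arguments:

    M  = dict_test_lib:new(dict, fun(D1, D2) ->
                 lists:sort(dict:to_list(D1)) =:= lists:sort(dict:to_list(D2))
             end),
    D0 = M(from_list, [{a,1},{b,2}]),
    D1 = M(enter, {c,3,D0}),
    3  = M(size, D1),
    true = M(equal, {D1, M(from_list, [{a,1},{b,2},{c,3}])}).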
-empty() ->
+empty(Mod) ->
case erlang:function_exported(Mod, new, 0) of
false -> Mod:empty();
true -> Mod:new()
end.
-size(S) ->
- Mod:size(S).
-
-to_list(S) ->
- Mod:to_list(S).
+to_list(Mod, D) ->
+ Mod:to_list(D).
-from_list(S) ->
+from_list(Mod, L) ->
case erlang:function_exported(Mod, from_orddict, 1) of
false ->
- Mod:from_list(S);
+ Mod:from_list(L);
true ->
%% The gb_trees module has no from_list/1 function.
%%
%% The keys in S are not unique. To make sure
%% that we pick the same key/value pairs as
%% dict/orddict, first convert the list to an orddict.
- Orddict = orddict:from_list(S),
+ Orddict = orddict:from_list(L),
Mod:from_orddict(Orddict)
end.
%% Store new value into dictionary or update previous value in dictionary.
-enter(Key, Val, Dict) ->
+enter(Mod, Key, Val, Dict) ->
case erlang:function_exported(Mod, store, 3) of
false ->
Mod:enter(Key, Val, Dict);
true ->
Mod:store(Key, Val, Dict)
end.
-
-%% Delete an EXISTING key.
-delete(Key, Dict) ->
- case erlang:function_exported(Mod, delete, 2) of
- true -> Mod:delete(Key, Dict);
- false -> Mod:erase(Key, Dict)
- end.
-
-%% -> none | {value,Value}
-lookup(Key, Dict) ->
- case erlang:function_exported(Mod, lookup, 2) of
- false ->
- case Mod:find(Key, Dict) of
- error -> none;
- {ok,Value} -> {value,Value}
- end;
- true ->
- Mod:lookup(Key, Dict)
- end.
diff --git a/lib/stdlib/test/epp_SUITE.erl b/lib/stdlib/test/epp_SUITE.erl
index f79414db49..77c615d6d9 100644
--- a/lib/stdlib/test/epp_SUITE.erl
+++ b/lib/stdlib/test/epp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -1236,6 +1236,13 @@ otp_8911(doc) ->
otp_8911(suite) ->
[];
otp_8911(Config) when is_list(Config) ->
+ case test_server:is_cover() of
+ true ->
+ {skip, "Testing cover, so can not run when cover is already running"};
+ false ->
+ do_otp_8911(Config)
+ end.
+do_otp_8911(Config) ->
?line {ok, CWD} = file:get_cwd(),
?line ok = file:set_cwd(?config(priv_dir, Config)),
diff --git a/lib/stdlib/test/gen_server_SUITE.erl b/lib/stdlib/test/gen_server_SUITE.erl
index 2abb01ba24..dffeadb423 100644
--- a/lib/stdlib/test/gen_server_SUITE.erl
+++ b/lib/stdlib/test/gen_server_SUITE.erl
@@ -1046,8 +1046,9 @@ call_with_huge_message_queue(Config) when is_list(Config) ->
io:format("Time for empty message queue: ~p", [Time]),
io:format("Time for huge message queue: ~p", [NewTime]),
+ IsCover = test_server:is_cover(),
case (NewTime+1) / (Time+1) of
- Q when Q < 10 ->
+ Q when Q < 10; IsCover ->
ok;
Q ->
io:format("Q = ~p", [Q]),
diff --git a/lib/stdlib/test/id_transform_SUITE.erl b/lib/stdlib/test/id_transform_SUITE.erl
index 233b0d0a78..ee97ffe7b3 100644
--- a/lib/stdlib/test/id_transform_SUITE.erl
+++ b/lib/stdlib/test/id_transform_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -61,7 +61,7 @@ id_transform(Config) when is_list(Config) ->
?line {module,erl_id_trans}=code:load_binary(erl_id_trans,File,Bin),
?line case test_server:purify_is_running() of
false ->
- Dog = ?t:timetrap(?t:hours(1)),
+ Dog = ct:timetrap(?t:hours(1)),
?line Res = run_in_test_suite(),
?t:timetrap_cancel(Dog),
Res;
diff --git a/lib/stdlib/test/io_proto_SUITE.erl b/lib/stdlib/test/io_proto_SUITE.erl
index 661d57c85b..17e69f7c1c 100644
--- a/lib/stdlib/test/io_proto_SUITE.erl
+++ b/lib/stdlib/test/io_proto_SUITE.erl
@@ -96,7 +96,8 @@ groups() ->
[].
init_per_suite(Config) ->
- Config.
+ DefShell = get_default_shell(),
+ [{default_shell,DefShell}|Config].
end_per_suite(_Config) ->
ok.
@@ -124,20 +125,25 @@ unicode_prompt(doc) ->
["Test that an Unicode prompt does not crash the shell"];
unicode_prompt(Config) when is_list(Config) ->
?line PA = filename:dirname(code:which(?MODULE)),
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline, "shell:prompt_func({io_proto_SUITE,uprompt})."},
- {getline, "default"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "\"hej\\n\""},
- {putline, "io:setopts([{binary,true}])."},
- {getline, "ok"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "<<\"hej\\n\">>"}
- ],[],[],"-pa \""++ PA++"\""),
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ ok;
+ new ->
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline, "shell:prompt_func({io_proto_SUITE,uprompt})."},
+ {getline, "default"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "\"hej\\n\""},
+ {putline, "io:setopts([{binary,true}])."},
+ {getline, "ok"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "<<\"hej\\n\">>"}
+ ],[],[],"-pa \""++ PA++"\"")
+ end,
%% And one with oldshell
?line rtnode([{putline,""},
{putline, "2."},
@@ -234,21 +240,26 @@ setopts_getopts(Config) when is_list(Config) ->
lists:sort(io:getopts(RFile)),
?line eof = io:get_line(RFile,''),
?line file:close(RFile),
- %% So, lets test another node with new interactive shell
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline, "lists:keyfind(binary,1,io:getopts())."},
- {getline, "{binary,false}"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "\"hej\\n\""},
- {putline, "io:setopts([{binary,true}])."},
- {getline, "ok"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "<<\"hej\\n\">>"}
- ],[]),
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ ok;
+ new ->
+ %% So, lets test another node with new interactive shell
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline, "lists:keyfind(binary,1,io:getopts())."},
+ {getline, "{binary,false}"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "\"hej\\n\""},
+ {putline, "io:setopts([{binary,true}])."},
+ {getline, "ok"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "<<\"hej\\n\">>"}
+ ],[])
+ end,
%% And one with oldshell
?line rtnode([{putline,""},
{putline, "2."},
@@ -433,21 +444,27 @@ unicode_options(Config) when is_list(Config) ->
end,
?line [ ok = CannotWriteFile(F,FailDir) || F <- AllNoBom ],
- %% OK, time for the group_leaders...
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline, "lists:keyfind(encoding,1,io:getopts())."},
- {getline, "{encoding,latin1}"},
- {putline, "io:format(\"~ts~n\",[[1024]])."},
- {getline, "\\x{400}"},
- {putline, "io:setopts([unicode])."},
- {getline, "ok"},
- {putline, "io:format(\"~ts~n\",[[1024]])."},
- {getline,
- binary_to_list(unicode:characters_to_binary(
- [1024],unicode,utf8))}
- ],[],"LC_CTYPE=\""++get_lc_ctype()++"\"; export LC_CTYPE; "),
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ ok;
+ new ->
+ %% OK, time for the group_leaders...
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline, "lists:keyfind(encoding,1,io:getopts())."},
+ {getline, "{encoding,latin1}"},
+ {putline, "io:format(\"~ts~n\",[[1024]])."},
+ {getline, "\\x{400}"},
+ {putline, "io:setopts([unicode])."},
+ {getline, "ok"},
+ {putline, "io:format(\"~ts~n\",[[1024]])."},
+ {getline,
+ binary_to_list(unicode:characters_to_binary(
+ [1024],unicode,utf8))}
+ ],[],"LC_CTYPE=\""++get_lc_ctype()++"\"; "
+ "export LC_CTYPE; ")
+ end,
?line rtnode([{putline,""},
{putline, "2."},
{getline_re, ".*2."},
@@ -680,23 +697,28 @@ binary_options(Config) when is_list(Config) ->
?line file:close(F3),
%% OK, time for the group_leaders...
%% io:format(standard_error,"Hmmm:~w~n",["<<\""++binary_to_list(<<"\345\344\366"/utf8>>)++"\\n\">>"]),
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline, "lists:keyfind(binary,1,io:getopts())."},
- {getline, "{binary,false}"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "\"hej\\n\""},
- {putline, "io:setopts([{binary,true},unicode])."},
- {getline, "ok"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "<<\"hej\\n\">>"},
- {putline, "io:get_line('')."},
- {putline, binary_to_list(<<"\345\344\366"/utf8>>)},
- {getline, "<<\""++binary_to_list(unicode:characters_to_binary(<<"\345\344\366"/utf8>>,latin1,utf8))++"\\n\">>"}
- ],[]),
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ ok;
+ new ->
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline, "lists:keyfind(binary,1,io:getopts())."},
+ {getline, "{binary,false}"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "\"hej\\n\""},
+ {putline, "io:setopts([{binary,true},unicode])."},
+ {getline, "ok"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "<<\"hej\\n\">>"},
+ {putline, "io:get_line('')."},
+ {putline, binary_to_list(<<"\345\344\366"/utf8>>)},
+ {getline, "<<\""++binary_to_list(unicode:characters_to_binary(<<"\345\344\366"/utf8>>,latin1,utf8))++"\\n\">>"}
+ ],[])
+ end,
%% And one with oldshell
?line rtnode([{putline,""},
{putline, "2."},
@@ -1146,9 +1168,11 @@ read_modes_gl(suite) ->
read_modes_gl(doc) ->
["Test various modes when reading from the group leader from another machine"];
read_modes_gl(Config) when is_list(Config) ->
- case get_progs() of
- {error,Reason} ->
+ case {get_progs(),proplists:get_value(default_shell,Config)} of
+ {{error,Reason},_} ->
{skipped,Reason};
+ {_,old} ->
+ {skipped,"No new shell"};
_ ->
read_modes_gl_1(Config,answering_machine1)
end.
@@ -1754,6 +1778,17 @@ get_data_within(Port, Timeout, Acc) ->
timeout
end.
+get_default_shell() ->
+ try
+ rtnode([{putline,""},
+ {putline, "whereis(user_drv)."},
+ {getline, "undefined"}],[]),
+ old
+ catch E:R ->
+ ?dbg({E,R}),
+ new
+ end.
+
%%
%% Test I/O-server
%%
diff --git a/lib/stdlib/test/proc_lib_SUITE.erl b/lib/stdlib/test/proc_lib_SUITE.erl
index c95089117c..8dca69bac4 100644
--- a/lib/stdlib/test/proc_lib_SUITE.erl
+++ b/lib/stdlib/test/proc_lib_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -28,7 +28,7 @@
crash/1, sync_start_nolink/1, sync_start_link/1,
spawn_opt/1, sp1/0, sp2/0, sp3/1, sp4/2, sp5/1,
hibernate/1]).
--export([ otp_6345/1]).
+-export([ otp_6345/1, init_dont_hang/1]).
-export([hib_loop/1, awaken/1]).
@@ -36,7 +36,7 @@
handle_event/2, handle_call/2, handle_info/2,
terminate/2]).
--export([otp_6345_init/1]).
+-export([otp_6345_init/1, init_dont_hang_init/1]).
-ifdef(STANDALONE).
@@ -52,7 +52,7 @@ all() ->
{group, tickets}].
groups() ->
- [{tickets, [], [otp_6345]},
+ [{tickets, [], [otp_6345, init_dont_hang]},
{sync_start, [], [sync_start_nolink, sync_start_link]}].
init_per_suite(Config) ->
@@ -343,6 +343,29 @@ otp_6345_loop() ->
otp_6345_loop()
end.
+%% OTP-9803
+init_dont_hang(suite) ->
+ [];
+init_dont_hang(doc) ->
+ ["Check that proc_lib:start doesn't hang if the spawned process crashes before proc_lib:init_ack/2"];
+init_dont_hang(Config) when is_list(Config) ->
+ %% Start should behave as start_link
+ process_flag(trap_exit, true),
+ StartLinkRes = proc_lib:start_link(?MODULE, init_dont_hang_init, [self()]),
+ try
+ StartLinkRes = proc_lib:start(?MODULE, init_dont_hang_init, [self()], 1000),
+ StartLinkRes = proc_lib:start(?MODULE, init_dont_hang_init, [self()], 1000, []),
+ ok
+ catch _:Error ->
+ io:format("Error ~p /= ~p ~n",[erlang:get_stacktrace(), StartLinkRes]),
+ exit(Error)
+ end.
+
+%% Crashes on purpose, before proc_lib:init_ack/2 is ever called.
+init_dont_hang_init(_Parent) ->
+ 1 = 2.
+
+
+
%%-----------------------------------------------------------------
%% The error_logger handler used.
%%-----------------------------------------------------------------
diff --git a/lib/stdlib/test/sets_SUITE.erl b/lib/stdlib/test/sets_SUITE.erl
index f284276bd7..e2bcdd18ce 100644
--- a/lib/stdlib/test/sets_SUITE.erl
+++ b/lib/stdlib/test/sets_SUITE.erl
@@ -35,7 +35,7 @@
-import(lists, [foldl/3,reverse/1]).
init_per_testcase(_Case, Config) ->
- ?line Dog = ?t:timetrap(?t:minutes(5)),
+ Dog = ?t:timetrap(?t:minutes(5)),
[{watchdog,Dog}|Config].
end_per_testcase(_Case, Config) ->
@@ -70,65 +70,65 @@ create(Config) when is_list(Config) ->
test_all(fun create_1/1).
create_1(M) ->
- ?line S0 = M:empty(),
- ?line [] = M:to_list(S0),
- ?line 0 = M:size(S0),
- ?line true = M:is_empty(S0),
+ S0 = M(empty, []),
+ [] = M(to_list, S0),
+ 0 = M(size, S0),
+ true = M(is_empty, S0),
E = make_ref(),
- ?line One = M:singleton(E),
- ?line 1 = M:size(One),
- ?line false = M:is_empty(One),
- [E] = M:to_list(One),
+ One = M(singleton, E),
+ 1 = M(size, One),
+ false = M(is_empty, One),
+ [E] = M(to_list, One),
S0.
add_element(Config) when is_list(Config) ->
test_all([{0,132},{253,258},{510,514}], fun add_element_1/2).
add_element_1(List, M) ->
- ?line S = M:from_list(List),
- ?line SortedSet = lists:usort(List),
- ?line SortedSet = lists:sort(M:to_list(S)),
+ S = M(from_list, List),
+ SortedSet = lists:usort(List),
+ SortedSet = lists:sort(M(to_list, S)),
%% Make sure that we get the same result by inserting
%% elements one at the time.
- ?line S2 = foldl(fun(El, Set) -> M:add_element(El, Set) end,
- M:empty(), List),
- ?line true = M:equal(S, S2),
+ S2 = foldl(fun(El, Set) -> M(add_element, {El,Set}) end,
+ M(empty, []), List),
+ true = M(equal, {S,S2}),
%% Insert elements, randomly delete inserted elements,
%% and re-inserted all deleted elements at the end.
- ?line S3 = add_element_del(List, M, M:empty(), [], []),
- ?line true = M:equal(S2, S3),
- ?line true = M:equal(S, S3),
+ S3 = add_element_del(List, M, M(empty, []), [], []),
+ true = M(equal, {S2,S3}),
+ true = M(equal, {S,S3}),
S.
add_element_del([H|T], M, S, Del, []) ->
- add_element_del(T, M, M:add_element(H, S), Del, [H]);
+ add_element_del(T, M, M(add_element, {H,S}), Del, [H]);
add_element_del([H|T], M, S0, Del, Inserted) ->
- S1 = M:add_element(H, S0),
+ S1 = M(add_element, {H,S0}),
case random:uniform(3) of
1 ->
OldEl = lists:nth(random:uniform(length(Inserted)), Inserted),
- S = M:del_element(OldEl, S1),
+ S = M(del_element, {OldEl,S1}),
add_element_del(T, M, S, [OldEl|Del], [H|Inserted]);
_ ->
add_element_del(T, M, S1, Del, [H|Inserted])
end;
add_element_del([], M, S, Del, _) ->
- M:union(S, M:from_list(Del)).
+ M(union, {S,M(from_list, Del)}).
del_element(Config) when is_list(Config) ->
test_all([{0,132},{253,258},{510,514},{1022,1026}], fun del_element_1/2).
del_element_1(List, M) ->
- ?line S0 = M:from_list(List),
- ?line Empty = foldl(fun(El, Set) -> M:del_element(El, Set) end, S0, List),
- ?line Empty = M:empty(),
- ?line M:is_empty(Empty),
- ?line S1 = foldl(fun(El, Set) ->
- M:add_element(El, Set)
- end, S0, reverse(List)),
- ?line true = M:equal(S0, S1),
+ S0 = M(from_list, List),
+ Empty = foldl(fun(El, Set) -> M(del_element, {El,Set}) end, S0, List),
+ Empty = M(empty, []),
+ true = M(is_empty, Empty),
+ S1 = foldl(fun(El, Set) ->
+ M(add_element, {El,Set})
+ end, S0, reverse(List)),
+ true = M(equal, {S0,S1}),
S1.
subtract(Config) when is_list(Config) ->
@@ -138,23 +138,23 @@ subtract(Config) when is_list(Config) ->
test_all([{2,69},{126,130},{253,258},511,512,{1023,1030}], fun subtract_1/2).
subtract_empty(M) ->
- ?line Empty = M:empty(),
- ?line true = M:is_empty(M:subtract(Empty, Empty)),
- M:subtract(Empty, Empty).
+ Empty = M(empty, []),
+ true = M(is_empty, M(subtract, {Empty,Empty})),
+ M(subtract, {Empty,Empty}).
subtract_1(List, M) ->
- ?line S0 = M:from_list(List),
- ?line Empty = M:empty(),
+ S0 = M(from_list, List),
+ Empty = M(empty, []),
%% Trivial cases.
- ?line true = M:is_empty(M:subtract(Empty, S0)),
- ?line true = M:equal(S0, M:subtract(S0, Empty)),
+ true = M(is_empty, M(subtract, {Empty,S0})),
+ true = M(equal, {S0,M(subtract, {S0,Empty})}),
%% Not so trivial.
- ?line subtract_check(List, mutate_some(remove_some(List, 0.4)), M),
- ?line subtract_check(List, rnd_list(length(List) div 2 + 5), M),
- ?line subtract_check(List, rnd_list(length(List) div 7 + 9), M),
- ?line subtract_check(List, mutate_some(List), M).
+ subtract_check(List, mutate_some(remove_some(List, 0.4)), M),
+ subtract_check(List, rnd_list(length(List) div 2 + 5), M),
+ subtract_check(List, rnd_list(length(List) div 7 + 9), M),
+ subtract_check(List, mutate_some(List), M).
subtract_check(A, B, M) ->
one_subtract_check(B, A, M),
@@ -163,12 +163,12 @@ subtract_check(A, B, M) ->
one_subtract_check(A, B, M) ->
ASorted = lists:usort(A),
BSorted = lists:usort(B),
- ASet = M:from_list(A),
- BSet = M:from_list(B),
- DiffSet = M:subtract(ASet, BSet),
+ ASet = M(from_list, A),
+ BSet = M(from_list, B),
+ DiffSet = M(subtract, {ASet,BSet}),
Diff = ASorted -- BSorted,
- true = M:equal(DiffSet, M:from_list(Diff)),
- Diff = lists:sort(M:to_list(DiffSet)),
+ true = M(equal, {DiffSet,M(from_list, Diff)}),
+ Diff = lists:sort(M(to_list, DiffSet)),
DiffSet.
intersection(Config) when is_list(Config) ->
@@ -176,60 +176,60 @@ intersection(Config) when is_list(Config) ->
test_all([{1,65},{126,130},{253,259},{499,513},{1023,1025}], fun intersection_1/2).
intersection_1(List, M) ->
- ?line S0 = M:from_list(List),
+ S0 = M(from_list, List),
%% Intersection with self.
- ?line true = M:equal(S0, M:intersection(S0, S0)),
- ?line true = M:equal(S0, M:intersection([S0,S0])),
- ?line true = M:equal(S0, M:intersection([S0,S0,S0])),
- ?line true = M:equal(S0, M:intersection([S0])),
+ true = M(equal, {S0,M(intersection, {S0,S0})}),
+ true = M(equal, {S0,M(intersection, [S0,S0])}),
+ true = M(equal, {S0,M(intersection, [S0,S0,S0])}),
+ true = M(equal, {S0,M(intersection, [S0])}),
%% Intersection with empty.
- ?line Empty = M:empty(),
- ?line true = M:equal(Empty, M:intersection(S0, Empty)),
- ?line true = M:equal(Empty, M:intersection([S0,Empty,S0,Empty])),
+ Empty = M(empty, []),
+ true = M(equal, {Empty,M(intersection, {S0,Empty})}),
+ true = M(equal, {Empty,M(intersection, [S0,Empty,S0,Empty])}),
%% The intersection of no sets is undefined.
- ?line {'EXIT',_} = (catch M:intersection([])),
+ {'EXIT',_} = (catch M(intersection, [])),
%% Disjoint sets.
- ?line Disjoint = [{El} || El <- List],
- ?line DisjointSet = M:from_list(Disjoint),
- ?line M:is_empty(M:intersection(S0, DisjointSet)),
+ Disjoint = [{El} || El <- List],
+ DisjointSet = M(from_list, Disjoint),
+ true = M(is_empty, M(intersection, {S0,DisjointSet})),
%% Disjoint, different sizes.
- ?line M:is_empty(M:intersection(S0, M:from_list(remove_some(Disjoint, 0.3)))),
- ?line M:is_empty(M:intersection(S0, M:from_list(remove_some(Disjoint, 0.7)))),
- ?line M:is_empty(M:intersection(S0, M:from_list(remove_some(Disjoint, 0.9)))),
- ?line M:is_empty(M:intersection(M:from_list(remove_some(List, 0.3)), DisjointSet)),
- ?line M:is_empty(M:intersection(M:from_list(remove_some(List, 0.5)), DisjointSet)),
- ?line M:is_empty(M:intersection(M:from_list(remove_some(List, 0.9)), DisjointSet)),
+ [begin
+ SomeRemoved = M(from_list, remove_some(Disjoint, HowMuch)),
+ true = M(is_empty, M(intersection, {S0,SomeRemoved})),
+ MoreRemoved = M(from_list, remove_some(List, HowMuch)),
+ true = M(is_empty, M(intersection, {MoreRemoved,DisjointSet}))
+ end || HowMuch <- [0.3,0.5,0.7,0.9]],
%% Partial overlap (one or more elements in result set).
%% The sets have almost the same size. (Almost because a duplicated
%% element in the original list could be mutated and not mutated
%% at the same time.)
- ?line PartialOverlap = mutate_some(List, []),
- ?line IntersectionSet = check_intersection(List, PartialOverlap, M),
- ?line false = M:is_empty(IntersectionSet),
+ PartialOverlap = mutate_some(List, []),
+ IntersectionSet = check_intersection(List, PartialOverlap, M),
+ false = M(is_empty, IntersectionSet),
%% Partial overlap, different set sizes. (Intersection possibly empty.)
- ?line check_intersection(List, remove_some(PartialOverlap, 0.1), M),
- ?line check_intersection(List, remove_some(PartialOverlap, 0.3), M),
- ?line check_intersection(List, remove_some(PartialOverlap, 0.5), M),
- ?line check_intersection(List, remove_some(PartialOverlap, 0.7), M),
- ?line check_intersection(List, remove_some(PartialOverlap, 0.9), M),
+ check_intersection(List, remove_some(PartialOverlap, 0.1), M),
+ check_intersection(List, remove_some(PartialOverlap, 0.3), M),
+ check_intersection(List, remove_some(PartialOverlap, 0.5), M),
+ check_intersection(List, remove_some(PartialOverlap, 0.7), M),
+ check_intersection(List, remove_some(PartialOverlap, 0.9), M),
IntersectionSet.
check_intersection(Orig, Mutated, M) ->
- OrigSet = M:from_list(Orig),
- MutatedSet = M:from_list(Mutated),
+ OrigSet = M(from_list, Orig),
+ MutatedSet = M(from_list, Mutated),
Intersection = [El || El <- Mutated, not is_tuple(El)],
SortedIntersection = lists:usort(Intersection),
- IntersectionSet = M:intersection(OrigSet, MutatedSet),
- true = M:equal(IntersectionSet, M:from_list(SortedIntersection)),
- SortedIntersection = lists:sort(M:to_list(IntersectionSet)),
+ IntersectionSet = M(intersection, {OrigSet,MutatedSet}),
+ true = M(equal, {IntersectionSet,M(from_list, SortedIntersection)}),
+ SortedIntersection = lists:sort(M(to_list, IntersectionSet)),
IntersectionSet.
@@ -239,63 +239,63 @@ union(Config) when is_list(Config) ->
test_all([{1,71},{125,129},{254,259},{510,513},{1023,1025}], fun union_1/2).
union_1(List, M) ->
- ?line S = M:from_list(List),
+ S = M(from_list, List),
%% Union with self and empty.
- ?line Empty = M:empty(),
- ?line true = M:equal(S, M:union(S, S)),
- ?line true = M:equal(S, M:union([S,S])),
- ?line true = M:equal(S, M:union([S,S,Empty])),
- ?line true = M:equal(S, M:union([S,Empty,S])),
- ?line true = M:equal(S, M:union(S, Empty)),
- ?line true = M:equal(S, M:union([S])),
- ?line true = M:is_empty(M:union([])),
+ Empty = M(empty, []),
+ true = M(equal, {S,M(union, {S,S})}),
+ true = M(equal, {S,M(union, [S,S])}),
+ true = M(equal, {S,M(union, [S,S,Empty])}),
+ true = M(equal, {S,M(union, [S,Empty,S])}),
+ true = M(equal, {S,M(union, {S,Empty})}),
+ true = M(equal, {S,M(union, [S])}),
+ true = M(is_empty, M(union, [])),
%% Partial overlap.
- ?line check_union(List, remove_some(mutate_some(List), 0.9), M),
- ?line check_union(List, remove_some(mutate_some(List), 0.7), M),
- ?line check_union(List, remove_some(mutate_some(List), 0.5), M),
- ?line check_union(List, remove_some(mutate_some(List), 0.3), M),
- ?line check_union(List, remove_some(mutate_some(List), 0.1), M),
-
- ?line check_union(List, mutate_some(remove_some(List, 0.9)), M),
- ?line check_union(List, mutate_some(remove_some(List, 0.7)), M),
- ?line check_union(List, mutate_some(remove_some(List, 0.5)), M),
- ?line check_union(List, mutate_some(remove_some(List, 0.3)), M),
- ?line check_union(List, mutate_some(remove_some(List, 0.1)), M).
+ check_union(List, remove_some(mutate_some(List), 0.9), M),
+ check_union(List, remove_some(mutate_some(List), 0.7), M),
+ check_union(List, remove_some(mutate_some(List), 0.5), M),
+ check_union(List, remove_some(mutate_some(List), 0.3), M),
+ check_union(List, remove_some(mutate_some(List), 0.1), M),
+
+ check_union(List, mutate_some(remove_some(List, 0.9)), M),
+ check_union(List, mutate_some(remove_some(List, 0.7)), M),
+ check_union(List, mutate_some(remove_some(List, 0.5)), M),
+ check_union(List, mutate_some(remove_some(List, 0.3)), M),
+ check_union(List, mutate_some(remove_some(List, 0.1)), M).
check_union(Orig, Other, M) ->
- OrigSet = M:from_list(Orig),
- OtherSet = M:from_list(Other),
+ OrigSet = M(from_list, Orig),
+ OtherSet = M(from_list, Other),
Union = Orig++Other,
SortedUnion = lists:usort(Union),
- UnionSet = M:union(OrigSet, OtherSet),
- SortedUnion = lists:sort(M:to_list(UnionSet)),
- M:equal(UnionSet, M:from_list(Union)),
+ UnionSet = M(union, {OrigSet,OtherSet}),
+ SortedUnion = lists:sort(M(to_list, UnionSet)),
+ M(equal, {UnionSet,M(from_list, Union)}),
UnionSet.
is_subset(Config) when is_list(Config) ->
test_all([{1,132},{253,270},{299,311}], fun is_subset_1/2).
is_subset_1(List, M) ->
- ?line S = M:from_list(List),
- ?line Empty = M:empty(),
+ S = M(from_list, List),
+ Empty = M(empty, []),
%% Subset of empty and self.
- ?line true = M:is_subset(Empty, Empty),
- ?line true = M:is_subset(Empty, S),
- ?line false = M:is_subset(S, Empty),
- ?line true = M:is_subset(S, S),
+ true = M(is_subset, {Empty,Empty}),
+ true = M(is_subset, {Empty,S}),
+ false = M(is_subset, {S,Empty}),
+ true = M(is_subset, {S,S}),
%% Other cases.
- Res = [?line false = M:is_subset(M:singleton(make_ref()), S),
- ?line true = M:is_subset(M:singleton(hd(List)), S),
- ?line true = check_subset(remove_some(List, 0.1), List, M),
- ?line true = check_subset(remove_some(List, 0.5), List, M),
- ?line true = check_subset(remove_some(List, 0.9), List, M),
- ?line check_subset(mutate_some(List), List, M),
- ?line check_subset(rnd_list(length(List) div 2 + 5), List, M),
- ?line subtract_check(List, rnd_list(length(List) div 7 + 9), M)
+ Res = [false = M(is_subset, {M(singleton, make_ref()),S}),
+ true = M(is_subset, {M(singleton, hd(List)),S}),
+ true = check_subset(remove_some(List, 0.1), List, M),
+ true = check_subset(remove_some(List, 0.5), List, M),
+ true = check_subset(remove_some(List, 0.9), List, M),
+ check_subset(mutate_some(List), List, M),
+ check_subset(rnd_list(length(List) div 2 + 5), List, M),
+ subtract_check(List, rnd_list(length(List) div 7 + 9), M)
],
res_to_set(Res, M, 0, []).
@@ -304,12 +304,12 @@ check_subset(X, Y, M) ->
check_one_subset(X, Y, M).
check_one_subset(X, Y, M) ->
- XSet = M:from_list(X),
- YSet = M:from_list(Y),
+ XSet = M(from_list, X),
+ YSet = M(from_list, Y),
SortedX = lists:usort(X),
SortedY = lists:usort(Y),
IsSubSet = length(SortedY--SortedX) =:= length(SortedY) - length(SortedX),
- IsSubSet = M:is_subset(XSet, YSet),
+ IsSubSet = M(is_subset, {XSet,YSet}),
IsSubSet.
%% Encode all test results as a set to return.
@@ -317,54 +317,54 @@ res_to_set([true|T], M, I, Acc) ->
res_to_set(T, M, I+1, [I|Acc]);
res_to_set([_|T], M, I, Acc) ->
res_to_set(T, M, I+1, Acc);
-res_to_set([], M, _, Acc) -> M:from_list(Acc).
+res_to_set([], M, _, Acc) -> M(from_list, Acc).
is_set(Config) when is_list(Config) ->
%% is_set/1 is tested in the other test cases when its argument
%% is a set. Here test some arguments that makes it return false.
- ?line false = gb_sets:is_set([a,b]),
- ?line false = gb_sets:is_set({a,very,bad,tuple}),
+ false = gb_sets:is_set([a,b]),
+ false = gb_sets:is_set({a,very,bad,tuple}),
- ?line false = sets:is_set([a,b]),
- ?line false = sets:is_set({a,very,bad,tuple}),
+ false = sets:is_set([a,b]),
+ false = sets:is_set({a,very,bad,tuple}),
- ?line false = ordsets:is_set([b,a]),
- ?line false = ordsets:is_set({bad,tuple}),
+ false = ordsets:is_set([b,a]),
+ false = ordsets:is_set({bad,tuple}),
%% Now test values that are known to be bad for all set representations.
test_all(fun is_set_1/1).
is_set_1(M) ->
- ?line false = M:is_set(self()),
- ?line false = M:is_set(blurf),
- ?line false = M:is_set(make_ref()),
- ?line false = M:is_set(<<1,2,3>>),
- ?line false = M:is_set(42),
- ?line false = M:is_set(math:pi()),
- ?line false = M:is_set({}),
- M:empty().
+ false = M(is_set, self()),
+ false = M(is_set, blurf),
+ false = M(is_set, make_ref()),
+ false = M(is_set, <<1,2,3>>),
+ false = M(is_set, 42),
+ false = M(is_set, math:pi()),
+ false = M(is_set, {}),
+ M(empty, []).
fold(Config) when is_list(Config) ->
test_all([{0,71},{125,129},{254,259},{510,513},{1023,1025},{9999,10001}],
fun fold_1/2).
fold_1(List, M) ->
- ?line S = M:from_list(List),
- ?line L = M:fold(fun(E, A) -> [E|A] end, [], S),
- ?line true = lists:sort(L) =:= lists:usort(List),
- M:empty().
+ S = M(from_list, List),
+ L = M(fold, {fun(E, A) -> [E|A] end,[],S}),
+ true = lists:sort(L) =:= lists:usort(List),
+ M(empty, []).
filter(Config) when is_list(Config) ->
test_all([{0,69},{126,130},{254,259},{510,513},{1023,1025},{7999,8000}],
fun filter_1/2).
filter_1(List, M) ->
- ?line S = M:from_list(List),
+ S = M(from_list, List),
IsNumber = fun(X) -> is_number(X) end,
- ?line M:equal(M:from_list(lists:filter(IsNumber, List)),
- M:filter(IsNumber, S)),
- ?line M:filter(fun(X) -> is_atom(X) end, S).
+ M(equal, {M(from_list, lists:filter(IsNumber, List)),
+ M(filter, {IsNumber,S})}),
+ M(filter, {fun(X) -> is_atom(X) end,S}).
%%%
%%% Test specifics for gb_sets.
@@ -375,26 +375,26 @@ take_smallest(Config) when is_list(Config) ->
fun take_smallest_1/2).
take_smallest_1(List, M) ->
- case M:module() of
+ case M(module, []) of
gb_sets -> take_smallest_2(List, M);
_ -> ok
end,
- M:empty().
+ M(empty, []).
take_smallest_2(List0, M) ->
- ?line List = lists:usort(List0),
- ?line S = M:from_list(List0),
+ List = lists:usort(List0),
+ S = M(from_list, List0),
take_smallest_3(S, List, M).
take_smallest_3(S0, List0, M) ->
- case M:is_empty(S0) of
+ case M(is_empty, S0) of
true -> ok;
false ->
- ?line Smallest = hd(List0),
- ?line Smallest = gb_sets:smallest(S0),
- ?line {Smallest,S} = gb_sets:take_smallest(S0),
- ?line List = tl(List0),
- ?line true = gb_sets:to_list(S) =:= List,
+ Smallest = hd(List0),
+ Smallest = gb_sets:smallest(S0),
+ {Smallest,S} = gb_sets:take_smallest(S0),
+ List = tl(List0),
+ true = gb_sets:to_list(S) =:= List,
take_smallest_3(S, List, M)
end.
@@ -403,26 +403,26 @@ take_largest(Config) when is_list(Config) ->
fun take_largest_1/2).
take_largest_1(List, M) ->
- case M:module() of
+ case M(module, []) of
gb_sets -> take_largest_2(List, M);
_ -> ok
end,
- M:empty().
+ M(empty, []).
take_largest_2(List0, M) ->
- ?line List = reverse(lists:usort(List0)),
- ?line S = M:from_list(List0),
+ List = reverse(lists:usort(List0)),
+ S = M(from_list, List0),
take_largest_3(S, List, M).
take_largest_3(S0, List0, M) ->
- case M:is_empty(S0) of
+ case M(is_empty, S0) of
true -> ok;
false ->
- ?line Largest = hd(List0),
- ?line Largest = gb_sets:largest(S0),
- ?line {Largest,S} = gb_sets:take_largest(S0),
- ?line List = tl(List0),
- ?line true = gb_sets:to_list(S) =:= reverse(List),
+ Largest = hd(List0),
+ Largest = gb_sets:largest(S0),
+ {Largest,S} = gb_sets:take_largest(S0),
+ List = tl(List0),
+ true = gb_sets:to_list(S) =:= reverse(List),
take_largest_3(S, List, M)
end.
@@ -441,23 +441,23 @@ sets_mods() ->
[Ordsets,Sets,Gb].
test_all(Tester) ->
- ?line Res = [begin
- random:seed(1, 2, 42),
- S = Tester(M),
- {M:size(S),lists:sort(M:to_list(S))}
- end || M <- sets_mods()],
- ?line all_same(Res).
+ Res = [begin
+ random:seed(1, 2, 42),
+ S = Tester(M),
+ {M(size, S),lists:sort(M(to_list, S))}
+ end || M <- sets_mods()],
+ all_same(Res).
test_all([{Low,High}|T], Tester) ->
test_all(lists:seq(Low, High)++T, Tester);
test_all([Sz|T], Tester) when is_integer(Sz) ->
List = rnd_list(Sz),
- ?line Res = [begin
+ Res = [begin
random:seed(19, 2, Sz),
S = Tester(List, M),
- {M:size(S),lists:sort(M:to_list(S))}
+ {M(size, S),lists:sort(M(to_list, S))}
end || M <- sets_mods()],
- ?line all_same(Res),
+ all_same(Res),
test_all(T, Tester);
test_all([], _) -> ok.
diff --git a/lib/stdlib/test/sets_test_lib.erl b/lib/stdlib/test/sets_test_lib.erl
index bdfb0d59d2..fd4ec2bac3 100644
--- a/lib/stdlib/test/sets_test_lib.erl
+++ b/lib/stdlib/test/sets_test_lib.erl
@@ -17,91 +17,89 @@
%% %CopyrightEnd%
%%
--module(sets_test_lib, [Mod,Equal]).
-
--export([module/0,equal/2,empty/0,from_list/1,to_list/1,singleton/1,
- add_element/2,del_element/2,size/1,is_empty/1,is_set/1,
- intersection/1,intersection/2,subtract/2,
- union/1,union/2,is_subset/2,fold/3,filter/2]).
-
-module() ->
- Mod.
-
-equal(X, Y) ->
- Equal(X, Y).
-
-empty() ->
- Mod:new().
-
-from_list(L) ->
- Mod:from_list(L).
-
-to_list(S) ->
- Mod:to_list(S).
+-module(sets_test_lib).
+
+-export([new/2]).
+
+new(Mod, Eq) ->
+ fun (add_element, {El,S}) -> add_element(Mod, El, S);
+ (del_element, {El,S}) -> del_element(Mod, El, S);
+ (empty, []) -> Mod:new();
+ (equal, {S1,S2}) -> Eq(S1, S2);
+ (filter, {F,S}) -> filter(Mod, F, S);
+ (fold, {F,A,S}) -> fold(Mod, F, A, S);
+ (from_list, L) -> Mod:from_list(L);
+ (intersection, {S1,S2}) -> intersection(Mod, Eq, S1, S2);
+ (intersection, Ss) -> intersection(Mod, Eq, Ss);
+ (is_empty, S) -> is_empty(Mod, S);
+ (is_set, S) -> Mod:is_set(S);
+ (is_subset, {S,Set}) -> is_subset(Mod, Eq, S, Set);
+ (module, []) -> Mod;
+ (singleton, E) -> singleton(Mod, E);
+ (size, S) -> Mod:size(S);
+ (subtract, {S1,S2}) -> subtract(Mod, S1, S2);
+ (to_list, S) -> Mod:to_list(S);
+ (union, {S1,S2}) -> union(Mod, Eq, S1, S2);
+ (union, Ss) -> union(Mod, Eq, Ss)
+ end.
-singleton(E) ->
+singleton(Mod, E) ->
case erlang:function_exported(Mod, singleton, 1) of
true -> Mod:singleton(E);
- false -> from_list([E])
+ false -> Mod:from_list([E])
end.
-add_element(El, S0) ->
+add_element(Mod, El, S0) ->
S = Mod:add_element(El, S0),
true = Mod:is_element(El, S),
- false = is_empty(S),
+ false = is_empty(Mod, S),
true = Mod:is_set(S),
S.
-del_element(El, S0) ->
+del_element(Mod, El, S0) ->
S = Mod:del_element(El, S0),
false = Mod:is_element(El, S),
true = Mod:is_set(S),
S.
-size(S) ->
- Mod:size(S).
-
-is_empty(S) ->
+is_empty(Mod, S) ->
true = Mod:is_set(S),
case erlang:function_exported(Mod, is_empty, 1) of
true -> Mod:is_empty(S);
false -> Mod:size(S) == 0
end.
-is_set(S) ->
- Mod:is_set(S).
-
-intersection(S1, S2) ->
+intersection(Mod, Equal, S1, S2) ->
S = Mod:intersection(S1, S2),
true = Equal(S, Mod:intersection(S2, S1)),
- Disjoint = is_empty(S),
+ Disjoint = is_empty(Mod, S),
Disjoint = Mod:is_disjoint(S1, S2),
Disjoint = Mod:is_disjoint(S2, S1),
S.
-intersection(Ss) ->
+intersection(Mod, Equal, Ss) ->
S = Mod:intersection(Ss),
true = Equal(S, Mod:intersection(lists:reverse(Ss))),
S.
-subtract(S1, S2) ->
+subtract(Mod, S1, S2) ->
S = Mod:subtract(S1, S2),
true = Mod:is_set(S),
true = Mod:size(S) =< Mod:size(S1),
S.
-union(S1, S2) ->
+union(Mod, Equal, S1, S2) ->
S = Mod:union(S1, S2),
true = Equal(S, Mod:union(S2, S1)),
true = Mod:is_set(S),
S.
-union(Ss) ->
+union(Mod, Equal, Ss) ->
S = Mod:union(Ss),
true = Equal(S, Mod:union(lists:reverse(Ss))),
S.
-is_subset(S, Set) ->
+is_subset(Mod, Equal, S, Set) ->
case Mod:is_subset(S, Set) of
false -> false;
true ->
@@ -115,10 +113,10 @@ is_subset(S, Set) ->
true
end.
-fold(F, A, S) ->
+fold(Mod, F, A, S) ->
true = Mod:is_set(S),
Mod:fold(F, A, S).
-filter(F, S) ->
+filter(Mod, F, S) ->
true = Mod:is_set(S),
Mod:filter(F, S).
diff --git a/lib/stdlib/test/stdlib.cover b/lib/stdlib/test/stdlib.cover
index 61f4f064b9..e71be880cb 100644
--- a/lib/stdlib/test/stdlib.cover
+++ b/lib/stdlib/test/stdlib.cover
@@ -1,17 +1,2 @@
%% -*- erlang -*-
{incl_app,stdlib,details}.
-
-{excl_mods,stdlib,
- [erl_parse,
- erl_eval,
- ets,
- filename,
- gen_event,
- gen_server,
- gen,
- lists,
- io,
- io_lib,
- io_lib_format,
- io_lib_pretty,
- proc_lib]}.
diff --git a/lib/test_server/doc/src/test_server.xml b/lib/test_server/doc/src/test_server.xml
index 5bfa42c36f..841cbfbe91 100644
--- a/lib/test_server/doc/src/test_server.xml
+++ b/lib/test_server/doc/src/test_server.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>2007</year>
- <year>2011</year>
+ <year>2012</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -529,6 +529,18 @@ Only valid for peer nodes. Note that slave nodes always
analogy with <c>os:getenv/1</c>), which removes the
environment variable. Only valid for peer nodes. Not
available on VxWorks.</item>
+ <tag><c>{start_cover, false}</c></tag>
+ <item>By default the test server will start cover on all nodes
+ when the test is run with code coverage analysis. To make
+ sure cover is not started on a new node, set this option to
+ <c>false</c>. This can be necessary if the connection to
+ the node will be broken at some point while the node is
+ expected to stay alive, since a remote cover node cannot
+ continue to run without its main node. An alternative is to
+ explicitly stop cover on the node before breaking the
+ connection, but in some situations (if old code resides in
+ one or more processes) this is not possible.</item>
</taglist>
</desc>
</func>
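(A hedged usage example; the node name is illustrative and not taken from the patch.) From a test case, the option is passed in the start_node option list:

    {ok, Node} = test_server:start_node(some_node, peer,
                                        [{start_cover, false}]),
    %% ... run the test; the connection to Node may now be broken
    %% without tearing down the main node's cover data ...
    test_server:stop_node(Node).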
diff --git a/lib/test_server/src/Makefile b/lib/test_server/src/Makefile
index bb0b4e55b8..20e7a5942c 100644
--- a/lib/test_server/src/Makefile
+++ b/lib/test_server/src/Makefile
@@ -40,6 +40,8 @@ RELSYSDIR = $(RELEASE_PATH)/lib/test_server-$(VSN)
# ----------------------------------------------------
MODULES= test_server_ctrl \
+ test_server_gl \
+ test_server_io \
test_server_node \
test_server \
test_server_sup \
diff --git a/lib/test_server/src/erl2html2.erl b/lib/test_server/src/erl2html2.erl
index 6891e87e48..9c459c05d4 100644
--- a/lib/test_server/src/erl2html2.erl
+++ b/lib/test_server/src/erl2html2.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -18,19 +18,9 @@
%%
%%%------------------------------------------------------------------
-%%% Purpose:Convert Erlang files to html. (Pretty faaast... :-)
+%%% Purpose: Convert Erlang files to HTML.
%%%------------------------------------------------------------------
-%--------------------------------------------------------------------
-% Some stats (Sparc5@110Mhz):
-% 4109 lines (erl_parse.erl): 3.00 secs
-% 1847 lines (application_controller.erl): 0.57 secs
-% 3160 lines (test_server.erl): 1.00 secs
-% 1199 lines (ts_estone.erl): 0.35 secs
-%
-% Avg: ~4.5e-4s/line, or ~0.45s/1000 lines, or ~2200 lines/sec.
-%--------------------------------------------------------------------
-
-module(erl2html2).
-export([convert/2, convert/3]).
@@ -52,134 +42,141 @@ convert(File, Dest) ->
"<body bgcolor=\"white\" text=\"black\""
" link=\"blue\" vlink=\"purple\" alink=\"red\">\n"],
convert(File, Dest, Header).
-
+
+
convert(File, Dest, Header) ->
- case file:read_file(File) of
- {ok, Bin} ->
- Code=binary_to_list(Bin),
- statistics(runtime),
- {Html1, Lines} = root(Code, [], 1),
- Html = [Header,
- "<pre>\n", Html1, "</pre>\n",
- footer(Lines),"</body>\n</html>\n"],
- file:write_file(Dest, Html);
- {error, Reason} ->
- {error, Reason}
+ %% statistics(runtime),
+ case parse_file(File) of
+ {ok,Functions} ->
+ %% {_, Time1} = statistics(runtime),
+ %% io:format("Parsed file in ~.2f Seconds.~n",[Time1/1000]),
+ case file:open(File,[raw,{read_ahead,10000}]) of
+ {ok,SFd} ->
+ case file:open(Dest,[write,raw]) of
+ {ok,DFd} ->
+ file:write(DFd,[Header,"<pre>\n"]),
+ Lines = build_html(SFd,DFd,Functions),
+ file:write(DFd,["</pre>\n",footer(),
+ "</body>\n</html>\n"]),
+ %% {_, Time2} = statistics(runtime),
+ %% io:format("Converted ~p lines in ~.2f Seconds.~n",
+ %% [Lines, Time2/1000]),
+ file:close(SFd),
+ file:close(DFd),
+ ok;
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
end.
-root([], Res, Line) ->
- {Res, Line};
-root([Char0|Code], Res, Line0) ->
- Char = [Char0],
- case Char of
- "-" ->
- {Match, Line1, NewCode0, AttName} =
- read_to_char(Line0+1, Code, [], [$(, $.]),
- {_, Line2, NewCode, Stuff} = read_to_char(Line1, NewCode0, [], $\n),
- NewRes = [Res,linenum(Line0),"-<b>",AttName,
- "</b>",Match, Stuff, "\n"],
- root(NewCode, NewRes, Line2);
- "%" ->
- {_, Line, NewCode, Stuff} = read_to_char(Line0+1, Code, [], $\n),
- NewRes = [Res,linenum(Line0),"<i>%",Stuff,"</i>\n"],
- root(NewCode, NewRes, Line);
- "\n" ->
- root(Code, [Res,linenum(Line0), "\n"], Line0+1);
- " " ->
- {_, Line, NewCode, Stuff} = read_to_char(Line0+1, Code, [], $\n),
- root(NewCode, [Res,linenum(Line0)," ",Stuff, "\n"],
- Line);
- "\t" ->
- {_, Line, NewCode, Stuff} = read_to_char(Line0+1, Code, [], $\n),
- root(NewCode, [Res,linenum(Line0),"\t",Stuff, "\n"],
- Line);
- [Chr|_] when Chr>96, Chr<123 ->
- %% Assumed to be function/clause start.
- %% FIXME: This will trivially generate non-unique anchors
- %% (one for each clause) --- which is illegal HTML.
- {_, Line1, NewCode0, FName0} = read_to_char(Line0+1, Code, [], $(),
- {_, Line2, NewCode, Stuff} =
- read_to_char(Line1,NewCode0, [], $\n),
- FuncName = [[Chr],FName0],
- NewRes=[Res,"<a name=",FuncName,">",
- linenum(Line0),"<b>",FuncName,"</b></a>",
- "(",Stuff, "\n"],
- root(NewCode, NewRes, Line2);
- Chr ->
- {_, Line, NewCode, Stuff} = read_to_char(Line0+1, Code, [], $\n),
- root(NewCode, [Res,linenum(Line0),Chr,Stuff, "\n"],
- Line)
+%%%-----------------------------------------------------------------
+%%% Parse the input file to get the line numbers for all function
+%%% definitions. This will be used when creating link targets for each
+%%% function in build_html/6.
+%%%
+%%% All function clauses are also marked in order to allow
+%%% possibly_enhance/2 to write these in bold.
+parse_file(File) ->
+ case epp:open(File, [], []) of
+ {ok,Epp} ->
+ Forms = parse_file(Epp,File,false),
+ epp:close(Epp),
+ {ok,Forms};
+ {error,E} ->
+ {error,E}
end.
-read_to_char(Line0, [], Res, _Chr) ->
- {nomatch, Line0, [], Res};
-read_to_char(Line0, [Char|Code], Res, Chr) ->
- case Char of
- Chr -> {Char, Line0, Code, Res};
- _ when is_list(Chr) ->
- case lists:member(Char,Chr) of
- true ->
- {Char, Line0, Code, Res};
- false ->
- {Line,NewCode,NewRes} = maybe_convert(Line0,Code,Res,Char),
- read_to_char(Line, NewCode, NewRes, Chr)
+
+parse_file(Epp,File,InCorrectFile) ->
+ case epp:parse_erl_form(Epp) of
+ {ok,Form} ->
+ case Form of
+ {attribute,_,file,{File,_}} ->
+ parse_file(Epp,File,true);
+ {attribute,_,file,{_OtherFile,_}} ->
+ parse_file(Epp,File,false);
+ {function,L,F,A,[_|C]} when InCorrectFile ->
+ Clauses = [{clause,CL} || {clause,CL,_,_,_} <- C],
+ [{atom_to_list(F),A,L} | Clauses] ++
+ parse_file(Epp,File,true);
+ _ ->
+ parse_file(Epp,File,InCorrectFile)
end;
- _ ->
- {Line,NewCode,NewRes} = maybe_convert(Line0,Code,Res,Char),
- read_to_char(Line,NewCode, NewRes, Chr)
+ {error,_E} ->
+ parse_file(Epp,File,InCorrectFile);
+ {eof,_Location} ->
+ []
end.
-maybe_convert(Line0,Code,Res,Chr) ->
- case Chr of
- %% Quoted stuff should not have the highlighting like normal code
- %% FIXME: unbalanced quotes (e.g. in comments) will cause trouble with
- %% highlighting and line numbering in the rest of the module.
- $" ->
- {_, Line1, NewCode, Stuff0} = read_to_char(Line0, Code, [], $"),
- {Line2,Stuff} = add_linenumbers(Line1,lists:flatten(Stuff0),[]),
- {Line2,NewCode,[Res,$",Stuff,$"]};
- %% These chars have meaning in HTML, and *must* *not* be
- %% written as themselves.
- $& ->
- {Line0, Code, [Res,"&amp;"]};
- $< ->
- {Line0, Code, [Res,"&lt;"]};
- $> ->
- {Line0, Code, [Res,"&gt;"]};
- %% Everything else is simply copied.
- OtherChr ->
- {Line0, Code, [Res,OtherChr]}
- end.
+%%%-----------------------------------------------------------------
+%%% Add a link target for each line and one for each function definition.
+build_html(SFd,DFd,Functions) ->
+ build_html(SFd,DFd,file:read_line(SFd),1,Functions,false).
-add_linenumbers(Line,[Chr|Chrs],Res) ->
- case Chr of
- $\n -> add_linenumbers(Line+1,Chrs,[Res,$\n,linenum(Line)]);
- _ -> add_linenumbers(Line,Chrs,[Res,Chr])
- end;
-add_linenumbers(Line,[],Res) ->
- {Line,Res}.
+build_html(SFd,DFd,{ok,Str},L,[{F,A,L}|Functions],_IsFuncDef) ->
+ FALink = http_uri:encode(F++"-"++integer_to_list(A)),
+ file:write(DFd,["<a name=\"",FALink,"\"/>"]),
+ build_html(SFd,DFd,{ok,Str},L,Functions,true);
+build_html(SFd,DFd,{ok,Str},L,[{clause,L}|Functions],_IsFuncDef) ->
+ build_html(SFd,DFd,{ok,Str},L,Functions,true);
+build_html(SFd,DFd,{ok,Str},L,Functions,IsFuncDef) ->
+ LStr = line_number(L),
+ Str1 = line(Str,IsFuncDef),
+ file:write(DFd,[LStr,Str1]),
+ build_html(SFd,DFd,file:read_line(SFd),L+1,Functions,false);
+build_html(_SFd,_DFd,eof,L,_Functions,_IsFuncDef) ->
+ L.
-%% Make nicely indented line numbers.
-linenum(Line) ->
- Num = integer_to_list(Line),
- A = case Line rem 10 of
- 0 -> "<a name=\"" ++ Num ++"\"></a>";
- _ -> []
- end,
+line_number(L) ->
+ LStr = integer_to_list(L),
Pred =
- case length(Num) of
+ case length(LStr) of
Length when Length < 5 ->
lists:duplicate(5-Length,$\s);
_ ->
[]
end,
- [A,Pred,integer_to_list(Line),":"].
+ ["<a name=\"",LStr,"\"/>",Pred,LStr,": "].
+
+line(Str,IsFuncDef) ->
+ Str1 = htmlize(Str),
+ possibly_enhance(Str1,IsFuncDef).
+
+%%%-----------------------------------------------------------------
+%%% Substitute special characters that should not appear in HTML
+htmlize([$<|Str]) ->
+ [$&,$l,$t,$;|htmlize(Str)];
+htmlize([$>|Str]) ->
+ [$&,$g,$t,$;|htmlize(Str)];
+htmlize([$&|Str]) ->
+ [$&,$a,$m,$p,$;|htmlize(Str)];
+htmlize([$"|Str]) ->
+ [$&,$q,$u,$o,$t,$;|htmlize(Str)];
+htmlize([Ch|Str]) ->
+ [Ch|htmlize(Str)];
+htmlize([]) ->
+ [].
+
+%%%-----------------------------------------------------------------
+%%% Write comments in italic and function definitions in bold.
+possibly_enhance(Str,true) ->
+ case lists:splitwith(fun($() -> false; (_) -> true end, Str) of
+ {_,[]} -> Str;
+ {F,A} -> ["<b>",F,"</b>",A]
+ end;
+possibly_enhance([$%|_]=Str,_) ->
+ ["<i>",Str--"\n","</i>","\n"];
+possibly_enhance([$-|_]=Str,_) ->
+ possibly_enhance(Str,true);
+possibly_enhance(Str,false) ->
+ Str.
-footer(_Lines) ->
+%%%-----------------------------------------------------------------
+%%% End of the file
+footer() ->
"".
-%% {_, Time} = statistics(runtime),
-%% io:format("Converted ~p lines in ~.2f Seconds.~n",
-%% [Lines, Time/1000]),
-%% S = "<i>The transformation of this file (~p lines) took ~.2f seconds</i>",
-%% F = lists:flatten(io_lib:format(S, [Lines, Time/1000])),
-%% ["<hr size=1>",F,"<br>\n"].
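The rewritten erl2html2 first parses the source with epp to record the line of every function definition, then streams the file through file:read_line/1, escaping HTML characters and emitting per-line and per-function anchors. A minimal usage sketch (the paths are placeholders):

%% Sketch only: the source and destination paths are placeholders.
render_to_html() ->
    Src  = "some_SUITE.erl",
    Dest = "some_SUITE.erl.html",
    case erl2html2:convert(Src, Dest) of
        ok    -> ok;
        Error -> {cannot_convert,Src,Error}
    end.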
diff --git a/lib/test_server/src/test_server.app.src b/lib/test_server/src/test_server.app.src
index faf7db835e..26330f9695 100644
--- a/lib/test_server/src/test_server.app.src
+++ b/lib/test_server/src/test_server.app.src
@@ -24,6 +24,7 @@
test_server_ctrl,
test_server,
test_server_h,
+ test_server_io,
test_server_node,
test_server_sup
]},
diff --git a/lib/test_server/src/test_server.erl b/lib/test_server/src/test_server.erl
index bfa5e927b1..14cdfd391a 100644
--- a/lib/test_server/src/test_server.erl
+++ b/lib/test_server/src/test_server.erl
@@ -20,15 +20,12 @@
-define(DEFAULT_TIMETRAP_SECS, 60).
-%%% START %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
--export([start/1,start/2]).
-
%%% TEST_SERVER_CTRL INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-export([run_test_case_apply/1,init_target_info/0,init_purify/0]).
--export([cover_compile/1,cover_analyse/2]).
+-export([cover_compile/1,cover_analyse/3]).
%%% TEST_SERVER_SUP INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
--export([get_loc/1]).
+-export([get_loc/1,set_tc_state/1]).
%%% TEST SUITE INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-export([lookup_config/2]).
@@ -60,49 +57,11 @@
-export([]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
--record(state,{controller,jobs=[]}).
-
-include("test_server_internal.hrl").
-include_lib("kernel/include/file.hrl").
-define(pl2a(M), test_server_sup:package_atom(M)).
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% **** START *** CODE FOR REMOTE TARGET ONLY ***
-%%
-%% test_server
-%% This process is started only if the test is to be run on a remote target
-%% The process is then started on target
-%% A socket connection is established with the test_server_ctrl process
-%% on host, and information about target is sent to host.
-start([ControllerHost]) when is_atom(ControllerHost) ->
- start(atom_to_list(ControllerHost));
-start(ControllerHost) when is_list(ControllerHost) ->
- start(ControllerHost,?MAIN_PORT).
-start(ControllerHost,ControllerPort) ->
- S = self(),
- Pid = spawn(fun() -> init(ControllerHost,ControllerPort,S) end),
- receive {Pid,started} -> {ok,Pid};
- {Pid,Error} -> Error
- end.
-
-init(Host,Port,Starter) ->
- global:register_name(?MODULE,self()),
- process_flag(trap_exit,true),
- test_server_sup:cleanup_crash_dumps(),
- case gen_tcp:connect(Host,Port, [binary,
- {reuseaddr,true},
- {packet,2}]) of
- {ok,MainSock} ->
- Starter ! {self(),started},
- request(MainSock,{target_info,init_target_info()}),
- loop(#state{controller={Host,MainSock}});
- Error ->
- Starter ! {self(),{error,
- {could_not_contact_controller,Error}}}
- end.
-
init_target_info() ->
[$.|Emu] = code:objfile_extension(),
{_, OTPRel} = init:script_id(),
@@ -118,171 +77,10 @@ init_target_info() ->
username=test_server_sup:get_username(),
cookie=atom_to_list(erlang:get_cookie())}.
-
-loop(#state{controller={_,MainSock}} = State) ->
- receive
- {tcp, MainSock, <<1,Request/binary>>} ->
- State1 = decode_main(binary_to_term(Request),State),
- loop(State1);
- {tcp_closed, MainSock} ->
- gen_tcp:close(MainSock),
- halt();
- {'EXIT',Pid,Reason} ->
- case lists:keysearch(Pid,1,State#state.jobs) of
- {value,{Pid,Name}} ->
- case Reason of
- normal -> ignore;
- _other -> request(MainSock,{job_proc_killed,Name,Reason})
- end,
- NewJobs = lists:keydelete(Pid,1,State#state.jobs),
- loop(State#state{jobs = NewJobs});
- false ->
- loop(State)
- end
- end.
-
-%% Decode request on main socket
-decode_main({job,Port,Name},#state{controller={Host,_},jobs=Jobs}=State) ->
- S = self(),
- NewJob = spawn_link(fun() -> job(Host,Port,S) end),
- receive {NewJob,started} -> State#state{jobs=[{NewJob,Name}|Jobs]};
- {NewJob,_Error} -> State
- end.
-
init_purify() ->
purify_new_leaks().
-%% Temporary job process on target
-%% This process will live while all test cases in the job are executed.
-%% A socket connection is established with the job process on host.
-job(Host,Port,Starter) ->
- process_flag(trap_exit,true),
- init_purify(),
- case gen_tcp:connect(Host,Port, [binary,
- {reuseaddr,true},
- {packet,4},
- {active,false}]) of
- {ok,JobSock} ->
- Starter ! {self(),started},
- job(JobSock);
- Error ->
- Starter ! {self(),{error,
- {could_not_contact_controller,Error}}}
- end.
-
-job(JobSock) ->
- JobDir = get_jobdir(),
- ok = file:make_dir(JobDir),
- ok = file:make_dir(filename:join(JobDir,?priv_dir)),
- put(test_server_job_sock,JobSock),
- put(test_server_job_dir,JobDir),
- {ok,Cwd} = file:get_cwd(),
- job_loop(JobSock),
- ok = file:set_cwd(Cwd),
- send_privdir(JobDir,JobSock), % also recursively removes jobdir
- ok.
-
-
-get_jobdir() ->
- Now = now(),
- {{Y,M,D},{H,Mi,S}} = calendar:now_to_local_time(Now),
- Basename = io_lib:format("~w-~2.2.0w-~2.2.0w_~2.2.0w.~2.2.0w.~2.2.0w_~w",
- [Y,M,D,H,Mi,S,element(3,Now)]),
- %% if target has a file master, don't use prim_file to look up cwd
- case lists:keymember(master,1,init:get_arguments()) of
- true ->
- {ok,Cwd} = file:get_cwd(),
- Cwd ++ "/" ++ Basename;
- false ->
- filename:absname(Basename)
- end.
-
-send_privdir(JobDir,JobSock) ->
- LocalPrivDir = filename:join(JobDir,?priv_dir),
- case file:list_dir(LocalPrivDir) of
- {ok,List} when List/=[] ->
- Tarfile0 = ?priv_dir ++ ".tar.gz",
- Tarfile = filename:join(JobDir,Tarfile0),
- {ok,Tar} = erl_tar:open(Tarfile,[write,compressed,cooked]),
- ok = erl_tar:add(Tar,LocalPrivDir,?priv_dir,[]),
- ok = erl_tar:close(Tar),
- {ok,TarBin} = file:read_file(Tarfile),
- file:delete(Tarfile),
- ok = del_dir(JobDir),
- request(JobSock,{{privdir,Tarfile0},TarBin});
- _ ->
- ok = del_dir(JobDir),
- request(JobSock,{privdir,empty_priv_dir})
- end.
-
-del_dir(Dir) ->
- case file:read_file_info(Dir) of
- {ok,#file_info{type=directory}} ->
- {ok,Cont} = file:list_dir(Dir),
- lists:foreach(fun(F) -> del_dir(filename:join(Dir,F)) end, Cont),
- ok = file:del_dir(Dir);
- {ok,#file_info{}} ->
- ok = file:delete(Dir);
- _r ->
- %% This might be a symlink - let's try to delete it!
- catch file:delete(Dir),
- ok
- end.
-
-%%
-%% Receive and decode request on job socket
-%%
-job_loop(JobSock) ->
- Request = recv(JobSock),
- case decode_job(Request) of
- ok -> job_loop(JobSock);
- {stop,R} -> R
- end.
-
-decode_job({{beam,Mod,Which},Beam}) ->
- % FIXME, shared directory structure on host and target required,
- % "Library beams" are not loaded from HOST... /Patrik
- code:add_patha(filename:dirname(Which)),
- % End of Patriks uglyness...
- {module,Mod} = code:load_binary(Mod,Which,Beam),
- ok;
-decode_job({{datadir,Tarfile0},Archive}) ->
- JobDir = get(test_server_job_dir),
- Tarfile = filename:join(JobDir,Tarfile0),
- ok = file:write_file(Tarfile,Archive),
- % Cooked is temporary removed/broken
- % ok = erl_tar:extract(Tarfile,[compressed,{cwd,JobDir},cooked]),
- ok = erl_tar:extract(Tarfile,[compressed,{cwd,JobDir}]),
- ok = file:delete(Tarfile),
- ok;
-decode_job({test_case,Case}) ->
- Result = run_test_case_apply(Case),
- JobSock = get(test_server_job_sock),
- request(JobSock,{test_case_result,Result}),
- case test_server_sup:tar_crash_dumps() of
- {error,no_crash_dumps} -> request(JobSock,{crash_dumps,no_crash_dumps});
- {ok,TarFile} ->
- {ok,TarBin} = file:read_file(TarFile),
- file:delete(TarFile),
- request(JobSock,{{crash_dumps,filename:basename(TarFile)},TarBin})
- end,
- ok;
-decode_job({sync_apply,{M,F,A}}) ->
- R = apply(M,F,A),
- request(get(test_server_job_sock),{sync_result,R}),
- ok;
-decode_job(job_done) ->
- {stop,stopped}.
-
-%%
-%% **** STOP *** CODE FOR REMOTE TARGET ONLY ***
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-
-
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% cover_compile({App,Include,Exclude,Cross}) ->
%% {ok,AnalyseModules} | {error,Reason}
@@ -377,9 +175,7 @@ module_names(Beams) ->
do_cover_compile(Modules) ->
do_cover_compile1(lists:usort(Modules)). % remove duplicates
-do_cover_compile1([Dont|Rest]) when Dont=:=cover;
- Dont=:=test_server;
- Dont=:=test_server_ctrl ->
+do_cover_compile1([Dont|Rest]) when Dont=:=cover ->
do_cover_compile1(Rest);
do_cover_compile1([M|Rest]) ->
case {code:is_sticky(M),code:is_loaded(M)} of
@@ -416,7 +212,7 @@ do_cover_compile1([]) ->
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% cover_analyse(Analyse,Modules) -> [{M,{Cov,NotCov,Details}}]
+%% cover_analyse(Analyse,Modules,Stop) -> [{M,{Cov,NotCov,Details}}]
%%
%% Analyse = {details,Dir} | details | {overview,void()} | overview
%% Modules = [atom()], the modules to analyse
@@ -432,8 +228,19 @@ do_cover_compile1([]) ->
%%
%% Also, if a Dir exists, cover data will be exported to a file called
%% all.coverdata in that directory.
-cover_analyse(Analyse,Modules) ->
- io:fwrite("Cover analysing...\n",[]),
+%%
+%% Finally, if Stop==true, cover will be stopped after the
+%% analysis is completed. Stopping cover causes the original
+%% (non-cover-compiled) modules to be loaded back in. If a process
+%% at this point is still running old code of any of the cover
+%% compiled modules, meaning that it has not made any fully
+%% qualified function call after the cover compilation, that
+%% process will now be killed. To avoid this scenario, it is
+%% possible to set Stop=false, which means that the modules will
+%% stay cover compiled. Note that this is only recommended if the
+%% Erlang node is terminated after the test is completed.
+cover_analyse(Analyse,Modules,Stop) ->
+ print(stdout, "Cover analysing...\n", []),
DetailsFun =
case Analyse of
{details,Dir} ->
@@ -483,9 +290,15 @@ cover_analyse(Analyse,Modules) ->
{M,Err}
end
end, Modules),
- Sticky = unstick_all_sticky(node()),
- cover:stop(),
- stick_all_sticky(node(),Sticky),
+
+ case Stop of
+ true ->
+ Sticky = unstick_all_sticky(node()),
+ cover:stop(),
+ stick_all_sticky(node(),Sticky);
+ false ->
+ ok
+ end,
R.
pmap(Fun,List) ->
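Given the new Stop argument described above, a hedged sketch of an analysis call that leaves the modules cover-compiled because the node is about to be taken down anyway:

%% Sketch only: Dir and Mods are placeholders. Passing false as Stop keeps
%% the modules cover-compiled, so this is only advisable if the Erlang node
%% is terminated after the test run.
analyse_without_stopping_cover(Dir, Mods) ->
    test_server:cover_analyse({details,Dir}, Mods, false).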
@@ -502,7 +315,20 @@ pmap(Fun,List) ->
end
end, Pids).
+
+do_cover_for_node(Node,CoverFunc) ->
+ %% A slave node might itself start another slave node, i.e. this
+ %% function might be executed on a slave node, in which case the
+ %% cover function must be executed on the master node. This is,
+ %% for instance, the case in test_server's own tests.
+ MainCoverNode = cover:get_main_node(),
+ Sticky = unstick_all_sticky(MainCoverNode,Node),
+ rpc:call(MainCoverNode,cover,CoverFunc,[Node]),
+ stick_all_sticky(Node,Sticky).
+
unstick_all_sticky(Node) ->
+ unstick_all_sticky(node(),Node).
+unstick_all_sticky(MainCoverNode,Node) ->
lists:filter(
fun(M) ->
case code:is_sticky(M) of
@@ -513,7 +339,7 @@ unstick_all_sticky(Node) ->
false
end
end,
- cover:modules()).
+ rpc:call(MainCoverNode,cover,modules,[])).
stick_all_sticky(Node,Sticky) ->
lists:foreach(
@@ -524,7 +350,7 @@ stick_all_sticky(Node,Sticky) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% run_test_case_apply(Mod,Func,Args,Name,RunInit,TimetrapData,RejectIoReqs) ->
+%% run_test_case_apply(Mod,Func,Args,Name,RunInit,TimetrapData) ->
%% {Time,Value,Loc,Opts,Comment} | {died,Reason,unknown,Comment}
%%
%% Time = float() (seconds)
@@ -538,7 +364,6 @@ stick_all_sticky(Node,Sticky) ->
%% it possible to capture all it's output from io:format/2, etc.
%%
%% The job process then sits down and waits for news from the case process.
-%% This might be io requests (which are redirected to the log files).
%%
%% Returns a tuple with the time spent (in seconds) in the test case,
%% the return value from the test case or an {'EXIT',Reason} if the case
@@ -559,12 +384,9 @@ stick_all_sticky(Node,Sticky) ->
%% ScaleTimetrap indicates if test_server should attempt to automatically
%% compensate timetraps for runtime delays introduced by e.g. tools like
%% cover.
-%%
-%% RejectIoReqs (bool) is information about whether printouts to stdout
-%% should be visible in the minor log file or not.
run_test_case_apply({CaseNum,Mod,Func,Args,Name,
- RunInit,TimetrapData,RejectIoReqs}) ->
+ RunInit,TimetrapData}) ->
purify_format("Test case #~w ~w:~w/1", [CaseNum, Mod, Func]),
case os:getenv("TS_RUN_VALGRIND") of
false ->
@@ -576,40 +398,29 @@ run_test_case_apply({CaseNum,Mod,Func,Args,Name,
test_server_h:testcase({Mod,Func,1}),
ProcBef = erlang:system_info(process_count),
Result = run_test_case_apply(Mod, Func, Args, Name, RunInit,
- TimetrapData, RejectIoReqs),
+ TimetrapData),
ProcAft = erlang:system_info(process_count),
purify_new_leaks(),
DetFail = get(test_server_detected_fail),
{Result,DetFail,ProcBef,ProcAft}.
-run_test_case_apply(Mod, Func, Args, Name, RunInit, TimetrapData, RejectIoReqs) ->
- case get(test_server_job_dir) of
- undefined ->
- %% i'm a local target
- do_run_test_case_apply(Mod, Func, Args, Name, RunInit,
- TimetrapData, RejectIoReqs);
- JobDir ->
- %% i'm a remote target
- case Args of
- [Config] when is_list(Config) ->
- {value,{data_dir,HostDataDir}} =
- lists:keysearch(data_dir, 1, Config),
- DataBase = filename:basename(HostDataDir),
- TargetDataDir = filename:join(JobDir, DataBase),
- Config1 = lists:keyreplace(data_dir, 1, Config,
- {data_dir,TargetDataDir}),
- TargetPrivDir = filename:join(JobDir, ?priv_dir),
- Config2 = lists:keyreplace(priv_dir, 1, Config1,
- {priv_dir,TargetPrivDir}),
- do_run_test_case_apply(Mod, Func, [Config2], Name, RunInit,
- TimetrapData, RejectIoReqs);
- _other ->
- do_run_test_case_apply(Mod, Func, Args, Name, RunInit,
- TimetrapData, RejectIoReqs)
- end
- end.
-do_run_test_case_apply(Mod, Func, Args, Name, RunInit,
- TimetrapData, RejectIoReqs) ->
+-type tc_status() :: 'starting' | 'running' | 'init_per_testcase' |
+ 'end_per_testcase' | {'framework',atom(),atom()} |
+ 'tc'.
+-record(st,
+ {
+ ref :: reference(),
+ pid :: pid(),
+ mf :: {atom(),atom()},
+ status :: tc_status() | 'undefined',
+ ret_val :: term(),
+ comment :: list(char()),
+ timeout :: non_neg_integer() | 'infinity',
+ config :: list() | 'undefined',
+ end_conf_pid :: pid() | 'undefined'
+ }).
+
+run_test_case_apply(Mod, Func, Args, Name, RunInit, TimetrapData) ->
{ok,Cwd} = file:get_cwd(),
Args2Print = case Args of
[Args1] when is_list(Args1) ->
@@ -624,9 +435,6 @@ do_run_test_case_apply(Mod, Func, Args, Name, RunInit,
TCCallback = get(test_server_testcase_callback),
LogOpts = get(test_server_logopts),
Ref = make_ref(),
- OldGLeader = group_leader(),
- %% Set ourself to group leader for the spawned process
- group_leader(self(),self()),
Pid =
spawn_link(
fun() ->
@@ -634,10 +442,10 @@ do_run_test_case_apply(Mod, Func, Args, Name, RunInit,
RunInit, TimetrapData,
LogOpts, TCCallback)
end),
- group_leader(OldGLeader, self()),
put(test_server_detected_fail, []),
- run_test_case_msgloop(Ref, Pid, false, RejectIoReqs, false, "",
- undefined, starting).
+ St = #st{ref=Ref,pid=Pid,mf={Mod,Func},status=starting,ret_val=[],
+ comment="",timeout=infinity,config=hd(Args)},
+ run_test_case_msgloop(St).
%% Ugly bug (pre R5A):
%% If this process (group leader of the test case) terminates before
@@ -648,32 +456,23 @@ do_run_test_case_apply(Mod, Func, Args, Name, RunInit,
%% A test case is known to have failed if it returns {'EXIT', _} tuple,
%% or sends a message {failed, File, Line} to its group_leader
%%
-run_test_case_msgloop(Ref, Pid, CaptureStdout, RejectIoReqs, Terminate,
- Comment, CurrConf, Status) ->
- %% NOTE: Keep job_proxy_msgloop/0 up to date when changes
- %% are made in this function!
- {Timeout,ReturnValue} =
- case Terminate of
- {true, ReturnVal} ->
- %% stop any timetrap timers for the test case
- %% that have been started by this process
- timetrap_cancel_all(Pid, false),
- {20, ReturnVal};
- false ->
- {infinity, should_never_appear}
- end,
+run_test_case_msgloop(#st{ref=Ref,pid=Pid,end_conf_pid=EndConfPid0}=St0) ->
receive
- {test_case_initialized,Pid} ->
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,running);
- Abort = {abort_current_testcase,_,_} when Status == starting ->
+ {set_tc_state=Tag,From,{Status,Config0}} ->
+ Config = case Config0 of
+ unknown -> St0#st.config;
+ _ -> Config0
+ end,
+ St = St0#st{status=Status,config=Config},
+ From ! {self(),Tag,ok},
+ run_test_case_msgloop(St);
+ {abort_current_testcase,_,_}=Abort when St0#st.status =:= starting ->
%% we're in init phase, must postpone this operation
%% until test case execution is in progress (or FW:init_tc
%% gets killed)
self() ! Abort,
erlang:yield(),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{abort_current_testcase,Reason,From} ->
Line = case is_process_alive(Pid) of
true -> get_loc(Pid);
@@ -683,142 +482,49 @@ run_test_case_msgloop(Ref, Pid, CaptureStdout, RejectIoReqs, Terminate,
exit(Pid,{testcase_aborted,Reason,Line}),
erlang:yield(),
From ! {self(),abort_current_testcase,ok},
- NewComment =
- receive
- {'DOWN', Mon, process, Pid, _} ->
- Comment
- after 10000 ->
- %% Pid is probably trapping exits, hit it harder...
- exit(Pid, kill),
- %% here's the only place we know Reason, so we save
- %% it as a comment, potentially replacing user data
- Error = lists:flatten(io_lib:format("Aborted: ~p",
- [Reason])),
- Error1 = lists:flatten([string:strip(S,left) ||
+ St = receive
+ {'DOWN', Mon, process, Pid, _} ->
+ St0
+ after 10000 ->
+ %% Pid is probably trapping exits, hit it harder...
+ exit(Pid, kill),
+ %% here's the only place we know Reason, so we save
+ %% it as a comment, potentially replacing user data
+ Error = lists:flatten(io_lib:format("Aborted: ~p",
+ [Reason])),
+ Error1 = lists:flatten([string:strip(S,left) ||
S <- string:tokens(Error,
[$\n])]),
- if length(Error1) > 63 ->
- string:substr(Error1,1,60) ++ "...";
- true ->
- Error1
- end
- end,
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- NewComment,CurrConf,Status);
- {permit_io,FromPid} ->
- put({permit_io,FromPid},true),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {io_request,From,ReplyAs,{put_chars,io_lib,Func,[Format,Args]}}
- when is_list(Format) ->
- Msg = (catch io_lib:Func(Format,Args)),
- run_test_case_msgloop_io(From,ReplyAs,CaptureStdout,RejectIoReqs,
- Msg,From,Func),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {io_request,From,ReplyAs,{put_chars,io_lib,Func,[Format,Args]}}
- when is_atom(Format) ->
- Msg = (catch io_lib:Func(Format,Args)),
- run_test_case_msgloop_io(From,ReplyAs,CaptureStdout,RejectIoReqs,
- Msg,From,Func),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {io_request,From,ReplyAs,{put_chars,Bytes}} ->
- run_test_case_msgloop_io(From,ReplyAs,CaptureStdout,RejectIoReqs,
- Bytes,From,put_chars),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {io_request,From,ReplyAs,{put_chars,unicode,io_lib,Func,[Format,Args]}}
- when is_list(Format) ->
- Msg = unicode_to_latin1(catch io_lib:Func(Format,Args)),
- run_test_case_msgloop_io(From,ReplyAs,CaptureStdout,RejectIoReqs,
- Msg,From,Func),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {io_request,From,ReplyAs,{put_chars,latin1,io_lib,Func,[Format,Args]}}
- when is_list(Format) ->
- Msg = (catch io_lib:Func(Format,Args)),
- run_test_case_msgloop_io(From,ReplyAs,CaptureStdout,RejectIoReqs,
- Msg,From,Func),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {io_request,From,ReplyAs,{put_chars,unicode,io_lib,Func,[Format,Args]}}
- when is_atom(Format) ->
- Msg = unicode_to_latin1(catch io_lib:Func(Format,Args)),
- run_test_case_msgloop_io(From,ReplyAs,CaptureStdout,RejectIoReqs,
- Msg,From,Func),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {io_request,From,ReplyAs,{put_chars,latin1,io_lib,Func,[Format,Args]}}
- when is_atom(Format) ->
- Msg = (catch io_lib:Func(Format,Args)),
- run_test_case_msgloop_io(From,ReplyAs,CaptureStdout,RejectIoReqs,
- Msg,From,Func),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {io_request,From,ReplyAs,{put_chars,unicode,Bytes}} ->
- run_test_case_msgloop_io(From,ReplyAs,CaptureStdout,RejectIoReqs,
- unicode_to_latin1(Bytes),From,put_chars),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {io_request,From,ReplyAs,{put_chars,latin1,Bytes}} ->
- run_test_case_msgloop_io(From,ReplyAs,CaptureStdout,RejectIoReqs,
- Bytes,From,put_chars),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- IoReq when element(1, IoReq) == io_request ->
- %% something else, just pass it on
- group_leader() ! IoReq,
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {structured_io,ClientPid,Msg} ->
- output(Msg, ClientPid),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {capture,NewCapture} ->
- run_test_case_msgloop(Ref,Pid,NewCapture,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
+ Comment = if length(Error1) > 63 ->
+ string:substr(Error1,1,60) ++ "...";
+ true ->
+ Error1
+ end,
+ St0#st{comment=Comment}
+ end,
+ run_test_case_msgloop(St);
{sync_apply,From,MFA} ->
sync_local_or_remote_apply(false,From,MFA),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{sync_apply_proxy,Proxy,From,MFA} ->
sync_local_or_remote_apply(Proxy,From,MFA),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {printout,Detail,Format,Args} ->
- print(Detail,Format,Args),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {comment,NewComment} ->
- NewComment1 = test_server_ctrl:to_string(NewComment),
- NewComment2 = test_server_sup:framework_call(format_comment,
- [NewComment1],
- NewComment1),
- Terminate1 =
- case Terminate of
- {true,{Time,Value,Loc,Opts,_OldComment}} ->
- {true,{Time,Value,mod_loc(Loc),Opts,NewComment2}};
- Other ->
- Other
- end,
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate1,
- NewComment2,CurrConf,Status);
+ run_test_case_msgloop(St0);
+ {comment,NewComment0} ->
+ NewComment1 = test_server_ctrl:to_string(NewComment0),
+ NewComment = test_server_sup:framework_call(format_comment,
+ [NewComment1],
+ NewComment1),
+ run_test_case_msgloop(St0#st{comment=NewComment});
{read_comment,From} ->
- From ! {self(),read_comment,Comment},
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
- {set_curr_conf,From,NewCurrConf} ->
- From ! {self(),set_curr_conf,ok},
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,NewCurrConf,Status);
- {make_priv_dir,From} when CurrConf == undefined ->
- From ! {self(),make_priv_dir,{error,no_priv_dir_in_config}},
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
+ From ! {self(),read_comment,St0#st.comment},
+ run_test_case_msgloop(St0);
{make_priv_dir,From} ->
+ Config = case St0#st.config of
+ undefined -> [];
+ Config0 -> Config0
+ end,
Result =
- case proplists:get_value(priv_dir, element(2, CurrConf)) of
+ case proplists:get_value(priv_dir, Config) of
undefined ->
{error,no_priv_dir_in_config};
PrivDir ->
@@ -832,212 +538,63 @@ run_test_case_msgloop(Ref, Pid, CaptureStdout, RejectIoReqs, Terminate,
end
end,
From ! {self(),make_priv_dir,Result},
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,Terminate,
- Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{'EXIT',Pid,{Ref,Time,Value,Loc,Opts}} ->
- RetVal = {Time/1000000,Value,mod_loc(Loc),Opts,Comment},
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- {true,RetVal},Comment,undefined,Status);
+ RetVal = {Time/1000000,Value,Loc,Opts},
+ St = setup_termination(RetVal, St0#st{config=undefined}),
+ run_test_case_msgloop(St);
{'EXIT',Pid,Reason} ->
- case Reason of
- {timetrap_timeout,TVal,Loc} ->
- %% convert Loc to form that can be formatted
- case mod_loc(Loc) of
- {FwMod,FwFunc,framework} ->
- %% timout during framework call
- spawn_fw_call(FwMod,FwFunc,CurrConf,Pid,
- {framework_error,{timetrap,TVal}},
- unknown,self()),
- run_test_case_msgloop(Ref,Pid,
- CaptureStdout,RejectIoReqs,
- Terminate,Comment,
- undefined,Status);
- Loc1 ->
- %% call end_per_testcase on a separate process,
- %% only so that the user has a chance to
- %% clean up after init_per_testcase, even after
- %% a timetrap timeout
- NewCurrConf =
- case CurrConf of
- {{Mod,Func},Conf} ->
- EndConfPid =
- call_end_conf(
- Mod,Func,Pid,
- {timetrap_timeout,TVal},
- Loc1,[{tc_status,
- {failed,
- timetrap_timeout}}|Conf],
- TVal),
- {EndConfPid,{Mod,Func},Conf};
- _ ->
- {Mod,Func} = get_mf(Loc1),
- %% The framework functions mustn't
- %% execute on this group leader process
- %% or io will cause deadlock, so we
- %% spawn a dedicated process for the
- %% operation and let the group leader
- %% go back to handle io.
- spawn_fw_call(Mod,Func,CurrConf,Pid,
- {timetrap_timeout,TVal},
- Loc1,self()),
- undefined
- end,
- run_test_case_msgloop(Ref,Pid,
- CaptureStdout,RejectIoReqs,
- Terminate,Comment,
- NewCurrConf,Status)
- end;
- {timetrap_timeout,TVal,Loc,InitOrEnd} ->
- case mod_loc(Loc) of
- {FwMod,FwFunc,framework} ->
- %% timout during framework call
- spawn_fw_call(FwMod,FwFunc,CurrConf,Pid,
- {framework_error,{timetrap,TVal}},
- unknown,self());
- Loc1 ->
- {Mod,_Func} = get_mf(Loc1),
- spawn_fw_call(Mod,InitOrEnd,CurrConf,Pid,
- {timetrap_timeout,TVal},
- Loc1,self())
- end,
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status);
- {testcase_aborted,ErrorMsg={user_timetrap_error,_},AbortLoc} ->
- %% user timetrap function caused exit
- %% during start of test case
- {Mod,Func} = get_mf(mod_loc(AbortLoc)),
- spawn_fw_call(Mod,Func,CurrConf,Pid,
- ErrorMsg,unknown,self()),
- run_test_case_msgloop(Ref,Pid,
- CaptureStdout,RejectIoReqs,
- Terminate,Comment,
- undefined,Status);
- {testcase_aborted,AbortReason,AbortLoc} ->
- ErrorMsg = {testcase_aborted,AbortReason},
- case mod_loc(AbortLoc) of
- {FwMod,FwFunc,framework} ->
- %% abort during framework call
- spawn_fw_call(FwMod,FwFunc,CurrConf,Pid,
- {framework_error,ErrorMsg},
- unknown,self()),
- run_test_case_msgloop(Ref,Pid,
- CaptureStdout,RejectIoReqs,
- Terminate,Comment,
- undefined,Status);
- Loc1 ->
- %% call end_per_testcase on a separate process,
- %% only so that the user has a chance to clean up
- %% after init_per_testcase, even after abortion
- NewCurrConf =
- case CurrConf of
- {{Mod,Func},Conf} ->
- TVal =
- case lists:keysearch(default_timeout,
- 1,
- Conf) of
- {value,{default_timeout,Tmo}} ->
- Tmo;
- _ ->
- ?DEFAULT_TIMETRAP_SECS*1000
- end,
- EndConfPid =
- call_end_conf(
- Mod,Func,Pid,
- ErrorMsg,Loc1,
- [{tc_status,
- {failed,ErrorMsg}}|Conf],TVal),
- {EndConfPid,{Mod,Func},Conf};
- _ ->
- {Mod,Func} = get_mf(Loc1),
- spawn_fw_call(Mod,Func,CurrConf,Pid,
- ErrorMsg,Loc1,self()),
- undefined
- end,
- run_test_case_msgloop(Ref,Pid,
- CaptureStdout,RejectIoReqs,
- Terminate,Comment,
- NewCurrConf,Status)
- end;
- killed ->
- %% result of an exit(TestCase,kill) call, which is the
- %% only way to abort a testcase process that traps exits
- %% (see abort_current_testcase)
- {Mod,Func} = case CurrConf of
- {MF,_} -> MF;
- _ -> {undefined,undefined}
- end,
- spawn_fw_call(Mod,Func,CurrConf,Pid,
- testcase_aborted_or_killed,
- unknown,self()),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status);
- {fw_error,{FwMod,FwFunc,FwError}} ->
- spawn_fw_call(FwMod,FwFunc,CurrConf,Pid,
- {framework_error,FwError},
- unknown,self()),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status);
- _Other ->
- %% the testcase has terminated because of Reason (e.g. an exit
- %% because a linked process failed)
- {Mod,Func} = case CurrConf of
- {MF,_} -> MF;
- _ -> {undefined,undefined}
- end,
- spawn_fw_call(Mod,Func,CurrConf,Pid,
- Reason,unknown,self()),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status)
- end;
- {EndConfPid,{call_end_conf,Data,_Result}} ->
+ St = handle_tc_exit(Reason, St0),
+ run_test_case_msgloop(St);
+ {EndConfPid0,{call_end_conf,Data,_Result}} ->
+ #st{mf={Mod,Func},config=CurrConf} = St0,
case CurrConf of
- {EndConfPid,{Mod,Func},_Conf} ->
+ _ when is_list(CurrConf) ->
{_Mod,_Func,TCPid,TCExitReason,Loc} = Data,
spawn_fw_call(Mod,Func,CurrConf,TCPid,
TCExitReason,Loc,self()),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,undefined,Status);
+ St = St0#st{config=undefined,end_conf_pid=undefined},
+ run_test_case_msgloop(St);
_ ->
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status)
+ run_test_case_msgloop(St0)
end;
{_FwCallPid,fw_notify_done,{T,Value,Loc,Opts,AddToComment}} ->
%% the framework has been notified, we're finished
- RetVal =
- case AddToComment of
- undefined ->
- {T,Value,Loc,Opts,Comment};
- _ ->
- Comment1 =
- if Comment == "" ->
- AddToComment;
- true ->
- Comment ++
- test_server_ctrl:xhtml("<br>",
- "<br />") ++
- AddToComment
- end,
- {T,Value,Loc,Opts,Comment1}
- end,
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- {true,RetVal},Comment,undefined,Status);
+ RetVal = {T,Value,Loc,Opts},
+ Comment0 = St0#st.comment,
+ Comment = case AddToComment of
+ undefined ->
+ Comment0;
+ _ ->
+ if Comment0 =:= "" ->
+ AddToComment;
+ true ->
+ Comment0 ++
+ test_server_ctrl:xhtml("<br>",
+ "<br />") ++
+ AddToComment
+ end
+ end,
+ St = setup_termination(RetVal, St0#st{comment=Comment,
+ config=undefined}),
+ run_test_case_msgloop(St);
{'EXIT',_FwCallPid,{fw_notify_done,Func,Error}} ->
%% a framework function failed
CB = os:getenv("TEST_SERVER_FRAMEWORK"),
Loc = case CB of
FW when FW =:= false; FW =:= "undefined" ->
- {test_server,Func};
+ [{test_server,Func}];
_ ->
- {list_to_atom(CB),Func}
+ [{list_to_atom(CB),Func}]
end,
- RetVal = {died,{framework_error,Loc,Error},Loc,"Framework error"},
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- {true,RetVal},Comment,undefined,Status);
+ RetVal = {died,{framework_error,Loc,Error},Loc},
+ St = setup_termination(RetVal, St0#st{comment="Framework error",
+ config=undefined}),
+ run_test_case_msgloop(St);
{failed,File,Line} ->
put(test_server_detected_fail,
[{File, Line}| get(test_server_detected_fail)]),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{user_timetrap,Pid,_TrapTime,StartTime,E={user_timetrap_error,_},_} ->
case update_user_timetraps(Pid, StartTime) of
@@ -1046,8 +603,7 @@ run_test_case_msgloop(Ref, Pid, CaptureStdout, RejectIoReqs, Terminate,
ignore ->
ok
end,
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{user_timetrap,Pid,TrapTime,StartTime,ElapsedTime,Scale} ->
%% a user timetrap is triggered, ignore it if new
%% timetrap has been started since
@@ -1062,71 +618,117 @@ run_test_case_msgloop(Ref, Pid, CaptureStdout, RejectIoReqs, Terminate,
ignore ->
ok
end,
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{timetrap_cancel_one,Handle,_From} ->
timetrap_cancel_one(Handle, false),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{timetrap_cancel_all,TCPid,_From} ->
timetrap_cancel_all(TCPid, false),
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status);
- {get_timetrap_info,TCPid,From} ->
+ run_test_case_msgloop(St0);
+ {get_timetrap_info,From,TCPid} ->
Info = get_timetrap_info(TCPid, false),
From ! {self(),get_timetrap_info,Info},
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
_Other when not is_tuple(_Other) ->
%% ignore anything not generated by test server
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
_Other when element(1, _Other) /= 'EXIT',
element(1, _Other) /= started,
element(1, _Other) /= finished,
element(1, _Other) /= print ->
%% ignore anything not generated by test server
- run_test_case_msgloop(Ref,Pid,CaptureStdout,RejectIoReqs,
- Terminate,Comment,CurrConf,Status)
- after Timeout ->
- ReturnValue
+ run_test_case_msgloop(St0)
+ after St0#st.timeout ->
+ #st{ret_val=RetVal,comment=Comment} = St0,
+ erlang:append_element(RetVal, Comment)
end.
-run_test_case_msgloop_io(From,ReplyAs,CaptureStdout,RejectIoReqs,
- Msg,From,Func) ->
- case Msg of
- {'EXIT',_} ->
- From ! {io_reply,ReplyAs,{error,Func}};
- _ ->
- From ! {io_reply,ReplyAs,ok}
- end,
- Proceed = if RejectIoReqs -> get({permit_io,From});
- true -> true
- end,
- if Proceed ->
- if CaptureStdout /= false ->
- CaptureStdout ! {captured,Msg};
- true ->
- ok
- end,
- output({minor,Msg},From);
- true ->
- ok
- end.
+setup_termination(RetVal, #st{pid=Pid}=St) ->
+ timetrap_cancel_all(Pid, false),
+ St#st{ret_val=RetVal,timeout=20}.
+
+set_tc_state(State) ->
+ set_tc_state(State,unknown).
+set_tc_state(State, Config) ->
+ tc_supervisor_req(set_tc_state, {State,Config}).
+
+handle_tc_exit(killed, St) ->
+ %% probably the result of an exit(TestCase,kill) call, which is the
+ %% only way to abort a testcase process that traps exits
+ %% (see abort_current_testcase).
+ #st{config=Config,mf={Mod,Func},pid=Pid} = St,
+ Msg = testcase_aborted_or_killed,
+ spawn_fw_call(Mod, Func, Config, Pid, Msg, unknown, self()),
+ St;
+handle_tc_exit({testcase_aborted,{user_timetrap_error,_}=Msg,_}, St) ->
+ #st{config=Config,mf={Mod,Func},pid=Pid} = St,
+ spawn_fw_call(Mod, Func, Config, Pid, Msg, unknown, self()),
+ St;
+handle_tc_exit(Reason, #st{status={framework,FwMod,FwFunc},
+ config=Config,pid=Pid}=St) ->
+ R = case Reason of
+ {timetrap_timeout,TVal,_} ->
+ {timetrap,TVal};
+ {testcase_aborted=E,AbortReason,_} ->
+ {E,AbortReason};
+ {fw_error,{FwMod,FwFunc,FwError}} ->
+ FwError;
+ Other ->
+ Other
+ end,
+ Error = {framework_error,R},
+ spawn_fw_call(FwMod, FwFunc, Config, Pid, Error, unknown, self()),
+ St;
+handle_tc_exit(Reason, #st{status=tc,config=Config0,mf={Mod,Func},pid=Pid}=St)
+ when is_list(Config0) ->
+ {R,Loc1,F} = case Reason of
+ {timetrap_timeout=E,TVal,Loc0} ->
+ {{E,TVal},Loc0,E};
+ {testcase_aborted=E,AbortReason,Loc0} ->
+ Msg = {E,AbortReason},
+ {Msg,Loc0,Msg};
+ Other ->
+ {Other,unknown,Other}
+ end,
+ Timeout = end_conf_timeout(Reason, St),
+ Config = [{tc_status,{failed,F}}|Config0],
+ EndConfPid = call_end_conf(Mod, Func, Pid, R, Loc1, Config, Timeout),
+ St#st{end_conf_pid=EndConfPid};
+handle_tc_exit(Reason, #st{config=Config,mf={Mod,Func0},pid=Pid,
+ status=Status}=St) ->
+ {R,Loc1} = case Reason of
+ {timetrap_timeout=E,TVal,Loc0} ->
+ {{E,TVal},Loc0};
+ {testcase_aborted=E,AbortReason,Loc0} ->
+ {{E,AbortReason},Loc0};
+ Other ->
+ {Other,unknown}
+ end,
+ Func = case Status of
+ init_per_testcase=F -> {F,Func0};
+ end_per_testcase=F -> {F,Func0};
+ _ -> Func0
+ end,
+ spawn_fw_call(Mod, Func, Config, Pid, R, Loc1, self()),
+ St.
-output(Msg,Sender) ->
- local_or_remote_apply({test_server_ctrl,output,[Msg,Sender]}).
+end_conf_timeout({timetrap_timeout,Timeout,_}, _) ->
+ Timeout;
+end_conf_timeout(_, #st{config=Config}) when is_list(Config) ->
+ proplists:get_value(default_timeout, Config, ?DEFAULT_TIMETRAP_SECS*1000);
+end_conf_timeout(_, _) ->
+ ?DEFAULT_TIMETRAP_SECS*1000.
call_end_conf(Mod,Func,TCPid,TCExitReason,Loc,Conf,TVal) ->
- %% Starter is also the group leader process
Starter = self(),
Data = {Mod,Func,TCPid,TCExitReason,Loc},
EndConfProc =
fun() ->
- group_leader(Starter, self()),
+ process_flag(trap_exit,true), % to catch timetraps
Supervisor = self(),
EndConfApply =
fun() ->
+ timetrap(TVal),
case catch apply(Mod,end_per_testcase,[Func,Conf]) of
{'EXIT',Why} ->
timer:sleep(1),
@@ -1145,29 +747,26 @@ call_end_conf(Mod,Func,TCPid,TCExitReason,Loc,Conf,TVal) ->
{Pid,end_conf} ->
Starter ! {self(),{call_end_conf,Data,ok}};
{'EXIT',Pid,Reason} ->
- Starter ! {self(),{call_end_conf,Data,{error,Reason}}}
- after TVal ->
- exit(Pid, kill),
group_leader() ! {printout,12,
"WARNING! ~p:end_per_testcase(~p, ~p)"
- " failed!\n\tReason: timetrap timeout"
- " after ~w ms!\n", [Mod,Func,Conf,TVal]},
- Starter ! {self(),{call_end_conf,Data,{error,timeout}}}
+ " failed!\n\tReason: ~p\n",
+ [Mod,Func,Conf,Reason]},
+ Starter ! {self(),{call_end_conf,Data,{error,Reason}}};
+ {'EXIT',_OtherPid,Reason} ->
+ %% Probably the parent - not much to do about that
+ exit(Reason)
end
end,
spawn_link(EndConfProc).
-spawn_fw_call(Mod,{init_per_testcase,Func},_,Pid,{timetrap_timeout,TVal}=Why,
+spawn_fw_call(Mod,{init_per_testcase,Func},CurrConf,Pid,{timetrap_timeout,TVal}=Why,
Loc,SendTo) ->
FwCall =
fun() ->
- %% set group leader so that printouts/comments
- %% from the framework get printed in the logs
- group_leader(SendTo, self()),
Skip = {skip,{failed,{Mod,init_per_testcase,Why}}},
%% if init_per_testcase fails, the test case
%% should be skipped
- case catch do_end_tc_call(Mod,Func, Loc, {Pid,Skip,[[]]}, Why) of
+ case catch do_end_tc_call(Mod,Func, {Pid,Skip,[CurrConf]}, Why) of
{'EXIT',FwEndTCErr} ->
exit({fw_notify_done,end_tc,FwEndTCErr});
_ ->
@@ -1181,22 +780,10 @@ spawn_fw_call(Mod,{init_per_testcase,Func},_,Pid,{timetrap_timeout,TVal}=Why,
spawn_fw_call(Mod,{end_per_testcase,Func},EndConf,Pid,
{timetrap_timeout,TVal}=Why,_Loc,SendTo) ->
- %%! This is a temporary fix that keeps Test Server alive during
- %%! execution of a parallel test case group, when sometimes
- %%! this clause gets called with EndConf == undefined. See OTP-9594
- %%! for more info.
- EndConf1 = if EndConf == undefined ->
- [{tc_status,{failed,{Mod,end_per_testcase,Why}}}];
- true ->
- EndConf
- end,
FwCall =
fun() ->
- %% set group leader so that printouts/comments
- %% from the framework get printed in the logs
- group_leader(SendTo, self()),
{RetVal,Report} =
- case proplists:get_value(tc_status, EndConf1) of
+ case proplists:get_value(tc_status, EndConf) of
undefined ->
E = {failed,{Mod,end_per_testcase,Why}},
{E,E};
@@ -1210,9 +797,9 @@ spawn_fw_call(Mod,{end_per_testcase,Func},EndConf,Pid,
"WARNING! ~p:end_per_testcase(~p, ~p)"
" failed!\n\tReason: timetrap timeout"
" after ~w ms!\n", [Mod,Func,EndConf,TVal]},
- FailLoc = proplists:get_value(tc_fail_loc, EndConf1),
- case catch do_end_tc_call(Mod,Func, FailLoc,
- {Pid,Report,[EndConf1]}, Why) of
+ FailLoc = proplists:get_value(tc_fail_loc, EndConf),
+ case catch do_end_tc_call(Mod,Func,
+ {Pid,Report,[EndConf]}, Why) of
{'EXIT',FwEndTCErr} ->
exit({fw_notify_done,end_tc,FwEndTCErr});
_ ->
@@ -1230,9 +817,6 @@ spawn_fw_call(Mod,{end_per_testcase,Func},EndConf,Pid,
spawn_fw_call(FwMod,FwFunc,_,_Pid,{framework_error,FwError},_,SendTo) ->
FwCall =
fun() ->
- %% set group leader so that printouts/comments
- %% from the framework get printed in the logs
- group_leader(SendTo, self()),
test_server_sup:framework_call(report, [framework_error,
{{FwMod,FwFunc},
FwError}]),
@@ -1249,17 +833,9 @@ spawn_fw_call(FwMod,FwFunc,_,_Pid,{framework_error,FwError},_,SendTo) ->
spawn_link(FwCall);
spawn_fw_call(Mod,Func,CurrConf,Pid,Error,Loc,SendTo) ->
- {Mod1,Func1} =
- case {Mod,Func,CurrConf} of
- {undefined,undefined,{{M,F},_}} -> {M,F};
- _ -> {Mod,Func}
- end,
FwCall =
fun() ->
- %% set group leader so that printouts/comments
- %% from the framework get printed in the logs
- group_leader(SendTo, self()),
- case catch fw_error_notify(Mod1,Func1,[],
+ case catch fw_error_notify(Mod,Func,[],
Error,Loc) of
{'EXIT',FwErrorNotifyErr} ->
exit({fw_notify_done,error_notification,
@@ -1267,8 +843,8 @@ spawn_fw_call(Mod,Func,CurrConf,Pid,Error,Loc,SendTo) ->
_ ->
ok
end,
- Conf = [{tc_status,{failed,timetrap_timeout}}],
- case catch do_end_tc_call(Mod1,Func1, Loc,
+ Conf = [{tc_status,{failed,timetrap_timeout}}|CurrConf],
+ case catch do_end_tc_call(Mod,Func,
{Pid,Error,[Conf]},Error) of
{'EXIT',FwEndTCErr} ->
exit({fw_notify_done,end_tc,FwEndTCErr});
@@ -1333,83 +909,73 @@ run_test_case_eval(Mod, Func, Args0, Name, Ref, RunInit,
TimetrapData, LogOpts, TCCallback) ->
put(test_server_multiply_timetraps, TimetrapData),
put(test_server_logopts, LogOpts),
+ Where = [{Mod,Func}],
+ put(test_server_loc, Where),
FWInitResult = test_server_sup:framework_call(init_tc,[?pl2a(Mod),Func,Args0],
{ok,Args0}),
- group_leader() ! {test_case_initialized,self()},
+ set_tc_state(running),
{{Time,Value},Loc,Opts} =
case FWInitResult of
{ok,Args} ->
run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback);
Error = {error,_Reason} ->
- Where = {Mod,Func},
- NewResult = do_end_tc_call(Mod,Func, Where, {Error,Args0},
+ NewResult = do_end_tc_call(Mod,Func, {Error,Args0},
{skip,{failed,Error}}),
{{0,NewResult},Where,[]};
{fail,Reason} ->
Conf = [{tc_status,{failed,Reason}} | hd(Args0)],
- Where = {Mod,Func},
fw_error_notify(Mod, Func, Conf, Reason),
- NewResult = do_end_tc_call(Mod,Func, Where, {{error,Reason},[Conf]},
+ NewResult = do_end_tc_call(Mod,Func, {{error,Reason},[Conf]},
{fail,Reason}),
{{0,NewResult},Where,[]};
Skip = {skip,_Reason} ->
- Where = {Mod,Func},
- NewResult = do_end_tc_call(Mod,Func, Where, {Skip,Args0}, Skip),
+ NewResult = do_end_tc_call(Mod,Func, {Skip,Args0}, Skip),
{{0,NewResult},Where,[]};
{auto_skip,Reason} ->
- Where = {Mod,Func},
- NewResult = do_end_tc_call(Mod,Func, Where, {{skip,Reason},Args0},
+ NewResult = do_end_tc_call(Mod,Func, {{skip,Reason},Args0},
{skip,Reason}),
{{0,NewResult},Where,[]}
end,
exit({Ref,Time,Value,Loc,Opts}).
run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) ->
- %% save current state in controller loop
- sync_send(group_leader(),set_curr_conf,{{Mod,Func},hd(Args)},
- 5000, fun() -> exit(no_answer_from_group_leader) end),
case RunInit of
run_init ->
- put(test_server_init_or_end_conf,{init_per_testcase,Func}),
- put(test_server_loc, {Mod,{init_per_testcase,Func}}),
+ set_tc_state(init_per_testcase, hd(Args)),
ensure_timetrap(Args),
case init_per_testcase(Mod, Func, Args) of
Skip = {skip,Reason} ->
Line = get_loc(),
- Conf = [{tc_status,{skipped,Reason}}],
- NewRes = do_end_tc_call(Mod,Func, Line, {Skip,[Conf]}, Skip),
+ Conf = [{tc_status,{skipped,Reason}}|hd(Args)],
+ NewRes = do_end_tc_call(Mod,Func, {Skip,[Conf]}, Skip),
{{0,NewRes},Line,[]};
{skip_and_save,Reason,SaveCfg} ->
Line = get_loc(),
- Conf = [{tc_status,{skipped,Reason}},{save_config,SaveCfg}],
- NewRes = do_end_tc_call(Mod,Func, Line, {{skip,Reason},[Conf]},
+ Conf = [{tc_status,{skipped,Reason}},{save_config,SaveCfg}|hd(Args)],
+ NewRes = do_end_tc_call(Mod,Func, {{skip,Reason},[Conf]},
{skip,Reason}),
{{0,NewRes},Line,[]};
FailTC = {fail,Reason} -> % user fails the testcase
EndConf = [{tc_status,{failed,Reason}} | hd(Args)],
fw_error_notify(Mod, Func, EndConf, Reason),
- NewRes = do_end_tc_call(Mod,Func, {Mod,Func},
+ NewRes = do_end_tc_call(Mod,Func,
{{error,Reason},[EndConf]},
FailTC),
- {{0,NewRes},{Mod,Func},[]};
+ {{0,NewRes},[{Mod,Func}],[]};
{ok,NewConf} ->
- put(test_server_init_or_end_conf,undefined),
%% call user callback function if defined
NewConf1 = user_callback(TCCallback, Mod, Func, init, NewConf),
%% save current state in controller loop
- sync_send(group_leader(),set_curr_conf,{{Mod,Func},NewConf1},
- 5000, fun() -> exit(no_answer_from_group_leader) end),
- put(test_server_loc, {Mod,Func}),
+ set_tc_state(tc, NewConf1),
%% execute the test case
{{T,Return},Loc} = {ts_tc(Mod, Func, [NewConf1]),get_loc()},
{EndConf,TSReturn,FWReturn} =
case Return of
{E,TCError} when E=='EXIT' ; E==failed ->
- ModLoc = mod_loc(Loc),
fw_error_notify(Mod, Func, NewConf1,
- TCError, ModLoc),
+ TCError, Loc),
{[{tc_status,{failed,TCError}},
- {tc_fail_loc,ModLoc}|NewConf1],
+ {tc_fail_loc,Loc}|NewConf1],
Return,{error,TCError}};
SaveCfg={save_config,_} ->
{[{tc_status,ok},SaveCfg|NewConf1],Return,ok};
@@ -1426,8 +992,6 @@ run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) ->
%% call user callback function if defined
EndConf1 = user_callback(TCCallback, Mod, Func, 'end', EndConf),
%% update current state in controller loop
- sync_send(group_leader(),set_curr_conf,EndConf1, 5000,
- fun() -> exit(no_answer_from_group_leader) end),
{FWReturn1,TSReturn1,EndConf2} =
case end_per_testcase(Mod, Func, EndConf1) of
SaveCfg1={save_config,_} ->
@@ -1447,24 +1011,21 @@ run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) ->
{FWReturn,TSReturn,EndConf1}
end,
%% clear current state in controller loop
- sync_send(group_leader(),set_curr_conf,undefined,
- 5000, fun() -> exit(no_answer_from_group_leader) end),
- put(test_server_init_or_end_conf,undefined),
- case do_end_tc_call(Mod,Func, Loc,
+ case do_end_tc_call(Mod,Func,
{FWReturn1,[EndConf2]}, TSReturn1) of
{failed,Reason} = NewReturn ->
fw_error_notify(Mod,Func,EndConf2, Reason),
- {{T,NewReturn},{Mod,Func},[]};
+ {{T,NewReturn},[{Mod,Func}],[]};
NewReturn ->
{{T,NewReturn},Loc,[]}
end
end;
skip_init ->
+ set_tc_state(running, hd(Args)),
%% call user callback function if defined
Args1 = user_callback(TCCallback, Mod, Func, init, Args),
ensure_timetrap(Args1),
%% ts_tc does a catch
- put(test_server_loc, {Mod,Func}),
%% if this is a named conf group, the test case (init or end conf)
%% should be called with the name as the first argument
Args2 = if Name == undefined -> Args1;
@@ -1475,43 +1036,12 @@ run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) ->
%% call user callback function if defined
Return1 = user_callback(TCCallback, Mod, Func, 'end', Return),
{Return2,Opts} = process_return_val([Return1], Mod, Func,
- Args1, {Mod,Func}, Return1),
+ Args1, [{Mod,Func}], Return1),
{{T,Return2},Loc,Opts}
end.
-do_end_tc_call(M,F, Loc, Res, Return) ->
- IsSuite = case lists:reverse(atom_to_list(M)) of
- [$E,$T,$I,$U,$S,$_|_] -> true;
- _ -> false
- end,
+do_end_tc_call(Mod, Func, Res, Return) ->
FwMod = os:getenv("TEST_SERVER_FRAMEWORK"),
- {Mod,Func} =
- if FwMod == M ; FwMod == "undefined"; FwMod == false ->
- {M,F};
- (not IsSuite) and is_list(Loc) and (length(Loc)>1) ->
- %% If failure in other module (M) than suite, try locate
- %% suite name in Loc list and call end_tc with Suite:TestCase
- %% instead of M:F.
- GetSuite = fun(S,TC) ->
- case lists:reverse(atom_to_list(S)) of
- [$E,$T,$I,$U,$S,$_|_] -> [{S,TC}];
- _ -> []
- end
- end,
- case lists:flatmap(fun({S,TC,_}) -> GetSuite(S,TC);
- ({{S,TC},_}) -> GetSuite(S,TC);
- ({S,TC}) -> GetSuite(S,TC);
- (_) -> []
- end, Loc) of
- [] ->
- {M,F};
- [FoundSuite|_] ->
- FoundSuite
- end;
- true ->
- {M,F}
- end,
-
Ref = make_ref(),
if FwMod == "ct_framework" ; FwMod == "undefined"; FwMod == false ->
case test_server_sup:framework_call(
@@ -1553,7 +1083,7 @@ process_return_val([Return], M,F,A, Loc, Final) when is_list(Return) ->
true -> % must be return value from end conf case
process_return_val1(Return, M,F,A, Loc, Final, []);
false -> % must be Config value from init conf case
- case do_end_tc_call(M, F, Loc, {ok,A}, Return) of
+ case do_end_tc_call(M, F, {ok,A}, Return) of
{failed, FWReason} = Failed ->
fw_error_notify(M,F,A, FWReason),
{Failed, []};
@@ -1569,9 +1099,9 @@ process_return_val(Return, M,F,A, Loc, Final) ->
process_return_val1([Failed={E,TCError}|_], M,F,A=[Args], Loc, _, SaveOpts)
when E=='EXIT';
E==failed ->
- fw_error_notify(M,F,A, TCError, mod_loc(Loc)),
- case do_end_tc_call(M,F, Loc, {{error,TCError},
- [[{tc_status,{failed,TCError}}|Args]]},
+ fw_error_notify(M,F,A, TCError, Loc),
+ case do_end_tc_call(M,F, {{error,TCError},
+ [[{tc_status,{failed,TCError}}|Args]]},
Failed) of
{failed,FWReason} ->
{{failed,FWReason},SaveOpts};
@@ -1589,8 +1119,8 @@ process_return_val1([RetVal={Tag,_}|Opts], M,F,A, Loc, _, SaveOpts) when Tag==sk
process_return_val1(Opts, M,F,A, Loc, RetVal, SaveOpts);
process_return_val1([_|Opts], M,F,A, Loc, Final, SaveOpts) ->
process_return_val1(Opts, M,F,A, Loc, Final, SaveOpts);
-process_return_val1([], M,F,A, Loc, Final, SaveOpts) ->
- case do_end_tc_call(M,F, Loc, {Final,A}, Final) of
+process_return_val1([], M,F,A, _Loc, Final, SaveOpts) ->
+ case do_end_tc_call(M,F, {Final,A}, Final) of
{failed,FWReason} ->
{{failed,FWReason},SaveOpts};
NewReturn ->
@@ -1656,7 +1186,7 @@ do_init_per_testcase(Mod, Args) ->
throw:Other ->
set_loc(erlang:get_stacktrace()),
Line = get_loc(),
- FormattedLoc = test_server_sup:format_loc(mod_loc(Line)),
+ FormattedLoc = test_server_sup:format_loc(Line),
group_leader() ! {printout,12,
"ERROR! init_per_testcase thrown!\n"
"\tLocation: ~s\n\tReason: ~p\n",
@@ -1667,7 +1197,7 @@ do_init_per_testcase(Mod, Args) ->
Reason = {Reason0,Stk},
set_loc(Stk),
Line = get_loc(),
- FormattedLoc = test_server_sup:format_loc(mod_loc(Line)),
+ FormattedLoc = test_server_sup:format_loc(Line),
group_leader() ! {printout,12,
"ERROR! init_per_testcase crashed!\n"
"\tLocation: ~s\n\tReason: ~p\n",
@@ -1690,8 +1220,7 @@ end_per_testcase(Mod, Func, Conf) ->
end.
do_end_per_testcase(Mod,EndFunc,Func,Conf) ->
- put(test_server_init_or_end_conf,{EndFunc,Func}),
- put(test_server_loc, {Mod,{EndFunc,Func}}),
+ set_tc_state(end_per_testcase, Conf),
try Mod:EndFunc(Func, Conf) of
{save_config,_}=SaveCfg ->
SaveCfg;
@@ -1715,8 +1244,7 @@ do_end_per_testcase(Mod,EndFunc,Func,Conf) ->
"Reason: ~p\n"
"Line: ~s\n",
[EndFunc, Other,
- test_server_sup:format_loc(
- mod_loc(get_loc()))]},
+ test_server_sup:format_loc(get_loc())]},
{failed,{Mod,end_per_testcase,Other}};
Class:Reason ->
Stk = erlang:get_stacktrace(),
@@ -1738,8 +1266,7 @@ do_end_per_testcase(Mod,EndFunc,Func,Conf) ->
"Reason: ~p\n"
"Line: ~s\n",
[EndFunc, Reason,
- test_server_sup:format_loc(
- mod_loc(get_loc()))]},
+ test_server_sup:format_loc(get_loc())]},
{failed,{Mod,end_per_testcase,Why}}
end.
@@ -1752,66 +1279,19 @@ get_loc(Pid) ->
lists:foreach(fun({Key,Val}) -> put(Key, Val) end, Dict),
Stk = [rewrite_loc_item(Loc) || Loc <- Stk0],
case get(test_server_loc) of
- undefined ->
- put(test_server_loc, Stk);
- {Suite,Case} ->
+ [{Suite,Case}] ->
%% location info unknown, check if {Suite,Case,Line}
%% is available in stacktrace. and if so, use stacktrace
- %% instead of currect test_server_loc
+ %% instead of current test_server_loc
case [match || {S,C,_L} <- Stk, S == Suite, C == Case] of
[match|_] -> put(test_server_loc, Stk);
_ -> ok
end;
_ ->
- ok
+ put(test_server_loc, Stk)
end,
get_loc().
-%% find the latest known Suite:Testcase
-get_mf(MFs) ->
- get_mf(MFs, {undefined,undefined}).
-
-get_mf([MF|MFs], _Found) when is_tuple(MF) ->
- ModFunc = {Mod,_} = case MF of
- {M,F,_} -> {M,F};
- MF -> MF
- end,
- case is_suite(Mod) of
- true -> ModFunc;
- false -> get_mf(MFs, ModFunc)
- end;
-get_mf(_, Found) ->
- Found.
-
-is_suite(Mod) ->
- case lists:reverse(atom_to_list(Mod)) of
- "ETIUS" ++ _ -> true;
- _ -> false
- end.
-
-mod_loc(Loc) ->
- %% handle diff line num versions
- case Loc of
- [{{_M,_F},_L}|_] ->
- [begin if L /= 0 -> {?pl2a(M),F,L};
- true -> {?pl2a(M),F} end end || {{M,F},L} <- Loc];
- [{_M,_F}|_] ->
- [{?pl2a(M),F} || {M,F} <- Loc];
- {{M,F},0} ->
- [{?pl2a(M),F}];
- {{M,F},L} ->
- [{?pl2a(M),F,L}];
- {M,ForL} ->
- [{?pl2a(M),ForL}];
- {M,F,0} ->
- [{M,F}];
- [{M,F,0}|Stack] ->
- [{M,F}|Stack];
- _ ->
- Loc
- end.
-
-
fw_error_notify(Mod, Func, Args, Error) ->
test_server_sup:framework_call(error_notification,
[?pl2a(Mod),Func,[Args],
@@ -1833,10 +1313,10 @@ fw_error_notify(Mod, Func, Args, Error, Loc) ->
%% is directed to console, major and/or minor log files.
print(Detail,Format,Args) ->
- local_or_remote_apply({test_server_ctrl,print,[Detail,Format,Args]}).
+ test_server_ctrl:print(Detail, Format, Args).
print(Detail,Format,Args,Printer) ->
- local_or_remote_apply({test_server_ctrl,print,[Detail,Format,Args,Printer]}).
+ test_server_ctrl:print(Detail, Format, Args, Printer).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% print_timestamp(Detail,Leader) -> ok
@@ -1846,7 +1326,7 @@ print(Detail,Format,Args,Printer) ->
%% log files.
print_timestamp(Detail,Leader) ->
- local_or_remote_apply({test_server_ctrl,print_timestamp,[Detail,Leader]}).
+ test_server_ctrl:print_timestamp(Detail, Leader).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1894,7 +1374,12 @@ ts_tc(M, F, A) ->
{Elapsed, Result}.
set_loc(Stk) ->
- Loc = [rewrite_loc_item(I) || {_,_,_,_}=I <- Stk],
+ Loc = case [rewrite_loc_item(I) || {_,_,_,_}=I <- Stk] of
+ [{M,F,0}|Stack] ->
+ [{M,F}|Stack];
+ Other ->
+ Other
+ end,
put(test_server_loc, Loc).
rewrite_loc_item({M,F,_,Loc}) ->
@@ -1908,16 +1393,6 @@ rewrite_loc_item({M,F,_,Loc}) ->
%% Note: Some of these functions have been moved to test_server_sup %%
%% in an attempt to keep this modules small (yeah, right!) %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-unicode_to_latin1(Chars) when is_list(Chars); is_binary(Chars) ->
- lists:flatten(
- [ case X of
- High when High > 255 ->
- io_lib:format("\\{~.8B}",[X]);
- Low ->
- Low
- end || X <- unicode:characters_to_list(Chars,unicode) ]);
-unicode_to_latin1(Garbage) ->
- Garbage.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% format(Format) -> IoLibReturn
@@ -2510,11 +1985,7 @@ get_timetrap_info(TCPid, SendToServer) ->
[I|_] ->
I;
[] when SendToServer == true ->
- MsgLooper = group_leader(),
- MsgLooper ! {get_timetrap_info,TCPid,self()},
- receive
- {MsgLooper,get_timetrap_info,I} -> I
- end;
+ tc_supervisor_req({get_timetrap_info,TCPid});
[] ->
undefined
end
@@ -2533,17 +2004,29 @@ hours(N) -> trunc(N * 1000 * 60 * 60).
minutes(N) -> trunc(N * 1000 * 60).
seconds(N) -> trunc(N * 1000).
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% sync_send(Pid,Tag,Msg,Timeout,DoAfter) -> Result
+%% tc_supervisor_req(Tag) -> Result
+%% tc_supervisor_req(Tag, Msg) -> Result
%%
-sync_send(Pid,Tag,Msg,Timeout,DoAfter) ->
+
+tc_supervisor_req(Tag) ->
+ Pid = test_server_gl:get_tc_supervisor(group_leader()),
+ Pid ! {Tag,self()},
+ receive
+ {Pid,Tag,Result} ->
+ Result
+ after 5000 ->
+ error(no_answer_from_tc_supervisor)
+ end.
+
+tc_supervisor_req(Tag, Msg) ->
+ Pid = test_server_gl:get_tc_supervisor(group_leader()),
Pid ! {Tag,self(),Msg},
receive
{Pid,Tag,Result} ->
Result
- after Timeout ->
- DoAfter()
+ after 5000 ->
+ error(no_answer_from_tc_supervisor)
end.
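
For orientation, a minimal sketch of the reply side these helpers assume: the test case supervisor looked up via test_server_gl:get_tc_supervisor/1 answers each {Tag,From} or {Tag,From,Msg} request with {self(),Tag,Result}. The loop below is illustrative only (function name and reply values are made up); the real supervisor is implemented elsewhere in this patch set.

    %% Illustrative reply loop satisfying the tc_supervisor_req protocol.
    demo_tc_supervisor(Comment) ->
        receive
            {read_comment,From} ->
                From ! {self(),read_comment,Comment},
                demo_tc_supervisor(Comment);
            {make_priv_dir,From} ->
                From ! {self(),make_priv_dir,ok},   %% reply value assumed
                demo_tc_supervisor(Comment);
            {Tag,From,_Msg} ->
                From ! {self(),Tag,ok},             %% generic 2-arity request
                demo_tc_supervisor(Comment)
        end.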
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2705,7 +2188,10 @@ start_node(Name, Type, Options) ->
%% by a shielded node.
Cover = case is_cover() of
true ->
- not is_shielded(Name) andalso same_version(Node);
+ not is_shielded(Name)
+ andalso same_version(Node)
+ andalso proplists:get_value(start_cover,Options,
+ true);
false ->
false
end,
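
A hedged usage sketch of the new start_cover option (the node name is a placeholder and test_server:start_node/3 is assumed to be the exported entry point): passing {start_cover,false} makes the proplists:get_value(start_cover, Options, true) check above return false, so cover is not started on the new node.

    %% Sketch only: start a slave node without cover instrumentation.
    demo_start_without_cover() ->
        {ok,Node} = test_server:start_node(no_cover_node, slave,
                                           [{start_cover,false}]),
        %% ... use Node ...
        test_server:stop_node(Node).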
@@ -2713,9 +2199,7 @@ start_node(Name, Type, Options) ->
net_adm:ping(Node),
case Cover of
true ->
- Sticky = unstick_all_sticky(Node),
- cover:start(Node),
- stick_all_sticky(Node,Sticky);
+ do_cover_for_node(Node,start);
_ ->
ok
end,
@@ -2743,7 +2227,27 @@ wait_for_node(Slave) ->
group_leader() ! {sync_apply,
self(),
{test_server_ctrl,wait_for_node,[Slave]}},
- receive {sync_result,R} -> R end.
+ Result = receive {sync_result,R} -> R end,
+ case Result of
+ ok ->
+ Cover = case is_cover() of
+ true ->
+ not is_shielded(Slave) andalso same_version(Slave);
+ false ->
+ false
+ end,
+
+ net_adm:ping(Slave),
+ case Cover of
+ true ->
+ do_cover_for_node(Slave,start);
+ _ ->
+ ok
+ end;
+ _ ->
+ ok
+ end,
+ Result.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2755,9 +2259,7 @@ stop_node(Slave) ->
Nocover = is_shielded(Slave) orelse not same_version(Slave),
case is_cover() of
true when not Nocover ->
- Sticky = unstick_all_sticky(Slave),
- cover:stop(Slave),
- stick_all_sticky(Slave,Sticky);
+ do_cover_for_node(Slave,flush);
_ ->
ok
end,
@@ -2938,13 +2440,7 @@ comment(String) ->
%% Read the current comment string stored in
%% state during test case execution.
read_comment() ->
- MsgLooper = group_leader(),
- MsgLooper ! {read_comment,self()},
- receive
- {MsgLooper,read_comment,Comment} -> Comment
- after
- 5000 -> ""
- end.
+ tc_supervisor_req(read_comment).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% make_priv_dir() -> ok
@@ -2952,13 +2448,7 @@ read_comment() ->
%% Order test server to create the private directory
%% for the current test case.
make_priv_dir() ->
- MsgLooper = group_leader(),
- group_leader() ! {make_priv_dir,self()},
- receive
- {MsgLooper,make_priv_dir,Result} -> Result
- after
- 5000 -> error
- end.
+ tc_supervisor_req(make_priv_dir).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% os_type() -> OsType
@@ -2966,7 +2456,7 @@ make_priv_dir() ->
%% Returns the OsType of the target node. OsType is
%% the same as returned from os:type()
os_type() ->
- test_server_ctrl:get_target_os_type().
+ os:type().
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -3085,47 +2575,9 @@ purify_format(Format, Args) ->
%%
%% Generic send functions for communication with host
%%
-sync_local_or_remote_apply(Proxy,From,{M,F,A} = MFA) ->
- case get(test_server_job_sock) of
- undefined ->
- %% i'm a local target
- Result = apply(M,F,A),
- if is_pid(Proxy) -> Proxy ! {sync_result_proxy,From,Result};
- true -> From ! {sync_result,Result}
- end;
- JobSock ->
- %% i'm a remote target
- request(JobSock,{sync_apply,MFA}),
- {sync_result,Result} = recv(JobSock),
- if is_pid(Proxy) -> Proxy ! {sync_result_proxy,From,Result};
- true -> From ! {sync_result,Result}
- end
- end.
-local_or_remote_apply({M,F,A} = MFA) ->
- case get(test_server_job_sock) of
- undefined ->
- %% i'm a local target
- apply(M,F,A),
- ok;
- JobSock ->
- %% i'm a remote target
- request(JobSock,{apply,MFA}),
- ok
- end.
-
-request(Sock,Request) ->
- gen_tcp:send(Sock,<<1,(term_to_binary(Request))/binary>>).
-
-%%
-%% Generic receive function for communication with host
-%%
-recv(Sock) ->
- case gen_tcp:recv(Sock,0) of
- {error,closed} ->
- gen_tcp:close(Sock),
- exit(connection_lost);
- {ok,<<1,Request/binary>>} ->
- binary_to_term(Request);
- {ok,<<0,B/binary>>} ->
- B
+sync_local_or_remote_apply(Proxy, From, {M,F,A}) ->
+ %% i'm a local target
+ Result = apply(M, F, A),
+ if is_pid(Proxy) -> Proxy ! {sync_result_proxy,From,Result};
+ true -> From ! {sync_result,Result}
end.
diff --git a/lib/test_server/src/test_server_ctrl.erl b/lib/test_server/src/test_server_ctrl.erl
index 88d86285d5..bc08c12089 100644
--- a/lib/test_server/src/test_server_ctrl.erl
+++ b/lib/test_server/src/test_server_ctrl.erl
@@ -34,118 +34,6 @@
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% ARCHITECTURE
-%%
-%% The Erlang Test Server can be run on the target machine (local target)
-%% or towards a remote target. The execution flow is mainly the same in
-%% both cases, but with a remote target the test cases are (obviously)
-%% executed on the target machine. Host and target communicates over
-%% socket connections because the host should not be introduced as an
-%% additional node in the distributed erlang system in which the test
-%% cases are run.
-%%
-%%
-%% Local Target:
-%% =============
-%%
-%% -----
-%% | | test_server_ctrl ({global,test_server})
-%% ----- (test_server_ctrl.erl)
-%% |
-%% |
-%% -----
-%% | | JobProc
-%% ----- (test_server_ctrl.erl and test_server.erl)
-%% |
-%% |
-%% -----
-%% | | CaseProc
-%% ----- (test_server.erl)
-%%
-%%
-%%
-%% test_server_ctrl is the main process in the system. It is a registered
-%% process, and it will always be alive when testing is ongoing.
-%% test_server_ctrl initiates testing and monitors JobProc(s).
-%%
-%% When target is local, and Test Server is *not* being used by a framework
-%% application (where it might cause duplicate name problems in a distributed
-%% test environment), the process is globally registered as 'test_server'
-%% to be able to simulate the {global,test_server} process on a remote target.
-%%
-%% JobProc is spawned for each 'job' added to the test_server_ctrl.
-%% A job can mean one test case, one test suite or one spec.
-%% JobProc creates and writes logs and presents results from testing.
-%% JobProc is the group leader for CaseProc.
-%%
-%% CaseProc is spawned for each test case. It runs the test case and
-%% sends results and any other information to its group leader - JobProc.
-%%
-%%
-%%
-%% Remote Target:
-%% ==============
-%%
-%% HOST TARGET
-%%
-%% ----- MainSock -----
-%% test_server_ctrl | |- - - - - - -| | {global,test_server}
-%% (test_server_ctrl.erl) ----- ----- (test_server.erl)
-%% | |
-%% | |
-%% ----- JobSock -----
-%% JobProcH | |- - - - - - -| | JobProcT
-%% (test_server_ctrl.erl) ----- ----- (test_server.erl)
-%% |
-%% |
-%% -----
-%% | | CaseProc
-%% ----- (test_server.erl)
-%%
-%%
-%%
-%%
-%% A separate test_server process only exists when target is remote. It
-%% is then the main process on target. It is started when test_server_ctrl
-%% is started, and a socket connection is established between
-%% test_server_ctrl and test_server. The following information can be sent
-%% over MainSock:
-%%
-%% HOST TARGET
-%% -> {target_info, TargetInfo} (during initiation)
-%% <- {job_proc_killed,Name,Reason} (if a JobProcT dies unexpectedly)
-%% -> {job,Port,Name} (to start a new JobProcT)
-%%
-%%
-%% When target is remote, JobProc is split into to processes: JobProcH
-%% executing on Host and JobProcT executing on Target. (The two processes
-%% execute the same code as JobProc does when target is local.) JobProcH
-%% and JobProcT communicates over a socket connection. The following
-%% information can be sent over JobSock:
-%%
-%% HOST TARGET
-%% -> {test_case, Case} To start a new test case
-%% -> {beam,Mod} .beam file as binary to be loaded
-%% on target, e.g. a test suite
-%% -> {datadir,Tarfile} Content of the datadir for a test suite
-%% <- {apply,MFA} MFA to be applied on host, ignore return;
-%% (apply is used for printing information in
-%% log or console)
-%% <- {sync_apply,MFA} MFA to be applied on host, wait for return
-%% (used for starting and stopping slave nodes)
-%% -> {sync_apply,MFA} MFA to be applied on target, wait for return
-%% (used for cover compiling and analysing)
-%% <-> {sync_result,Result} Return value from sync_apply
-%% <- {test_case_result,Result} When a test case is finished
-%% <- {crash_dumps,Tarfile} When a test case is finished
-%% -> job_done When a job is finished
-%% <- {privdir,Privdir} When a job is finished
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-
-
%%% SUPERVISOR INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-export([start/0, start/1, start_link/1, stop/0]).
@@ -165,19 +53,18 @@
-export([reject_io_reqs/1, get_levels/0, set_levels/3]).
-export([multiply_timetraps/1, scale_timetraps/1, get_timetrap_parameters/0]).
-export([create_priv_dir/1]).
--export([cover/2, cover/3, cover/7,
- cross_cover_analyse/1, cross_cover_analyse/2, trc/1, stop_trace/0]).
+-export([cover/2, cover/3, cover/8,
+ cross_cover_analyse/2, cross_cover_analyse/3, trc/1, stop_trace/0]).
-export([testcase_callback/1]).
-export([set_random_seed/1]).
-export([kill_slavenodes/0]).
%%% TEST_SERVER INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
--export([output/2, print/2, print/3, print/4, print_timestamp/2]).
+-export([print/2, print/3, print/4, print_timestamp/2]).
-export([start_node/3, stop_node/1, wait_for_node/1, is_release_available/1]).
-export([format/1, format/2, format/3, to_string/1]).
-export([get_target_info/0]).
-export([get_hosts/0]).
--export([get_target_os_type/0]).
-export([node_started/1]).
%%% DEBUGGER INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -203,6 +90,7 @@
-define(coverlog_name, "cover.html").
-define(cross_coverlog_name, "cross_cover.html").
-define(cover_total, "total_cover.log").
+-define(unexpected_io_log, "unexpected_io.log").
-define(last_file, "last_name").
-define(last_link, "last_link").
-define(last_test, "last_test").
@@ -462,8 +350,7 @@ wait_finish() ->
ok.
abort_current_testcase(Reason) ->
- controller_call({abort_current_testcase,Reason}),
- ok.
+ controller_call({abort_current_testcase,Reason}).
abort() ->
OldTrap = process_flag(trap_exit, true),
@@ -520,9 +407,9 @@ cover(App, Analyse) when is_atom(App) ->
cover(CoverFile, Analyse) ->
cover(none, CoverFile, Analyse).
cover(App, CoverFile, Analyse) ->
- controller_call({cover,{App,CoverFile},Analyse}).
-cover(App, CoverFile, Exclude, Include, Cross, Export, Analyse) ->
- controller_call({cover,{App,{CoverFile,Exclude,Include,Cross,Export}},Analyse}).
+ controller_call({cover,{App,CoverFile},Analyse,true}).
+cover(App, CoverFile, Exclude, Include, Cross, Export, Analyse, Stop) ->
+ controller_call({cover,{App,{CoverFile,Exclude,Include,Cross,Export}},Analyse,Stop}).
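
The extra Stop argument threads through several hunks below; a compressed trace of that flow, kept in comment form since every step appears verbatim elsewhere in this patch:

    %% cover(App, CoverFile, Exclude, Include, Cross, Export, Analyse, Stop)
    %%   -> controller_call({cover,{App,{CoverFile,Exclude,Include,Cross,Export}},Analyse,Stop})
    %%   -> State#state{cover={App,Analyse,Stop}}
    %%   -> ExtraTools = [{cover,App,Analyse,Stop}]
    %%   -> start_extra_tools/2 stores {cover,App,Analyse,AnalyseMods,Stop}
    %%   -> stop_extra_tools/2 calls cover_analyse(App,Analyse,AnalyseMods,Stop,TestDir)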
testcase_callback(ModFunc) ->
controller_call({testcase_callback,ModFunc}).
@@ -536,20 +423,6 @@ kill_slavenodes() ->
get_hosts() ->
get(test_server_hosts).
-get_target_os_type() ->
- case whereis(?MODULE) of
- undefined ->
- %% This is probably called on the target node
- os:type();
- Pid when Pid =:= self() ->
- os:type();
- _pid ->
- %% This is called on the controller, e.g. from a
- %% specification clause of a test case
- #target_info{os_type=OsType} = controller_call(get_target_info),
- OsType
- end.
-
%%--------------------------------------------------------------------
add_job(Name, TopCase) ->
@@ -605,7 +478,7 @@ controller_call(Arg, Timeout) ->
%% Mode 'lazy' ignores (and resets to []) any jobs in the state file
%%
-init([Param]) ->
+init([_]) ->
case os:getenv("TEST_SERVER_CALL_TRACE") of
false ->
ok;
@@ -631,104 +504,14 @@ init([Param]) ->
test_server_sup:cleanup_crash_dumps(),
State = #state{jobs=[],finish=false},
put(test_server_free_targets,[]),
- case contact_main_target(Param) of
- {ok,TI} ->
- ets:new(slave_tab, [named_table,set,public,{keypos,2}]),
- set_hosts([TI#target_info.host]),
- {ok,State#state{target_info=TI}};
- {error,Reason} ->
- {stop,Reason}
- end.
-
-
-%% If the test is to be run at a remote target, this function sets up
-%% a socket communication with the target.
-contact_main_target(local) ->
- %% When used by a general framework, global registration of
- %% test_server should not be required.
- case get_fw_mod(undefined) of
- undefined ->
- %% Local target! The global test_server process implemented by
- %% test_server.erl will not be started, so we simulate it by
- %% globally registering this process instead.
- global:sync(),
- case global:whereis_name(test_server) of
- undefined ->
- global:register_name(test_server, self());
- Pid ->
- case node() of
- N when N == node(Pid) ->
- io:format(user, "Warning: test_server already running!\n", []),
- global:re_register_name(test_server,self());
- _ ->
- ok
- end
- end;
- _ ->
- ok
- end,
- TI = test_server:init_target_info(),
+ TI0 = test_server:init_target_info(),
TargetHost = test_server_sup:hoststr(),
- {ok,TI#target_info{where=local,
- host=TargetHost,
- naming=naming(),
- master=TargetHost}};
-
-contact_main_target(ParameterFile) ->
- case read_parameters(ParameterFile) of
- {ok,Par} ->
- case test_server_node:start_remote_main_target(Par) of
- {ok,TI} ->
- {ok,TI};
- {error,Error} ->
- {error,{could_not_start_main_target,Error}}
- end;
- {error,Error} ->
- {error,{could_not_read_parameterfile,Error}}
- end.
-
-read_parameters(File) ->
- case file:consult(File) of
- {ok,Data} ->
- read_parameters(lists:flatten(Data), #par{naming=naming()});
- Error ->
- Error
- end.
-read_parameters([{type,Type}|Data], Par) -> % mandatory
- read_parameters(Data, Par#par{type=Type});
-read_parameters([{target,Target}|Data], Par) -> % mandatory
- read_parameters(Data, Par#par{target=cast_to_list(Target)});
-read_parameters([{slavetargets,SlaveTargets}|Data], Par) ->
- read_parameters(Data, Par#par{slave_targets=SlaveTargets});
-read_parameters([{longnames,Bool}|Data], Par) ->
- Naming = if Bool->"-name"; true->"-sname" end,
- read_parameters(Data, Par#par{naming=Naming});
-read_parameters([{master,{Node,Cookie}}|Data], Par) ->
- read_parameters(Data, Par#par{master=cast_to_list(Node),
- cookie=cast_to_list(Cookie)});
-read_parameters([Other|_Data], _Par) ->
- {error,{illegal_parameter,Other}};
-read_parameters([], Par) when Par#par.type==undefined ->
- {error, {missing_mandatory_parameter,type}};
-read_parameters([], Par) when Par#par.target==undefined ->
- {error, {missing_mandatory_parameter,target}};
-read_parameters([], Par0) ->
- Par =
- case {Par0#par.type, Par0#par.master} of
- {ose, undefined} ->
- %% Use this node as master and bootserver for target
- %% and slave nodes
- Par0#par{master = atom_to_list(node()),
- cookie = atom_to_list(erlang:get_cookie())};
- {ose, _Master} ->
- %% Master for target and slave nodes was defined in parameterfile
- Par0;
- _ ->
- %% Use target as master for slave nodes,
- %% (No master is used for target)
- Par0#par{master="test_server@" ++ Par0#par.target}
- end,
- {ok,Par}.
+ TI = TI0#target_info{host=TargetHost,
+ naming=naming(),
+ master=TargetHost},
+ ets:new(slave_tab, [named_table,set,public,{keypos,2}]),
+ set_hosts([TI#target_info.host]),
+ {ok,State#state{target_info=TI}}.
naming() ->
case lists:member($., test_server_sup:hoststr()) of
@@ -795,7 +578,7 @@ handle_call({add_job,Dir,Name,TopCase,Skip}, _From, State) ->
ExtraTools =
case State#state.cover of
false -> [];
- {App,Analyse} -> [{cover,App,Analyse}]
+ {App,Analyse,Stop} -> [{cover,App,Analyse,Stop}]
end,
ExtraTools1 =
case State#state.random_seed of
@@ -1051,13 +834,13 @@ handle_call(stop_trace, _From, State) ->
{reply,R,State#state{trc=false}};
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% handle_call({cover,App,Analyse}, _, State) -> ok | {error,Reason}
+%% handle_call({cover,App,Analyse,Stop}, _, State) -> ok | {error,Reason}
%%
%% All modules in application App are cover compiled
%% Analyse indicates on which level the coverage should be analysed
-handle_call({cover,App,Analyse}, _From, State) ->
- {reply,ok,State#state{cover={App,Analyse}}};
+handle_call({cover,App,Analyse,Stop}, _From, State) ->
+ {reply,ok,State#state{cover={App,Analyse,Stop}}};
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% handle_call({create_priv_dir,Value}, _, State) -> ok | {error,Reason}
@@ -1209,25 +992,17 @@ handle_cast({node_started,Node}, State) ->
%% Pid = pid()
%% Reason = term()
%%
-%% Handles exit messages from linked processes. Only test suites and
-%% possibly a target client are expected to be linked.
-%% When a test suite terminates, it is removed from the job queue.
-%% If a target client terminates it means that we lost contact with
-%% target. The test_server_ctrl process is terminated, and teminate/2
-%% will do the cleanup
+%% Handles exit messages from linked processes. Only test suites are
+%% expected to be linked. When a test suite terminates, it is removed
+%% from the job queue. If a target client terminates it means that we
+%% lost contact with target. The test_server_ctrl process is
+%% terminated, and terminate/2 will do the cleanup
handle_info({'EXIT',Pid,Reason}, State) ->
case lists:keysearch(Pid,2,State#state.jobs) of
false ->
- TI = State#state.target_info,
- case TI#target_info.target_client of
- Pid ->
- %% The target client died - lost contact with target
- {stop,{lost_contact_with_target,Reason},State};
- _other ->
- %% not our problem
- {noreply,State}
- end;
+ %% not our problem
+ {noreply,State};
{value,{Name,_}} ->
NewJobs = lists:keydelete(Pid, 2, State#state.jobs),
case Reason of
@@ -1302,14 +1077,8 @@ handle_info({tcp_closed,Sock}, State=#state{trc=Sock}) ->
%%! Maybe print something???
{noreply,State#state{trc=false}};
handle_info({tcp_closed,Sock}, State) ->
- case test_server_node:nodedown(Sock,State#state.target_info) of
- target_died ->
- %% terminate/2 will do the cleanup
- {stop,target_died,State};
- _ ->
- {noreply,State}
- end;
-
+ test_server_node:nodedown(Sock, State#state.target_info),
+ {noreply,State};
handle_info(_, State) ->
%% dummy; accept all, do nothing.
{noreply, State}.
@@ -1370,24 +1139,22 @@ kill_all_jobs([]) ->
spawn_tester(Mod, Func, Args, Dir, Name, Levels, RejectIoReqs,
CreatePrivDir, TCCallback, ExtraTools) ->
- spawn_link(
- fun() -> init_tester(Mod, Func, Args, Dir, Name, Levels, RejectIoReqs,
+ spawn_link(fun() ->
+ init_tester(Mod, Func, Args, Dir, Name, Levels, RejectIoReqs,
CreatePrivDir, TCCallback, ExtraTools)
end).
-init_tester(Mod, Func, Args, Dir, Name, {SumLev,MajLev,MinLev}, RejectIoReqs,
- CreatePrivDir, TCCallback, ExtraTools) ->
+init_tester(Mod, Func, Args, Dir, Name, {_,_,MinLev}=Levels,
+ RejectIoReqs, CreatePrivDir, TCCallback, ExtraTools) ->
process_flag(trap_exit, true),
+ test_server_io:start_link(),
put(test_server_name, Name),
put(test_server_dir, Dir),
put(test_server_total_time, 0),
put(test_server_ok, 0),
put(test_server_failed, 0),
put(test_server_skipped, {0,0}),
- put(test_server_summary_level, SumLev),
- put(test_server_major_level, MajLev),
put(test_server_minor_level, MinLev),
- put(test_server_reject_io_reqs, RejectIoReqs),
put(test_server_create_priv_dir, CreatePrivDir),
put(test_server_random_seed, proplists:get_value(random_seed, ExtraTools)),
put(test_server_testcase_callback, TCCallback),
@@ -1403,24 +1170,30 @@ init_tester(Mod, Func, Args, Dir, Name, {SumLev,MajLev,MinLev}, RejectIoReqs,
put(test_server_framework_name, list_to_atom(FWName))
end
end,
+
%% before first print, read and set logging options
LogOpts = test_server_sup:framework_call(get_logopts, [], []),
put(test_server_logopts, LogOpts),
- put(test_server_log_nl, not lists:member(no_nl, LogOpts)),
+
StartedExtraTools = start_extra_tools(ExtraTools),
+
+ test_server_io:set_job_name(Name),
+ test_server_io:set_gl_props([{levels,Levels},
+ {auto_nl,not lists:member(no_nl, LogOpts)},
+ {reject_io_reqs,RejectIoReqs}]),
+ group_leader(test_server_io:get_gl(true), self()),
{TimeMy,Result} = ts_tc(Mod, Func, Args),
- put(test_server_common_io_handler, undefined),
+ set_io_buffering(undefined),
+ test_server_io:set_job_name(undefined),
catch stop_extra_tools(StartedExtraTools),
case Result of
{'EXIT',test_suites_done} ->
- print(25, "DONE, normal exit", []);
+ ok;
{'EXIT',_Pid,Reason} ->
print(1, "EXIT, reason ~p", [Reason]);
{'EXIT',Reason} ->
report_severe_error(Reason),
- print(1, "EXIT, reason ~p", [Reason]);
- _Other ->
- print(25, "DONE", [])
+ print(1, "EXIT, reason ~p", [Reason])
end,
Time = TimeMy/1000000,
SuccessStr =
@@ -1439,7 +1212,8 @@ init_tester(Mod, Func, Args, Dir, Name, {SumLev,MajLev,MinLev}, RejectIoReqs,
"<tr><td></td><td><b>TOTAL</b></td><td></td><td></td><td></td>"
"<td>~.3fs</td><td><b>~s</b></td><td>~p Ok, ~p Failed~s of ~p</td></tr>\n"
"</tfoot>\n",
- [Time,SuccessStr,OkN,FailedN,SkipStr,OkN+FailedN+SkippedN]).
+ [Time,SuccessStr,OkN,FailedN,SkipStr,OkN+FailedN+SkippedN]),
+ test_server_io:stop().
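
Pulling together the calls added in this hunk and in start_log_file/0 further down, each job now drives test_server_io through a small lifecycle. The sketch below condenses it into one function; the file descriptors, levels and option values are placeholders, and the real calls are spread over init_tester/10 and start_log_file/0.

    %% Condensed lifecycle sketch (not part of the patch).
    demo_io_setup(Name, Levels, MajorFd, HtmlFd, UnexpectedFd) ->
        test_server_io:start_link(),
        test_server_io:set_job_name(Name),
        test_server_io:set_gl_props([{levels,Levels},
                                     {auto_nl,true},
                                     {reject_io_reqs,false}]),
        test_server_io:set_fd(major, MajorFd),
        test_server_io:set_fd(html, HtmlFd),
        test_server_io:set_fd(unexpected_io, UnexpectedFd),
        group_leader(test_server_io:get_gl(true), self()),
        %% ... the job runs here ...
        test_server_io:set_job_name(undefined),
        test_server_io:stop().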
report_severe_error(Reason) ->
test_server_sup:framework_call(report, [severe_error,Reason]).
@@ -1460,11 +1234,11 @@ elapsed_time(Before, After) ->
start_extra_tools(ExtraTools) ->
start_extra_tools(ExtraTools, []).
-start_extra_tools([{cover,App,Analyse} | ExtraTools], Started) ->
+start_extra_tools([{cover,App,Analyse,Stop} | ExtraTools], Started) ->
case cover_compile(App) of
{ok,AnalyseMods} ->
start_extra_tools(ExtraTools,
- [{cover,App,Analyse,AnalyseMods}|Started]);
+ [{cover,App,Analyse,AnalyseMods,Stop}|Started]);
{error,_} ->
start_extra_tools(ExtraTools, Started)
end;
@@ -1483,8 +1257,8 @@ stop_extra_tools(ExtraTools) ->
end,
stop_extra_tools(ExtraTools, TestDir).
-stop_extra_tools([{cover,App,Analyse,AnalyseMods}|ExtraTools], TestDir) ->
- cover_analyse(App, Analyse, AnalyseMods, TestDir),
+stop_extra_tools([{cover,App,Analyse,AnalyseMods,Stop}|ExtraTools], TestDir) ->
+ cover_analyse(App, Analyse, AnalyseMods, Stop, TestDir),
stop_extra_tools(ExtraTools, TestDir);
%%stop_extra_tools([_ | ExtraTools], TestDir) ->
%% stop_extra_tools(ExtraTools, TestDir);
@@ -1816,8 +1590,9 @@ do_test_cases(TopCases, SkipCases,
print(html,
"<p><ul>\n"
"<li><a href=\"~s\">Full textual log</a></li>\n"
- "<li><a href=\"~s\">Coverage log</a></li>\n</ul></p>\n",
- [?suitelog_name,?coverlog_name]),
+ "<li><a href=\"~s\">Coverage log</a></li>\n"
+ "<li><a href=\"~s\">Unexpected I/O log</a></li>\n</ul></p>\n",
+ [?suitelog_name,?coverlog_name,?unexpected_io_log]),
print(html,
"<p>~s</p>\n" ++
xhtml("<table bgcolor=\"white\" border=\"3\" cellpadding=\"5\">",
@@ -1902,10 +1677,16 @@ start_log_file() ->
put(test_server_log_dir_base,TestDir1),
MajorName = filename:join(TestDir1, ?suitelog_name),
HtmlName = MajorName ++ ?html_ext,
+ UnexpectedName = filename:join(TestDir1, ?unexpected_io_log),
{ok,Major} = file:open(MajorName, [write]),
{ok,Html} = file:open(HtmlName, [write]),
+ {ok,Unexpected} = file:open(UnexpectedName, [write]),
+ test_server_io:set_fd(major, Major),
+ test_server_io:set_fd(html, Html),
+ test_server_io:set_fd(unexpected_io, Unexpected),
put(test_server_major_fd,Major),
put(test_server_html_fd,Html),
+ put(test_server_unexpected_io, Unexpected),
make_html_link(filename:absname(?last_test ++ ?html_ext),
HtmlName, filename:basename(Dir)),
@@ -1916,7 +1697,7 @@ start_log_file() ->
PrivDir = filename:join(TestDir1, ?priv_dir),
ok = file:make_dir(PrivDir),
put(test_server_priv_dir,PrivDir++"/"),
- print_timestamp(13,"Suite started at "),
+ print_timestamp(major, "Suite started at "),
LogInfo = [{topdir,Dir},{rundir,lists:flatten(TestDir1)}],
test_server_sup:framework_call(report, [loginfo,LogInfo]),
@@ -1958,13 +1739,14 @@ make_html_link(LinkName, Target, Explanation) ->
%% Some header info will also be inserted into the log file.
start_minor_log_file(Mod, Func) ->
+ MFA = {Mod,Func,1},
LogDir = get(test_server_log_dir_base),
Name0 = lists:flatten(io_lib:format("~s.~s~s", [Mod,Func,?html_ext])),
Name = downcase(Name0),
AbsName = filename:join(LogDir, Name),
case file:read_file_info(AbsName) of
{error,_} -> %% normal case, unique name
- start_minor_log_file1(Mod, Func, LogDir, AbsName);
+ start_minor_log_file1(Mod, Func, LogDir, AbsName, MFA);
{ok,_} -> %% special case, duplicate names
{_,S,Us} = now(),
Name1_0 =
@@ -1973,14 +1755,15 @@ start_minor_log_file(Mod, Func) ->
?html_ext])),
Name1 = downcase(Name1_0),
AbsName1 = filename:join(LogDir, Name1),
- start_minor_log_file1(Mod, Func, LogDir, AbsName1)
+ start_minor_log_file1(Mod, Func, LogDir, AbsName1, MFA)
end.
-start_minor_log_file1(Mod, Func, LogDir, AbsName) ->
+start_minor_log_file1(Mod, Func, LogDir, AbsName, MFA) ->
{ok,Fd} = file:open(AbsName, [write]),
Lev = get(test_server_minor_level)+1000, %% far down in the minor levels
put(test_server_minor_fd, Fd),
-
+ test_server_gl:set_minor_fd(group_leader(), Fd, MFA),
+
TestDescr = io_lib:format("Test ~p:~p result", [Mod,Func]),
{Header,Footer} =
case test_server_sup:framework_call(get_html_wrapper,
@@ -2013,7 +1796,7 @@ start_minor_log_file1(Mod, Func, LogDir, AbsName) ->
lists:member(no_src, get(test_server_logopts))} of
{true,false} ->
print(Lev, "<a href=\"~s#~s\">source code for ~p:~p/1</a>\n",
- [SrcListing,Func,Mod,Func]);
+ [SrcListing,atom_to_list(Func)++"-1",Mod,Func]);
_ -> ok
end,
@@ -2028,6 +1811,7 @@ start_minor_log_file1(Mod, Func, LogDir, AbsName) ->
AbsName.
stop_minor_log_file() ->
+ test_server_gl:unset_minor_fd(group_leader()),
Fd = get(test_server_minor_fd),
Footer = get(test_server_minor_footer),
io:fwrite(Fd, "</pre>\n" ++ Footer, []),
@@ -2303,9 +2087,7 @@ do_add_end_per_suite_and_skip(LastMod, LastRef, Mod, FwMod) ->
%% Runs the specified tests, then displays/logs the summary.
run_test_cases(TestSpec, Config, TimetrapData) ->
-
- maybe_open_job_sock(),
-
+ test_server:init_purify(),
case lists:member(no_src, get(test_server_logopts)) of
true ->
ok;
@@ -2315,8 +2097,6 @@ run_test_cases(TestSpec, Config, TimetrapData) ->
run_test_cases_loop(TestSpec, [Config], TimetrapData, [], []),
- maybe_get_privdir(),
-
{AllSkippedN,UserSkipN,AutoSkipN,SkipStr} =
case get(test_server_skipped) of
{0,0} -> {0,0,0,""};
@@ -2335,41 +2115,6 @@ run_test_cases(TestSpec, Config, TimetrapData) ->
print(major, "=auto_skipped ~p", [AutoSkipN]),
exit(test_suites_done).
-%% If the test is run at a remote target, this function sets up a socket
-%% communication with the target for handling this particular job.
-maybe_open_job_sock() ->
- TI = get_target_info(),
- case TI#target_info.where of
- local ->
- %% local target
- test_server:init_purify();
- MainSock ->
- %% remote target
- {ok,LSock} = gen_tcp:listen(0, [binary,
- {reuseaddr,true},
- {packet,4},
- {active,false}]),
- {ok,Port} = inet:port(LSock),
- request(MainSock, {job,Port,get(test_server_name)}),
- case gen_tcp:accept(LSock, ?ACCEPT_TIMEOUT) of
- {ok,Sock} -> put(test_server_ctrl_job_sock, Sock);
- {error,Reason} -> exit({no_contact,Reason})
- end
- end.
-
-%% If the test is run at a remote target, this function waits for a
-%% tar packet containing the privdir created by the test case.
-maybe_get_privdir() ->
- case get(test_server_ctrl_job_sock) of
- undefined ->
- %% local target
- ok;
- Sock ->
- %% remote target
- request(Sock, job_done),
- gen_tcp:close(Sock)
- end.
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% run_test_cases_loop(TestCases, Config, TimetrapData, Mode, Status) -> ok
@@ -2448,27 +2193,38 @@ maybe_get_privdir() ->
%% reason, the Mode argument specifies if a parallel group is currently
%% being executed.
%%
-%% A parallel test case process will always set the dictionary value
-%% 'test_server_common_io_handler' to the pid of the main (starting)
-%% process. With this value set, the print/3 function will send print
-%% messages to the main process instead of writing the data to file
-%% (only true for printouts to common log files).
+%% The low-level mechanism for buffering IO for the common log files
+%% is handled by the test_server_io module. Buffering is turned on by
+%% test_server_io:start_transaction/0 and off by calling
+%% test_server_io:end_transaction/0. The buffered data for the transaction
+%% can be printed by calling test_server_io:print_buffered/1.
+%%
+%% This module is responsible for turning on IO buffering and for later
+%% calling test_server_io:print_buffered/1 to print the data. To help with this,
+%% two variables in the process dictionary are used:
+%% 'test_server_common_io_handler' and 'test_server_queued_io'. The values
+%% are set as follows:
+%%
+%% Value Meaning
+%% ----- -------
+%% undefined No parallel test cases running
+%% {tc,Pid} Running test cases in a top-level parallel group
+%% {Ref,Pid} Running sequential test case inside a parallel group
+%%
+%% FIXME: The Pid is no longer used.
%%
%% If a conf group nested under a parallel group in the test
%% specification should be started, the 'test_server_common_io_handler'
-%% value gets set also on the main process. This causes all printouts
-%% to common files - both from parallel test cases and from cases
-%% executed by the main process - to all end up as messages in the
-%% inbox of the main process.
+%% value gets set also on the main process.
%%
%% During execution of a parallel group (or of a group nested under a
%% parallel group), *any* new test case being started gets registered
%% in a list saved in the dictionary with 'test_server_queued_io' as key.
%% When the top level parallel group is finished (only then can we be
%% sure all parallel test cases have finished and "reported in"), the
-%% list of test cases is traversed in order and printout messages from
-%% each process - including the main process - are handled in turn. See
-%% handle_test_case_io_and_status/0 for details.
+%% list of test cases is traversed in order and test_server_io:print_buffered/1
+%% can be called for each test case. See handle_test_case_io_and_status/0
+%% for details.
%%
%% To be able to handle nested conf groups with different properties,
%% the Mode argument specifies a list of {Ref,Properties} tuples.
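
A minimal sketch of one buffered io session as described above, distilled from run_test_case1/9 and handle_io_and_exits/6 later in this patch; the RunCase fun and the message fields are stand-ins for the real values.

    %% Sketch: a test case process buffers its common-log output inside a
    %% test_server_io transaction and reports start/finish to Main.
    demo_buffered_session(Main, Ref, Num, Mod, Func, RunCase) ->
        test_server_io:start_transaction(),
        Main ! {started,Ref,self(),Num,Mod,Func},
        {Result,RetVal} = RunCase(),            %% print/3 output is buffered
        test_server_io:end_transaction(),
        Main ! {finished,Ref,self(),Num,Mod,Func,Result,RetVal},
        ok.
    %% The main process later flushes each queued session in order with
    %% test_server_io:print_buffered(Pid).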
@@ -2611,16 +2367,15 @@ run_test_cases_loop([{auto_skip_case,{Type,Ref,Case,Comment},SkipMode}|Cases],
run_test_cases_loop([{auto_skip_case,{Case,Comment},SkipMode}|Cases],
Config, TimetrapData, Mode, Status) ->
- {Mod,Func} = skip_case(auto, undefined, get(test_server_case_num)+1, Case, Comment,
- (undefined /= get(test_server_common_io_handler)), SkipMode),
+ {Mod,Func} = skip_case(auto, undefined, get(test_server_case_num)+1,
+ Case, Comment, is_io_buffered(), SkipMode),
test_server_sup:framework_call(report, [tc_auto_skip,{?pl2a(Mod),Func,Comment}]),
run_test_cases_loop(Cases, Config, TimetrapData, Mode,
update_status(skipped, Mod, Func, Status));
run_test_cases_loop([{skip_case,{conf,Ref,Case,Comment}}|Cases0],
Config, TimetrapData, Mode, Status) ->
- {Mod,Func} = skip_case(user, Ref, 0, Case, Comment,
- (undefined /= get(test_server_common_io_handler))),
+ {Mod,Func} = skip_case(user, Ref, 0, Case, Comment, is_io_buffered()),
{Cases,Config1} =
case curr_ref(Mode) of
Ref ->
@@ -2636,8 +2391,8 @@ run_test_cases_loop([{skip_case,{conf,Ref,Case,Comment}}|Cases0],
run_test_cases_loop([{skip_case,{Case,Comment}}|Cases],
Config, TimetrapData, Mode, Status) ->
- {Mod,Func} = skip_case(user, undefined, get(test_server_case_num)+1, Case, Comment,
- (undefined /= get(test_server_common_io_handler))),
+ {Mod,Func} = skip_case(user, undefined, get(test_server_case_num)+1,
+ Case, Comment, is_io_buffered()),
test_server_sup:framework_call(report, [tc_user_skip,{?pl2a(Mod),Func,Comment}]),
run_test_cases_loop(Cases, Config, TimetrapData, Mode,
update_status(skipped, Mod, Func, Status));
@@ -2874,7 +2629,7 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
end,
CurrMode = curr_mode(Ref, Mode0, Mode),
- ConfCaseResult = run_test_case(Ref, 0, Mod, Func, [ActualCfg], skip_init, target,
+ ConfCaseResult = run_test_case(Ref, 0, Mod, Func, [ActualCfg], skip_init,
TimetrapData, CurrMode),
case ConfCaseResult of
@@ -2908,6 +2663,7 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
exit(framework_error);
{_,Fail,_} when element(1,Fail) == 'EXIT';
element(1,Fail) == timetrap_timeout;
+ element(1,Fail) == user_timetrap_error;
element(1,Fail) == failed ->
{Cases2,Config1,Status3} =
if StartConf ->
@@ -2927,14 +2683,6 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
set_io_buffering(IOHandler),
stop_minor_log_file(),
run_test_cases_loop(Cases2, Config1, TimetrapData, Mode, Status3);
- {died,Why,_} when Func == init_per_suite ->
- print(minor, "~n*** Unexpected exit during init_per_suite.~n", []),
- Reason = {failed,{Mod,init_per_suite,Why}},
- Cases2 = skip_cases_upto(Ref, Cases, Reason, conf, CurrMode),
- set_io_buffering(IOHandler),
- stop_minor_log_file(),
- run_test_cases_loop(Cases2, Config, TimetrapData, Mode,
- delete_status(Ref, Status2));
{_,{Skip,Reason},_} when StartConf and ((Skip==skip) or (Skip==skipped)) ->
ReportAbortRepeat(skipped),
print(minor, "~n*** ~p skipped.~n"
@@ -3005,7 +2753,7 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
end;
run_test_cases_loop([{make,Ref,{Mod,Func,Args}}|Cases0], Config, TimetrapData, Mode, Status) ->
- case run_test_case(Ref, 0, Mod, Func, Args, skip_init, host, TimetrapData) of
+ case run_test_case(Ref, 0, Mod, Func, Args, skip_init, TimetrapData) of
{_,Why={'EXIT',_},_} ->
print(minor, "~n*** ~p failed.~n"
" Skipping all cases.", [Func]),
@@ -3036,23 +2784,21 @@ run_test_cases_loop([{Mod,Case}|Cases], Config, TimetrapData, Mode, Status) ->
run_test_cases_loop([{Mod,Func,Args}|Cases], Config, TimetrapData, Mode, Status) ->
Num = put(test_server_case_num, get(test_server_case_num)+1),
+
%% check the current execution mode and save info about the case if
%% it is detected that printouts to common log files are handled later
- case check_prop(parallel, Mode) of
+
+ case check_prop(parallel, Mode) =:= false andalso is_io_buffered() of
+ true ->
+ %% sequential test case nested in a parallel group;
+ %% io is buffered, so we must queue this test case
+ queue_test_case_io(undefined, self(), Num+1, Mod, Func);
false ->
- case get(test_server_common_io_handler) of
- undefined ->
- %% io printouts are written to straight to file
- ok;
- _ ->
- %% io messages are buffered, put test case in queue
- queue_test_case_io(undefined, self(), Num+1, Mod, Func)
- end;
- _ ->
ok
end,
+
case run_test_case(undefined, Num+1, Mod, Func, Args,
- run_init, target, TimetrapData, Mode) of
+ run_init, TimetrapData, Mode) of
%% callback to framework module failed, exit immediately
{_,{framework_error,{FwMod,FwFunc},Reason},_} ->
print(minor, "~n*** ~p failed in ~p. Reason: ~p~n", [FwMod,FwFunc,Reason]),
@@ -3099,8 +2845,8 @@ run_test_cases_loop([{Mod,Func,Args}|Cases], Config, TimetrapData, Mode, Status)
%% the test case is being executed in parallel with the main process (and
%% other test cases) and Pid is the dedicated process executing the case
Pid ->
- %% io from Pid will be buffered in the main process inbox and handled
- %% later, so we have to save info about the case
+ %% io from Pid will be buffered by the test_server_io process and
+ %% handled later, so we have to save info about the case
queue_test_case_io(undefined, Pid, Num+1, Mod, Func),
run_test_cases_loop(Cases, Config, TimetrapData, Mode, Status)
end;
@@ -3207,11 +2953,17 @@ get_data_dir(Mod, Suite) ->
non_existing ->
print(12, "The module ~p is not loaded", [Mod]),
[];
+ cover_compiled ->
+ MainCoverNode = cover:get_main_node(),
+ {file,File} = rpc:call(MainCoverNode,cover,is_compiled,[UseMod]),
+ do_get_data_dir(UseMod,File);
FullPath ->
- filename:dirname(FullPath) ++ "/" ++ cast_to_list(UseMod) ++
- ?data_dir_suffix
+ do_get_data_dir(UseMod,FullPath)
end.
+do_get_data_dir(Mod,File) ->
+ filename:dirname(File) ++ "/" ++ cast_to_list(Mod) ++ ?data_dir_suffix.
+
print_conf_time(0) ->
ok;
print_conf_time(ConfTime) ->
@@ -3355,7 +3107,9 @@ skip_case(Type, Ref, CaseNum, Case, Comment, SendSync, Mode) ->
if SendSync ->
queue_test_case_io(Ref, self(), CaseNum, Mod, Func),
self() ! {started,Ref,self(),CaseNum,Mod,Func},
+ test_server_io:start_transaction(),
skip_case1(Type, CaseNum, Mod, Func, Comment, Mode),
+ test_server_io:end_transaction(),
self() ! {finished,Ref,self(),CaseNum,Mod,Func,skipped,{0,skipped,[]}};
not SendSync ->
skip_case1(Type, CaseNum, Mod, Func, Comment, Mode)
@@ -3496,13 +3250,20 @@ modify_cases_upto1(Ref, CopyOp, [C|T], Orig, Alt) ->
%%
%% Save info about current process (always the main process) buffering
%% io printout messages from parallel test case processes (*and* possibly
-%% also the main process). If the value is the default 'undefined',
-%% io is not buffered but printed directly to file (see print/3).
+%% also the main process).
set_io_buffering(IOHandler) ->
put(test_server_common_io_handler, IOHandler).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% is_io_buffered() -> true|false
+%%
+%% Test whether io is being buffered.
+
+is_io_buffered() ->
+ get(test_server_common_io_handler) =/= undefined.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% queue_test_case_io(Pid, Num, Mod, Func) -> ok
%%
%% Save info about test case that gets its io buffered. This can
@@ -3549,7 +3310,7 @@ wait_and_resend(Ref, [{_,CurrPid,CaseNum,Mod,Func}|Ps] = Cases, Ok,Skip,Fail) ->
receive
{finished,_Ref,CurrPid,CaseNum,Mod,Func,Result,_RetVal} = Msg ->
%% resend message to main process so that it can be used
- %% to handle buffered io messages later
+	    %% when calling test_server_io:print_buffered/1 later
self() ! Msg,
MF = {Mod,Func},
{Ok1,Skip1,Fail1} =
@@ -3580,16 +3341,18 @@ rm_cases_upto(Ref, [_|Ps]) ->
%%
%% Each parallel test case process prints to its own minor log file during
%% execution. The common log files (major, html etc) must however be
-%% written to sequentially. The test case processes send print requests
-%% to the main (starting) process (the same process executing
-%% run_test_cases_loop/4), which handles these requests in the same
-%% order that the test case processes were started.
-%%
-%% An io session is always started with a {started,Ref,Pid,Num,Mod,Func}
-%% message and terminated with {finished,Ref,Pid,Num,Mod,Func,Result,RetVal}.
-%% The result shipped with the finished message from a parallel process
-%% is used to update status data of the current test run. An 'EXIT'
-%% message from each parallel test case process (after finishing and
+%% written to sequentially. This is handled by calling
+%% test_server_io:start_transaction/0 to tell the test_server_io process
+%% to buffer all print requests.
+%%
+%% An io session is always started with a
+%% {started,Ref,Pid,Num,Mod,Func} message (and
+%% test_server_io:start_transaction/0 will be called) and terminated
+%% with {finished,Ref,Pid,Num,Mod,Func,Result,RetVal} (and
+%% test_server_io:end_transaction/0 will be called). The result
+%% shipped with the finished message from a parallel process is used
+%% to update status data of the current test run. An 'EXIT' message
+%% from each parallel test case process (after finishing and
%% terminating) is also received and handled here.
%%
%% During execution of a parallel group, any cases (conf or normal)
@@ -3598,13 +3361,13 @@ rm_cases_upto(Ref, [_|Ps]) ->
%% correct sequence. This function handles also the print messages
%% generated by nested group cases that have been executed sequentially
%% by the main process (note that these cases do not generate 'EXIT'
-%% messages, only 'start', 'print' and 'finished' messages).
+%% messages, only 'start' and 'finished' messages).
%%
%% See the header comment for run_test_cases_loop/4 for more
%% info about IO handling.
%%
%% Note: It is important that the type of messages handled here
-%% do not get consumated by test_server:run_test_case_msgloop/5
+%% do not get consumed by test_server:run_test_case_msgloop/5
%% during the test case execution (e.g. in the catch clause of
%% the receive)!
@@ -3631,7 +3394,7 @@ handle_test_case_io_and_status() ->
%% Handle cases (without Ref) that belong to the top parallel group (i.e. when Refs = [])
handle_io_and_exit_loop([], [{undefined,CurrPid,CaseNum,Mod,Func}|Ps] = Cases, Ok,Skip,Fail) ->
- %% retreive the start message for the current io session (= testcase)
+ %% retrieve the start message for the current io session (= testcase)
receive
{started,_,CurrPid,CaseNum,Mod,Func} ->
{Ok1,Skip1,Fail1} =
@@ -3671,11 +3434,18 @@ handle_io_and_exit_loop(_, [], Ok,Skip,Fail) ->
handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases) ->
receive
+ {abort_current_testcase=Tag,_Reason,From} ->
+ %% If a parallel group is executing, there is no unique
+ %% current test case, so we must generate an error.
+ From ! {self(),Tag,{error,parallel_group}},
+ handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases);
%% end of io session from test case executed by main process
{finished,_,Main,CaseNum,Mod,Func,Result,_RetVal} ->
+ test_server_io:print_buffered(CurrPid),
{Result,{Mod,Func}};
%% end of io session from test case executed by parallel process
{finished,_,CurrPid,CaseNum,Mod,Func,Result,RetVal} ->
+ test_server_io:print_buffered(CurrPid),
case Result of
ok ->
put(test_server_ok, get(test_server_ok)+1);
@@ -3688,13 +3458,9 @@ handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases) ->
end,
{Result,{Mod,Func}};
- %% print to common log file
- {print,CurrPid,Detail,Msg} ->
- output({Detail,Msg}, internal),
- handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases);
-
%% unexpected termination of test case process
{'EXIT',TCPid,Reason} when Reason /= normal ->
+ test_server_io:print_buffered(CurrPid),
{value,{_,_,Num,M,F}} = lists:keysearch(TCPid, 2, Cases),
print(1, "Error! Process for test case #~p (~p:~p) died! Reason: ~p",
[Num, M, F, Reason]),
@@ -3726,59 +3492,52 @@ handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases) ->
%% RetVal is the result of executing the test case. It contains info
%% about the execution time and the return value of the test case function.
-run_test_case(Ref, Num, Mod, Func, Args, RunInit, Where, TimetrapData) ->
+run_test_case(Ref, Num, Mod, Func, Args, RunInit, TimetrapData) ->
file:set_cwd(filename:dirname(get(test_server_dir))),
- run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
- TimetrapData, [], [], self()).
+ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
+ TimetrapData, [], self()).
-run_test_case(Ref, Num, Mod, Func, Args, skip_init, Where, TimetrapData, Mode) ->
+run_test_case(Ref, Num, Mod, Func, Args, skip_init, TimetrapData, Mode) ->
%% a conf case is always executed by the main process
- run_test_case1(Ref, Num, Mod, Func, Args, skip_init, Where,
- TimetrapData, [], Mode, self());
+ run_test_case1(Ref, Num, Mod, Func, Args, skip_init,
+ TimetrapData, Mode, self());
-run_test_case(Ref, Num, Mod, Func, Args, RunInit, Where, TimetrapData, Mode) ->
+run_test_case(Ref, Num, Mod, Func, Args, RunInit, TimetrapData, Mode) ->
file:set_cwd(filename:dirname(get(test_server_dir))),
+ Main = self(),
case check_prop(parallel, Mode) of
false ->
%% this is a sequential test case
- run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
- TimetrapData, [], Mode, self());
+ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
+ TimetrapData, Mode, Main);
_Ref ->
%% this a parallel test case, spawn the new process
- Main = self(),
- {dictionary,State} = process_info(self(), dictionary),
- spawn_link(fun() ->
- run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
- TimetrapData, State, Mode, Main)
- end)
+ Dictionary = get(),
+ {dictionary,Dictionary} = process_info(self(), dictionary),
+ spawn_link(
+ fun() ->
+ process_flag(trap_exit, true),
+ [put(Key, Val) || {Key,Val} <- Dictionary],
+ set_io_buffering({tc,Main}),
+ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
+ TimetrapData, Mode, Main)
+ end)
end.
-run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
- TimetrapData, State, Mode, Main) ->
- %% if this runs on a parallel test case process,
- %% copy the dictionary from the main process
- do_if_parallel(Main, fun() -> process_flag(trap_exit, true) end, ok),
- CopyDict = fun() -> lists:foreach(fun({Key,Val}) ->
- put(Key, Val)
- end, State)
- end,
- do_if_parallel(Main, CopyDict, ok),
- do_if_parallel(Main, fun() ->
- put(test_server_common_io_handler, {tc,Main})
- end, ok),
+run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
+ TimetrapData, Mode, Main) ->
+ group_leader(test_server_io:get_gl(Main == self()), self()),
+
%% if io is being buffered, send start io session message
%% (no matter if case runs on parallel or main process)
- case get(test_server_common_io_handler) of
- undefined -> ok;
- _ -> Main ! {started,Ref,self(),Num,Mod,Func}
+ case is_io_buffered() of
+ false -> ok;
+ true ->
+ test_server_io:start_transaction(),
+ Main ! {started,Ref,self(),Num,Mod,Func}
end,
TSDir = get(test_server_dir),
- case Where of
- target ->
- maybe_send_beam_and_datadir(Mod);
- host ->
- ok
- end,
+
print(major, "=case ~p:~p", [Mod, Func]),
MinorName = start_minor_log_file(Mod, Func),
print(minor, "<a name=\"top\"></a>", [], internal_raw),
@@ -3830,13 +3589,12 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
[num2str(Num),fw_name(Mod),GroupName,MinorBase,Func,
MinorBase,MinorBase]),
- do_if_parallel(Main, ok, fun erlang:yield/0),
+ do_unless_parallel(Main, fun erlang:yield/0),
- RejectIoReqs = get(test_server_reject_io_reqs),
%% run the test case
{Result,DetectedFail,ProcsBefore,ProcsAfter} =
run_test_case_apply(Num, Mod, Func, [UpdatedArgs], get_name(Mode),
- RunInit, Where, TimetrapData, RejectIoReqs),
+ RunInit, TimetrapData),
{Time,RetVal,Loc,Opts,Comment} =
case Result of
Normal={_Time,_RetVal,_Loc,_Opts,_Comment} -> Normal;
@@ -3848,7 +3606,7 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
print_timestamp(minor, "Ended at "),
print(major, "=ended ~s", [lists:flatten(timestamp_get(""))]),
- do_if_parallel(Main, ok, fun() -> file:set_cwd(filename:dirname(TSDir)) end),
+ do_unless_parallel(Main, fun() -> file:set_cwd(filename:dirname(TSDir)) end),
%% call the appropriate progress function clause to print the results to log
Status =
@@ -3953,14 +3711,17 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
true ->
ok
end,
- check_new_crash_dumps(Where),
+ test_server_sup:check_new_crash_dumps(),
%% if io is being buffered, send finished message
%% (no matter if case runs on parallel or main process)
- case get(test_server_common_io_handler) of
- undefined -> ok;
- _ -> Main ! {finished,Ref,self(),Num,Mod,Func,
- ?mod_result(Status),{Time,RetVal,Opts}}
+ case is_io_buffered() of
+ false ->
+ ok;
+ true ->
+ test_server_io:end_transaction(),
+ Main ! {finished,Ref,self(),Num,Mod,Func,
+ ?mod_result(Status),{Time,RetVal,Opts}}
end,
{Time,RetVal,Opts}.
@@ -3968,126 +3729,16 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
%%--------------------------------------------------------------------
%% various help functions
-%% Call If() if we're on parallel process, or
-%% call Else() if we're on main process
-do_if_parallel(Pid, If, Else) ->
+%% Call Action if we are running on the main process (not parallel).
+do_unless_parallel(Main, Action) when is_function(Action, 0) ->
case self() of
- Pid ->
- if is_function(Else) -> Else();
- true -> Else
- end;
- _ ->
- if is_function(If) -> If();
- true -> If
- end
+ Main -> Action();
+ _ -> ok
end.
num2str(0) -> "";
num2str(N) -> integer_to_list(N).
-%% If remote target, this function sends the test suite (if not already sent)
-%% and the content of datadir til target.
-maybe_send_beam_and_datadir(Mod) ->
- case get(test_server_ctrl_job_sock) of
- undefined ->
- %% local target
- ok;
- JobSock ->
- %% remote target
- case get(test_server_downloaded_suites) of
- undefined ->
- send_beam_and_datadir(Mod, JobSock),
- put(test_server_downloaded_suites, [Mod]);
- Suites ->
- case lists:member(Mod, Suites) of
- false ->
- send_beam_and_datadir(Mod, JobSock),
- put(test_server_downloaded_suites, [Mod|Suites]);
- true ->
- ok
- end
- end
- end.
-
-send_beam_and_datadir(Mod, JobSock) ->
- case code:which(Mod) of
- non_existing ->
- io:format("** WARNING: Suite ~w could not be found on host\n",
- [Mod]);
- BeamFile ->
- send_beam(JobSock, Mod, BeamFile)
- end,
- DataDir = get_data_dir(Mod),
- case file:read_file_info(DataDir) of
- {ok,_I} ->
- {ok,All} = file:list_dir(DataDir),
- AddTarFiles =
- case controller_call(get_target_info) of
- #target_info{os_family=ose} ->
- ObjExt = code:objfile_extension(),
- Wc = filename:join(DataDir, "*" ++ ObjExt),
- ModsInDatadir = filelib:wildcard(Wc),
- SendBeamFun = fun(X) -> send_beam(JobSock, X) end,
- lists:foreach(SendBeamFun, ModsInDatadir),
- %% No need to send C code or makefiles since
- %% no compilation can be done on target anyway.
- %% Compiled C code must exist on target.
- %% Beam files are already sent as binaries.
- %% Erlang source are sent in case the test case
- %% is to compile it.
- Filter = fun("Makefile") -> false;
- ("Makefile.src") -> false;
- (Y) ->
- case filename:extension(Y) of
- ".c" -> false;
- ObjExt -> false;
- _ -> true
- end
- end,
- lists:filter(Filter, All);
- _ ->
- All
- end,
- Tarfile = "data_dir.tar.gz",
- {ok,Tar} = erl_tar:open(Tarfile, [write,compressed]),
- ShortDataDir = filename:basename(DataDir),
- AddTarFun =
- fun(File) ->
- Long = filename:join(DataDir, File),
- Short = filename:join(ShortDataDir, File),
- ok = erl_tar:add(Tar, Long, Short, [])
- end,
- lists:foreach(AddTarFun, AddTarFiles),
- ok = erl_tar:close(Tar),
- {ok,TarBin} = file:read_file(Tarfile),
- file:delete(Tarfile),
- request(JobSock, {{datadir,Tarfile}, TarBin});
- {error,_R} ->
- ok
- end.
-
-send_beam(JobSock, BeamFile) ->
- Mod=filename:rootname(filename:basename(BeamFile), code:objfile_extension()),
- send_beam(JobSock, list_to_atom(Mod), BeamFile).
-send_beam(JobSock, Mod, BeamFile) ->
- {ok,BeamBin} = file:read_file(BeamFile),
- request(JobSock, {{beam,Mod,BeamFile}, BeamBin}).
-
-check_new_crash_dumps(Where) ->
- case Where of
- target ->
- case get(test_server_ctrl_job_sock) of
- undefined ->
- ok;
- Socket ->
- read_job_sock_loop(Socket)
- end;
- _ ->
- ok
- end,
- test_server_sup:check_new_crash_dumps().
-
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% progress(Result, CaseNum, Mod, Func, Location, Reason, Time,
%% Comment, TimeFormat) -> Result
@@ -4455,11 +4106,10 @@ do_format_exception(Reason={Error,Stack}) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit,
-%% Where, TimetrapData, RejectIoReqs) ->
+%% TimetrapData) ->
%% {{Time,RetVal,Loc,Opts,Comment},DetectedFail,ProcessesBefore,ProcessesAfter} |
%% {{died,Reason,unknown,Comment},DetectedFail,ProcessesBefore,ProcessesAfter}
%% Name = atom()
-%% Where = target | host
%% Time = float() (seconds)
%% RetVal = term()
%% Loc = term()
@@ -4474,23 +4124,10 @@ do_format_exception(Reason={Error,Stack}) ->
%% sent over socket to target, and test_server runs the case and sends the
%% result back over the socket. Else test_server runs the case directly on host.
-run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit, host,
- TimetrapData, RejectIoReqs) ->
+run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit,
+ TimetrapData) ->
test_server:run_test_case_apply({CaseNum,Mod,Func,Args,Name,RunInit,
- TimetrapData,RejectIoReqs});
-run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit, target,
- TimetrapData, RejectIoReqs) ->
- case get(test_server_ctrl_job_sock) of
- undefined ->
- %% local target
- test_server:run_test_case_apply({CaseNum,Mod,Func,Args,Name,RunInit,
- TimetrapData,RejectIoReqs});
- JobSock ->
- %% remote target
- request(JobSock, {test_case,{CaseNum,Mod,Func,Args,Name,RunInit,
- TimetrapData,RejectIoReqs}}),
- read_job_sock_loop(JobSock)
- end.
+ TimetrapData}).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% print(Detail, Format, Args) -> ok
@@ -4500,16 +4137,6 @@ run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit, target,
%%
%% Just like io:format, except that depending on the Detail value, the output
%% is directed to console, major and/or minor log files.
-%%
-%% To handle printouts to common (not minor) log files from parallel test
-%% case processes, the test_server_common_io_handler value is checked. If
-%% set, the data is sent to the main controlling process. Note that test
-%% cases that belong to a conf group nested under a parallel group will also
-%% get its io data sent to main rather than immediately printed out, even
-%% if the test cases are executed by the same, main, process (ie the main
-%% process sends messages to itself then).
-%%
-%% Buffered io is handled by the handle_test_case_io_and_status/0 function.
print(Detail, Format) ->
print(Detail, Format, []).
@@ -4522,19 +4149,7 @@ print(Detail, Format, Args, Printer) ->
print_or_buffer(Detail, Msg, Printer).
print_or_buffer(Detail, Msg, Printer) ->
- case get(test_server_minor_level) of
- _ when Detail == minor ->
- output({Detail,Msg}, Printer);
- MinLevel when is_number(Detail), Detail >= MinLevel ->
- output({Detail,Msg}, Printer);
- _ -> % Detail < Minor | major | html
- case get(test_server_common_io_handler) of
- undefined ->
- output({Detail,Msg}, Printer);
- {_,MainPid} ->
- MainPid ! {print,self(),Detail,Msg}
- end
- end.
+ test_server_gl:print(group_leader(), Detail, Msg, Printer).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% print_timestamp(Detail, Leader) -> ok
@@ -4598,107 +4213,6 @@ format(Detail, Format, Args) ->
print_or_buffer(Detail, Str, self()).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% output({Level,Message}, Sender) -> ok
-%% Level = integer() | minor | major | html
-%% Message = string() | [integer()]
-%% Sender = string() | internal
-%%
-%% Outputs the message on the channels indicated by Level. If Level is an
-%% atom, only the corresponding channel receives the output. When Level is
-%% an integer console, major and/or minor log file will receive output
-%% depending on the user set thresholds (see get_levels/0, set_levels/3)
-%%
-%% When printing on the console, the message is prefixed with the test
-%% suite's name. In case a name is not set (yet), Sender is used.
-%%
-%% When not outputting to the console, and the Sender is 'internal',
-%% the message is prefixed with "=== ", so that it will be apparent that
-%% the message comes from the test server and not the test suite itself.
-
-output({Level,Msg}, Sender) when is_integer(Level) ->
- SumLev = get(test_server_summary_level),
- if Level =< SumLev ->
- output_to_fd(stdout, Msg, Sender);
- true ->
- ok
- end,
- MajLev = get(test_server_major_level),
- if Level =< MajLev ->
- output_to_fd(get(test_server_major_fd), Msg, Sender);
- true ->
- ok
- end,
- MinLev = get(test_server_minor_level),
- if Level >= MinLev ->
- output_to_fd(get(test_server_minor_fd), Msg, Sender);
- true ->
- ok
- end;
-output({minor,Bytes}, Sender) when is_list(Bytes) ->
- output_to_fd(get(test_server_minor_fd), Bytes, Sender);
-output({major,Bytes}, Sender) when is_list(Bytes) ->
- output_to_fd(get(test_server_major_fd), Bytes, Sender);
-output({minor,Bytes}, Sender) when is_binary(Bytes) ->
- output_to_fd(get(test_server_minor_fd),binary_to_list(Bytes), Sender);
-output({major,Bytes}, Sender) when is_binary(Bytes) ->
- output_to_fd(get(test_server_major_fd),binary_to_list(Bytes), Sender);
-output({html,Msg}, _Sender) ->
- case get(test_server_html_fd) of
- undefined ->
- ok;
- Fd ->
- io:put_chars(Fd,Msg),
- case file:position(Fd, {cur, 0}) of
- {ok, Pos} ->
- %% We are writing to a seekable file. Finalise so
- %% we get complete valid (and viewable) HTML code.
- %% Then rewind to overwrite the finalising code.
- io:put_chars(Fd, "\n</table>\n"),
- case get(test_server_html_footer) of
- undefined ->
- io:put_chars(Fd, "</body>\n</html>\n");
- Footer ->
- io:put_chars(Fd, Footer)
- end,
- file:position(Fd, Pos);
- {error, epipe} ->
- %% The file is not seekable. We cannot erase what
- %% we've already written --- so the reader will
- %% have to wait until we're done.
- ok
- end
- end;
-output({minor,Data}, Sender) ->
- output_to_fd(get(test_server_minor_fd),
- lists:flatten(io_lib:format(
- "Unexpected output: ~p~n", [Data])),Sender);
-output({major,Data}, Sender) ->
- output_to_fd(get(test_server_major_fd),
- lists:flatten(io_lib:format(
- "Unexpected output: ~p~n", [Data])),Sender).
-
-output_to_fd(stdout, Msg, Sender) ->
- Name =
- case get(test_server_name) of
- undefined -> Sender;
- Other -> Other
- end,
- io:format("Testing ~s: ~s\n", [Name, lists:flatten(Msg)]);
-output_to_fd(undefined, _Msg, _Sender) ->
- ok;
-output_to_fd(Fd, Msg=[$=|_], internal) ->
- io:put_chars(Fd, [Msg,"\n"]);
-
-output_to_fd(Fd, Msg, internal) ->
- io:put_chars(Fd, [$=,$=,$=,$ , Msg, "\n"]);
-
-output_to_fd(Fd, Msg, _Sender) ->
- case get(test_server_log_nl) of
- false -> io:put_chars(Fd, Msg);
- _ -> io:put_chars(Fd, [Msg,"\n"])
- end.
-
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% xhtml(BasicHtml, XHtml) -> BasicHtml | XHtml
%%
xhtml(HTML, XHTML) ->
@@ -5210,7 +4724,7 @@ get_target_info() ->
%% Called by test_server. See test_server:start_node/3 for details
start_node(Name, Type, Options) ->
- T = 10 * ?ACCEPT_TIMEOUT, % give some extra time
+ T = 10 * ?ACCEPT_TIMEOUT * test_server:timetrap_scale_factor(),
format(minor, "Attempt to start ~w node ~p with options ~p",
[Type, Name, Options]),
case controller_call({start_node,Name,Type,Options}, T) of
@@ -5255,7 +4769,8 @@ start_node(Name, Type, Options) ->
%% when the new node has contacted test_server_ctrl again
wait_for_node(Slave) ->
- case catch controller_call({wait_for_node,Slave},10000) of
+ T = 10000 * test_server:timetrap_scale_factor(),
+ case catch controller_call({wait_for_node,Slave},T) of
{'EXIT',{timeout,_}} -> {error,timeout};
ok -> ok
end.
@@ -5279,60 +4794,6 @@ stop_node(Slave) ->
controller_call({stop_node,Slave}).
-%%--------------------------------------------------------------------
-%% Functions handling target communication over socket
-
-%% Generic send function for communication with target
-request(Sock,Request) ->
- gen_tcp:send(Sock,<<1,(term_to_binary(Request))/binary>>).
-
-%% Receive and decode request on job specific socket
-%% Used when test is running on a remote target
-read_job_sock_loop(Sock) ->
- case gen_tcp:recv(Sock,0) of
- {error,Reason} ->
- gen_tcp:close(Sock),
- exit({controller,connection_lost,Reason});
- {ok,<<1,Request/binary>>} ->
- case decode(binary_to_term(Request)) of
- ok ->
- read_job_sock_loop(Sock);
- {stop,Result} ->
- Result
- end
- end.
-
-decode({apply,{M,F,A}}) ->
- apply(M,F,A),
- ok;
-decode({sync_apply,{M,F,A}}) ->
- R = apply(M,F,A),
- request(get(test_server_ctrl_job_sock),{sync_result,R}),
- ok;
-decode({sync_result,Result}) ->
- {stop,Result};
-decode({test_case_result,Result}) ->
- {stop,Result};
-decode({privdir,empty_priv_dir}) ->
- {stop,ok};
-decode({{privdir,PrivDirTar},TarBin}) ->
- Root = get(test_server_log_dir_base),
- unpack_tar(Root,PrivDirTar,TarBin),
- {stop,ok};
-decode({crash_dumps,no_crash_dumps}) ->
- {stop,ok};
-decode({{crash_dumps,CrashDumpTar},TarBin}) ->
- Dir = test_server_sup:crash_dump_dir(),
- unpack_tar(Dir,CrashDumpTar,TarBin),
- {stop,ok}.
-
-unpack_tar(Dir,TarFileName0,TarBin) ->
- TarFileName = filename:join(Dir,TarFileName0),
- ok = file:write_file(TarFileName,TarBin),
- ok = erl_tar:extract(TarFileName,[compressed,{cwd,Dir}]),
- ok = file:delete(TarFileName).
-
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% DEBUGGER INTERFACE %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -5477,16 +4938,7 @@ cover_compile({App,CoverFile}) ->
cover_compile1({App,Exclude,Include,Cross}).
cover_compile1(What) ->
- case get(test_server_ctrl_job_sock) of
- undefined ->
- %% local target
- test_server:cover_compile(What);
- JobSock ->
- %% remote target
- request(JobSock, {sync_apply,{test_server,cover_compile,[What]}}),
- read_job_sock_loop(JobSock)
- end.
-
+ test_server:cover_compile(What).
%% Read the coverfile for an application and return a list of modules
%% that are members of the application but shall not be compiled
@@ -5538,7 +4990,7 @@ check_cover_file([], Exclude, Include) ->
%%
%% This per application analysis writes the file cover.html in the
%% application's run.<timestamp> directory.
-cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, TestDir) ->
+cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, Stop, TestDir) ->
write_default_cross_coverlog(TestDir),
{ok,CoverLog} = file:open(filename:join(TestDir, ?coverlog_name), [write]),
@@ -5569,7 +5021,7 @@ cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, TestDir) ->
io:fwrite(CoverLog, "<p>Excluded module(s): <code>~p</code>\n", [Excluded]),
- Coverage = cover_analyse(Analyse, AnalyseMods),
+ Coverage = cover_analyse(Analyse, AnalyseMods, Stop),
case lists:filter(fun({_M,{_,_,_}}) -> false;
(_) -> true
@@ -5586,32 +5038,27 @@ cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, TestDir) ->
file:write_file(filename:join(TestDir, ?cover_total),
term_to_binary(TotPercent)).
-cover_analyse(Analyse, AnalyseMods) ->
+cover_analyse(Analyse, AnalyseMods, Stop) ->
TestDir = get(test_server_log_dir_base),
- case get(test_server_ctrl_job_sock) of
- undefined ->
- %% local target
- test_server:cover_analyse({Analyse,TestDir}, AnalyseMods);
- JobSock ->
- %% remote target
- request(JobSock, {sync_apply,{test_server,
- cover_analyse,
- [Analyse,AnalyseMods]}}),
- read_job_sock_loop(JobSock)
- end.
+ test_server:cover_analyse({Analyse,TestDir}, AnalyseMods, Stop).
%% Cover analysis, cross application
%% This can be executed on any node after all tests are finished.
-%% The node's current directory must be the same as when the tests
-%% were run.
-cross_cover_analyse(Analyse) ->
- cross_cover_analyse(Analyse, undefined).
-
-cross_cover_analyse(Analyse, CrossModules) ->
- CoverdataFiles = get_coverdata_files(),
+%% Apps = [{App,Dir}]
+%% App = atom(), application name
+%% Dir = string(), the log directory for App, normally where
+%% run.<timestamp> is found.
+%% Modules = [atom()], modules that have been cover compiled during tests
+%%   of applications other than the one they belong to.
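+%%
+%% Illustrative call (the application names, log directories and module
+%% list below are hypothetical; the real values depend on how and where
+%% the tests were run):
+%%
+%%   cross_cover_analyse(overview,
+%%                       [{test_server,"test_server.logs"},
+%%                        {common_test,"common_test.logs"}],
+%%                       [erl2html2,test_server_sup])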
+cross_cover_analyse(Analyse, Apps) ->
+ cross_cover_analyse(Analyse, Apps, get_cross_modules()).
+cross_cover_analyse(Analyse, Apps, Modules) ->
+ Apps1 = get_latest_run_dirs(Apps),
+ Apps2 = add_cross_modules(Modules,Apps1),
+ CoverdataFiles = get_coverdata_files(Apps2),
lists:foreach(fun(CDF) -> cover:import(CDF) end, CoverdataFiles),
- io:fwrite("Cover analysing... ", []),
+ io:fwrite("Cover analysing...\n", []),
DetailsFun =
case Analyse of
details ->
@@ -5625,25 +5072,15 @@ cross_cover_analyse(Analyse, CrossModules) ->
_ ->
fun(_,_) -> undefined end
end,
- SortedModules =
- case CrossModules of
- undefined ->
- sort_modules([Mod || Mod <- get_all_cross_modules(),
- lists:member(Mod, cover:imported_modules())], []);
- _ ->
- sort_modules(CrossModules, [])
- end,
- Coverage = analyse_apps(SortedModules, DetailsFun, []),
+ Coverage = analyse_apps(Apps2, DetailsFun, []),
cover:stop(),
- write_cross_cover_logs(Coverage).
+ write_cross_cover_logs(Coverage,Apps2).
-%% For each application from which there are modules listed in the
-%% cross.cover, write a cross cover log (cross_cover.html).
-write_cross_cover_logs([{App,Coverage}|T]) ->
- case last_test_for_app(App) of
- false ->
- ok;
- Dir ->
+%% For each application from which there are cross cover analysed
+%% modules, write a cross cover log (cross_cover.html).
+write_cross_cover_logs([{App,Coverage}|T],Apps) ->
+ case lists:keyfind(App,1,Apps) of
+ {_,Dir,Mods} when Mods=/=[] ->
CoverLogName = filename:join(Dir,?cross_coverlog_name),
{ok,CoverLog} = file:open(CoverLogName, [write]),
write_coverlog_header(CoverLog),
@@ -5651,54 +5088,51 @@ write_cross_cover_logs([{App,Coverage}|T]) ->
"<h1>Coverage results for \'~w\' from all tests</h1>\n",
[App]),
write_cover_result_table(CoverLog, Coverage),
- io:fwrite("Written file ~p\n", [CoverLogName])
+ io:fwrite("Written file ~p\n", [CoverLogName]);
+ _ ->
+ ok
end,
- write_cross_cover_logs(T);
-write_cross_cover_logs([]) ->
+ write_cross_cover_logs(T,Apps);
+write_cross_cover_logs([],_) ->
io:fwrite("done\n", []).
-%% Find all exported coverdata files. First find all the latest
-%% run.<timestamp> directories, and the check if there is a file named
-%% all.coverdata.
-get_coverdata_files() ->
- PossibleFiles = [last_coverdata_file(Dir) ||
- Dir <- filelib:wildcard([$*|?logdir_ext]),
- filelib:is_dir(Dir)],
- [File || File <- PossibleFiles, filelib:is_file(File)].
-
-last_coverdata_file(Dir) ->
- LastDir = last_test(filelib:wildcard(filename:join(Dir,"run.[1-2]*")),false),
- filename:join(LastDir,"all.coverdata").
-
-
-%% Find the latest run.<timestamp> directory for the given application.
-last_test_for_app(App) ->
- AppLogDir = atom_to_list(App)++?logdir_ext,
- last_test(filelib:wildcard(filename:join(AppLogDir,"run.[1-2]*")),false).
-
-last_test([Run|Rest], false) ->
- last_test(Rest, Run);
-last_test([Run|Rest], Latest) when Run > Latest ->
- last_test(Rest, Run);
-last_test([_|Rest], Latest) ->
- last_test(Rest, Latest);
-last_test([], Latest) ->
+%% Get the latest run.<timestamp> directories
+get_latest_run_dirs([{App,Dir}|Apps]) ->
+ [{App,get_latest_run_dir(Dir)} | get_latest_run_dirs(Apps)];
+get_latest_run_dirs([]) ->
+ [].
+
+get_latest_run_dir(Dir) ->
+ case filelib:wildcard(filename:join(Dir,"run.[1-2]*")) of
+ [] ->
+ Dir;
+ [H|T] ->
+ get_latest_dir(T,H)
+ end.
+
+get_latest_dir([H|T],Latest) when H>Latest ->
+ get_latest_dir(T,H);
+get_latest_dir([_|T],Latest) ->
+ get_latest_dir(T,Latest);
+get_latest_dir([],Latest) ->
Latest.
-%% Sort modules according to the application they belong to.
-%% Return [{App,LastTestDir,ModuleList}]
-sort_modules([M|Modules], Acc) ->
- App = get_app(M),
- Acc1 =
- case lists:keysearch(App, 1, Acc) of
- {value,{App,LastTest,List}} ->
- lists:keyreplace(App, 1, Acc, {App,LastTest,[M|List]});
+%% Associate the cross cover modules with their applications.
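+%% A small worked example (hypothetical names): with Mods = [mod_a,mod_b]
+%% and Apps = [{app1,"app1.logs"},{app2,"app2.logs"}], and assuming that
+%% get_app/1 maps mod_a to app1 and mod_b to app2, the result contains
+%% the tuples {app1,"app1.logs",[mod_a]} and {app2,"app2.logs",[mod_b]}
+%% (the order of the application tuples is not significant here).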
+add_cross_modules(Mods,Apps)->
+ do_add_cross_modules(Mods,[{App,Dir,[]} || {App,Dir} <- Apps]).
+do_add_cross_modules([Mod|Mods],Apps)->
+ App = get_app(Mod),
+ NewApps =
+ case lists:keytake(App,1,Apps) of
+ {value,{App,Dir,AppMods},Rest} ->
+ [{App,Dir,lists:umerge([Mod],AppMods)}|Rest];
false ->
- [{App,last_test_for_app(App),[M]}|Acc]
+ Apps
end,
- sort_modules(Modules, Acc1);
-sort_modules([], Acc) ->
- Acc.
+ do_add_cross_modules(Mods,NewApps);
+do_add_cross_modules([],Apps) ->
+    %% Just to get the modules in the same order as in the app-only cover log
+ [{App,Dir,lists:reverse(Mods)} || {App,Dir,Mods} <- Apps].
get_app(Module) ->
Beam = code:which(Module),
@@ -5706,6 +5140,14 @@ get_app(Module) ->
[AppStr|_] = string:tokens(AppDir,"-"),
list_to_atom(AppStr).
+%% Find all exported coverdata files.
+get_coverdata_files(Apps) ->
+ lists:flatmap(
+ fun({_,LatestAppDir,_}) ->
+ filelib:wildcard(filename:join(LatestAppDir,"all.coverdata"))
+ end,
+ Apps).
+
%% For each application, analyse all modules
%% Used for cross cover analysis.
@@ -5726,7 +5168,7 @@ analyse_modules(_Dir, [], _DetailsFun, Acc) ->
%% Read the cross cover file (cross.cover)
-get_all_cross_modules() ->
+get_cross_modules() ->
get_cross_modules(all).
get_cross_modules(App) ->
case file:consult(?cross_cover_file) of
diff --git a/lib/test_server/src/test_server_gl.erl b/lib/test_server/src/test_server_gl.erl
new file mode 100644
index 0000000000..d32c7c07dc
--- /dev/null
+++ b/lib/test_server/src/test_server_gl.erl
@@ -0,0 +1,293 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% This module implements group leader processes for test cases.
+%% Each group leader process handles output to the minor log file for
+%% a test case, and calls test_server_io to handle output to the common
+%% log files. The group leader processes are created and destroyed
+%% through the test_server_io module/process.
+
+-module(test_server_gl).
+-export([start_link/0,stop/1,set_minor_fd/3,unset_minor_fd/1,
+ get_tc_supervisor/1,print/4,set_props/2]).
+
+-export([init/1,handle_call/3,handle_cast/2,handle_info/2,terminate/2]).
+
+-record(st, {tc_supervisor :: 'none'|pid(), %Test case supervisor
+ tc :: mfa(), %Current test case MFA
+ minor :: 'none'|pid(), %Minor fd
+ minor_monitor, %Monitor ref for minor fd
+ capture :: 'none'|pid(), %Capture output
+ reject_io :: boolean(), %Reject I/O requests...
+ permit_io, %... and exceptions
+ auto_nl=true :: boolean(), %Automatically add NL
+ levels %{Stdout,Major,Minor}
+ }).
+
+%% start_link()
+%% Start a new group leader process. Only to be called by
+%% the test_server_io process.
+
+start_link() ->
+ case gen_server:start_link(?MODULE, [], []) of
+ {ok,Pid} ->
+ {ok,Pid};
+ Other ->
+ Other
+ end.
+
+
+%% stop(Pid)
+%% Stop a group leader process. Only to be called by
+%% the test_server_io process.
+
+stop(GL) ->
+ gen_server:cast(GL, stop).
+
+
+%% set_minor_fd(GL, Fd, MFA)
+%% GL = Pid for the group leader process
+%% Fd = file descriptor for the minor log file
+%% MFA = {M,F,A} for the test case owning the minor log file
+%%
+%% Register the file descriptor for the minor log file. Subsequent
+%% IO directed to the minor log file will be written to this file.
+%% Also register the currently executing process as the testcase
+%% supervisor corresponding to this group leader process.
+
+set_minor_fd(GL, Fd, MFA) ->
+ req(GL, {set_minor_fd,Fd,MFA,self()}).
+
+
+%% unset_minor_fd(GL)
+%% GL = Pid for the group leader process
+%%
+%% Unregister the file descriptor for the minor log file (typically
+%% because the test case has ended and the minor log file is about
+%% to be closed). Subsequent IO (for example, by a process spawned
+%% by the testcase process) will go to the unexpected_io log file.
+
+unset_minor_fd(GL) ->
+ req(GL, unset_minor_fd).
+
+
+%% get_tc_supervisor(GL)
+%% GL = Pid for the group leader process
+%%
+%% Return the Pid for the process that supervises the test case
+%% that has this group leader.
+
+get_tc_supervisor(GL) ->
+ req(GL, get_tc_supervisor).
+
+
+%% print(GL, Detail, Msg, Printer) -> ok
+%% GL = Pid for the group leader process
+%% Detail = integer() | minor | major | html | stdout
+%% Msg = iodata()
+%% Printer = internal | pid()
+%%
+%% Print a message to one of the log files. If Detail is an integer,
+%% it will be compared to the levels (set by set_props/2) to
+%% determine which log file(s) are to receive the output. If
+%% Detail is an atom, the value of the atom will directly determine
+%% which log file to use. IO to the minor log file will be handled
+%% directly by this group leader process (printing to the file set by
+%% set_minor_fd/3), and all other IO will be handled by calling
+%% test_server_io:print/3.
+
+print(GL, Detail, Msg, Printer) ->
+ req(GL, {print,Detail,Msg,Printer}).
+
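+%% Usage sketch (illustrative only; GL is assumed to be a group leader
+%% pid as returned by test_server_io:get_gl/1, and the level 99 and the
+%% strings are made up):
+%%
+%%   ok = test_server_gl:print(GL, minor, "written to the minor log", internal),
+%%   ok = test_server_gl:print(GL, 99, "routed according to the levels", internal).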
+
+%% set_props(GL, [PropertyTuple])
+%% GL = Pid for the group leader process
+%% PropertyTuple = {levels,{Show,Major,Minor}} |
+%% {auto_nl,boolean()} |
+%% {reject_io_reqs,boolean()}
+%%
+%% Set properties for this group leader process.
+
+set_props(GL, PropList) ->
+ req(GL, {set_props,PropList}).
+
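+%% For example, assuming GL is a group leader pid, the following call
+%% (re)sets the defaults used by init/1:
+%%
+%%   ok = test_server_gl:set_props(GL, [{levels,{1,19,10}},
+%%                                      {auto_nl,true},
+%%                                      {reject_io_reqs,false}]).
+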
+%%% Internal functions.
+
+init([]) ->
+ {ok,#st{tc_supervisor=none,
+ minor=none,
+ minor_monitor=none,
+ capture=none,
+ reject_io=false,
+ permit_io=gb_sets:empty(),
+ auto_nl=true,
+ levels={1,19,10}
+ }}.
+
+req(GL, Req) ->
+ gen_server:call(GL, Req, infinity).
+
+handle_call(get_tc_supervisor, _From, #st{tc_supervisor=Pid}=St) ->
+ {reply,Pid,St};
+handle_call({set_minor_fd,Fd,MFA,Supervisor}, _From, St) ->
+ Ref = erlang:monitor(process, Fd),
+ {reply,ok,St#st{tc=MFA,minor=Fd,minor_monitor=Ref,
+ tc_supervisor=Supervisor}};
+handle_call(unset_minor_fd, _From, St) ->
+ {reply,ok,St#st{minor=none,tc_supervisor=none}};
+handle_call({set_props,PropList}, _From, St) ->
+ {reply,ok,do_set_props(PropList, St)};
+handle_call({print,Detail,Msg,Printer}, {From,_}, St) ->
+ output(Detail, Msg, Printer, From, St),
+ {reply,ok,St}.
+
+handle_cast(stop, St) ->
+ {stop,normal,St}.
+
+handle_info({'DOWN',Ref,process,_,_}, #st{minor_monitor=Ref}=St) ->
+ {noreply,St#st{minor=none,minor_monitor=none}};
+handle_info({permit_io,Pid}, #st{permit_io=P}=St) ->
+ {noreply,St#st{permit_io=gb_sets:add(Pid, P)}};
+handle_info({capture,Cap0}, St) ->
+ Cap = case Cap0 of
+ false -> none;
+ Pid when is_pid(Cap0) -> Pid
+ end,
+ {noreply,St#st{capture=Cap}};
+handle_info({io_request,From,ReplyAs,Req}=IoReq, St) ->
+ try io_req(Req, From, St) of
+ passthrough ->
+ group_leader() ! IoReq;
+ Data ->
+ case is_io_permitted(From, St) of
+ false ->
+ ok;
+ true ->
+ case St of
+ #st{capture=none} ->
+ ok;
+ #st{capture=CapturePid} ->
+ CapturePid ! {captured,Data}
+ end,
+ output(minor, Data, From, From, St)
+ end,
+ From ! {io_reply,ReplyAs,ok}
+ catch
+ _:_ ->
+            %% Reply with an error so that the io client is not left hanging.
+            From ! {io_reply,ReplyAs,{error,arguments}}
+ end,
+ {noreply,St};
+handle_info({structured_io,ClientPid,{Detail,Str}}, St) ->
+ output(Detail, Str, ClientPid, ClientPid, St),
+ {noreply,St};
+handle_info({printout,Detail,Format,Args}, St) ->
+ Str = io_lib:format(Format, Args),
+ output(Detail, Str, internal, none, St),
+ {noreply,St};
+handle_info(Msg, #st{tc_supervisor=Pid}=St) when is_pid(Pid) ->
+ %% The process overseeing the testcase process also used to be
+ %% the group leader; thus, it is widely expected that it can be
+ %% reached by sending a message to the group leader. Therefore
+    %% we'll need to forward any non-recognized messages to the test
+ %% case supervisor.
+ Pid ! Msg,
+ {noreply,St};
+handle_info(_Msg, #st{}=St) ->
+ %% There is no known supervisor process. Ignore this message.
+ {noreply,St}.
+
+terminate(_, _) ->
+ ok.
+
+do_set_props([{levels,Levels}|Ps], St) ->
+ do_set_props(Ps, St#st{levels=Levels});
+do_set_props([{auto_nl,AutoNL}|Ps], St) ->
+ do_set_props(Ps, St#st{auto_nl=AutoNL});
+do_set_props([{reject_io_reqs,Bool}|Ps], St) ->
+ do_set_props(Ps, St#st{reject_io=Bool});
+do_set_props([], St) -> St.
+
+io_req({put_chars,Enc,Bytes}, _, _) when Enc =:= latin1; Enc =:= unicode ->
+ to_latin1(Enc, Bytes);
+io_req({put_chars,Encoding,Mod,Func,[Format,Args]}, _, _) ->
+ Str = Mod:Func(Format, Args),
+ to_latin1(Encoding, Str);
+io_req(_, _, _) -> passthrough.
+
+to_latin1(unicode, Str) ->
+ [if C > 255 ->
+ io_lib:format("\\{~.8B}", [C]);
+ true ->
+ C
+ end || C <- unicode:characters_to_list(Str, unicode)];
+to_latin1(latin1, Str) -> Str.
+
+output(Level, Str, Sender, From, St) when is_integer(Level) ->
+ case selected_by_level(Level, stdout, St) of
+ true -> output(stdout, Str, Sender, From, St);
+ false -> ok
+ end,
+ case selected_by_level(Level, major, St) of
+ true -> output(major, Str, Sender, From, St);
+ false -> ok
+ end,
+ case selected_by_level(Level, minor, St) of
+ true -> output(minor, Str, Sender, From, St);
+ false -> ok
+ end;
+output(stdout, Str, _Sender, From, St) ->
+ output_to_file(stdout, Str, From, St);
+output(html, Str, _Sender, From, St) ->
+ output_to_file(html, Str, From, St);
+output(Level, Str, Sender, From, St) when is_atom(Level) ->
+ output_to_file(Level, dress_output(Str, Sender, St), From, St).
+
+output_to_file(minor, Data0, From, #st{tc={M,F,A},minor=none}) ->
+ Data = [io_lib:format("=== ~p:~p/~p\n", [M,F,A]),Data0],
+ test_server_io:print(From, unexpected_io, Data),
+ ok;
+output_to_file(minor, Data, From, #st{minor=Fd}) ->
+ try
+ io:put_chars(Fd, Data)
+ catch
+ _:_ ->
+ test_server_io:print(From, unexpected_io, Data)
+ end;
+output_to_file(Detail, Data, From, _) ->
+ test_server_io:print(From, Detail, Data).
+
+is_io_permitted(From, #st{reject_io=true,permit_io=P}) ->
+ gb_sets:is_member(From, P);
+is_io_permitted(_, #st{reject_io=false}) -> true.
+
+selected_by_level(Level, stdout, #st{levels={Stdout,_,_}}) ->
+ Level =< Stdout;
+selected_by_level(Level, major, #st{levels={_,Major,_}}) ->
+ Level =< Major;
+selected_by_level(Level, minor, #st{levels={_,_,Minor}}) ->
+ Level >= Minor.
+
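+%% Worked example of the level selection above: with the default levels
+%% {1,19,10}, an integer level of 10 is written to the major log
+%% (10 =< 19) and to the minor log (10 >= 10) but not to stdout
+%% (10 > 1), while a level of 1 goes to stdout and the major log,
+%% but not to the minor log.
+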
+dress_output([$=|_]=Str, internal, _) ->
+ [Str,$\n];
+dress_output(Str, internal, _) ->
+ ["=== ",Str,$\n];
+dress_output(Str, _, #st{auto_nl=AutoNL}) ->
+ case AutoNL of
+ true -> [Str,$\n];
+ false -> Str
+ end.
diff --git a/lib/test_server/src/test_server_h.erl b/lib/test_server/src/test_server_h.erl
index fdeee59326..78daba855d 100644
--- a/lib/test_server/src/test_server_h.erl
+++ b/lib/test_server/src/test_server_h.erl
@@ -131,6 +131,11 @@ report_receiver(warning_msg, _) -> kernel;
report_receiver(warning_report, _) -> kernel;
report_receiver(info, _) -> kernel;
report_receiver(info_msg, _) -> kernel;
+report_receiver(info_report,Tuple)
+ when is_tuple(Tuple) andalso
+ (element(1,Tuple)==ct_connection orelse
+ element(1,Tuple)==conn_log) ->
+ none;
report_receiver(info_report, _) -> kernel;
report_receiver(_, _) -> none.
diff --git a/lib/test_server/src/test_server_internal.hrl b/lib/test_server/src/test_server_internal.hrl
index b58b42805e..d204c35293 100644
--- a/lib/test_server/src/test_server_internal.hrl
+++ b/lib/test_server/src/test_server_internal.hrl
@@ -24,8 +24,7 @@
%% Target information generated by test_server:init_target_info/0 and
%% test_server_ctrl:contact_main_target/2
%% Once initiated, this information will never change!!
--record(target_info, {where, % local | Socket
- os_family, % atom(); win32 | unix
+-record(target_info, {os_family, % atom(); win32 | unix
os_type, % result of os:type()
host, % string(); the name of the target machine
version, % string()
@@ -43,7 +42,6 @@
% itself is master for slave nodes
%% The following are only used for remote targets
- target_client, % reference to a client talking to target
slave_targets=[]}).% list() of atom(); all available
% targets for starting slavenodes
diff --git a/lib/test_server/src/test_server_io.erl b/lib/test_server/src/test_server_io.erl
new file mode 100644
index 0000000000..e960b3087a
--- /dev/null
+++ b/lib/test_server/src/test_server_io.erl
@@ -0,0 +1,317 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%% This module implements a process with the registered name 'test_server_io',
+%% which has two main responsibilities:
+%%
+%% * Manage group leader processes (see the test_server_gl module)
+%% for test cases. A group_leader process is obtained by calling
+%% get_gl/1. Group leader processes will be kept alive as long as
+%% the 'test_server_io' process is alive.
+%%
+%% * Handle output to the common log files (stdout, major, html,
+%% unexpected_io).
+%%
+
+-module(test_server_io).
+-export([start_link/0,stop/0,get_gl/1,set_fd/2,
+ start_transaction/0,end_transaction/0,print_buffered/1,print/3,
+ set_footer/1,set_job_name/1,set_gl_props/1]).
+
+-export([init/1,handle_call/3,handle_info/2,terminate/2]).
+
+-record(st, {fds, %Singleton fds (gb_tree)
+ shared_gl :: pid(), %Shared group leader
+ gls, %Group leaders (gb_set)
+ io_buffering=false, %I/O buffering
+ buffered, %Buffered I/O requests
+ html_footer, %HTML footer
+ job_name, %Name of current job.
+ gl_props, %Properties for GL.
+ stopping
+ }).
+
+start_link() ->
+ case gen_server:start_link({local,?MODULE}, ?MODULE, [], []) of
+ {ok,Pid} ->
+ {ok,Pid};
+ Other ->
+ Other
+ end.
+
+stop() ->
+ OldGL = group_leader(),
+ group_leader(self(), self()),
+ req(stop),
+ group_leader(OldGL, self()),
+ ok.
+
+%% get_gl(Shared) -> Pid
+%% Shared = boolean()
+%% Pid = pid()
+%%
+%% Return a group leader (a process using the test_server_gl module).
+%% If Shared is true, the shared group leader is returned (suitable for
+%% running sequential test cases), otherwise a new group leader process
+%% is spawned. Group leader processes will live until the
+%% 'test_server_io' process is stopped.
+
+get_gl(Shared) when is_boolean(Shared) ->
+ req({get_gl,Shared}).
+
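+%% Sketch of intended use (illustrative only; TcPid stands for a
+%% hypothetical test case process):
+%%
+%%   SharedGL = test_server_io:get_gl(true),    %% for sequential test cases
+%%   PrivateGL = test_server_io:get_gl(false),  %% for a parallel test case
+%%   group_leader(PrivateGL, TcPid),
+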
+%% set_fd(Tag, Fd) -> ok.
+%% Tag = major | html | unexpected_io
+%% Fd = a file descriptor (as returned by file:open/2)
+%%
+%% Associate a file descriptor with the given Tag. This
+%% Tag can later be used when calling print/3.
+
+set_fd(Tag, Fd) ->
+ req({set_fd,Tag,Fd}).
+
+%% start_transaction()
+%%
+%% Subsequent calls to print/3 from the process executing start_transaction/0
+%% will cause the messages to be buffered instead of printed directly.
+
+start_transaction() ->
+ req({start_transaction,self()}).
+
+%% end_transaction()
+%%
+%% End the transaction started by start_transaction/0. Subsequent calls to
+%% print/3 will cause the message to be printed directly.
+
+end_transaction() ->
+ req({end_transaction,self()}).
+
+%% print(From, Tag, Msg)
+%% From = pid()
+%% Tag = stdout, or any tag that has been registered using set_fd/2
+%% Msg = string or iolist
+%%
+%% Either print Msg to the file identified by Tag, or buffer the message
+%% if start_transaction/0 has been called from the process From.
+%%
+%% NOTE: The tags have various special meanings. For example, 'html'
+%% is assumed to be an HTML file.
+
+print(From, Tag, Msg) ->
+ req({print,From,Tag,Msg}).
+
+%% print_buffered(Pid)
+%% Pid = pid()
+%%
+%% Print all messages buffered in the *first* transaction buffered for Pid.
+%% (If start_transaction/0 and end_transaction/0 have been called N times,
+%% print_buffered/1 must be called N times to print all transactions.)
+
+print_buffered(Pid) ->
+ req({print_buffered,Pid}).
+
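+%% Buffering sketch (illustrative only; the printed string is made up).
+%% A process buffers its own output within a transaction, and the
+%% buffered messages are later flushed by calling print_buffered/1 with
+%% the pid of the process that started the transaction:
+%%
+%%   Pid = self(),
+%%   ok = test_server_io:start_transaction(),
+%%   ok = test_server_io:print(Pid, major, "written to the major log later\n"),
+%%   ok = test_server_io:end_transaction(),
+%%   ok = test_server_io:print_buffered(Pid).
+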
+%% set_footer(IoData)
+%%
+%% Set a footer for the file associated with the 'html' tag.
+%% It will be used by print/3 to print a footer for the HTML file.
+
+set_footer(Footer) ->
+ req({set_footer,Footer}).
+
+%% set_job_name(Name)
+%% Set a name for the currently running job. The name will be used
+%% when printing to 'stdout'.
+%%
+set_job_name(Name) ->
+ req({set_job_name,Name}).
+
+%% set_gl_props(PropList)
+%% Set properties for group leader processes. When a group_leader process
+%% is created, test_server_gl:set_props(GL, PropList) will be called.
+
+set_gl_props(PropList) ->
+ req({set_gl_props,PropList}).
+
+
+%%% Internal functions.
+
+init([]) ->
+ process_flag(trap_exit, true),
+ Empty = gb_trees:empty(),
+ {ok,Shared} = test_server_gl:start_link(),
+ {ok,#st{fds=Empty,shared_gl=Shared,gls=gb_sets:empty(),
+ io_buffering=gb_sets:empty(),
+ buffered=Empty,
+ html_footer="</body>\n</html>\n",
+ job_name="<name not set>",
+ gl_props=[]}}.
+
+req(Req) ->
+ gen_server:call(?MODULE, Req, infinity).
+
+handle_call({get_gl,false}, _From, #st{gls=Gls,gl_props=Props}=St) ->
+ {ok,Pid} = test_server_gl:start_link(),
+ test_server_gl:set_props(Pid, Props),
+ {reply,Pid,St#st{gls=gb_sets:insert(Pid, Gls)}};
+handle_call({get_gl,true}, _From, #st{shared_gl=Shared}=St) ->
+ {reply,Shared,St};
+handle_call({set_fd,Tag,Fd}, _From, #st{fds=Fds0}=St) ->
+ Fds = gb_trees:enter(Tag, Fd, Fds0),
+ {reply,ok,St#st{fds=Fds}};
+handle_call({start_transaction,Pid}, _From, #st{io_buffering=Buffer0,
+ buffered=Buf0}=St) ->
+ Buf = case gb_trees:is_defined(Pid, Buf0) of
+ false -> gb_trees:insert(Pid, queue:new(), Buf0);
+ true -> Buf0
+ end,
+ Buffer = gb_sets:add(Pid, Buffer0),
+ {reply,ok,St#st{io_buffering=Buffer,buffered=Buf}};
+handle_call({print,From,Tag,Str}, _From, St0) ->
+ St = output(From, Tag, Str, St0),
+ {reply,ok,St};
+handle_call({end_transaction,Pid}, _From, #st{io_buffering=Buffer0,
+ buffered=Buffered0}=St0) ->
+ Q0 = gb_trees:get(Pid, Buffered0),
+ Q = queue:in(eot, Q0),
+ Buffered = gb_trees:update(Pid, Q, Buffered0),
+ Buffer = gb_sets:delete_any(Pid, Buffer0),
+ St = St0#st{io_buffering=Buffer,buffered=Buffered},
+ {reply,ok,St};
+handle_call({print_buffered,Pid}, _From, #st{buffered=Buffered0}=St0) ->
+ Q0 = gb_trees:get(Pid, Buffered0),
+ Q = do_print_buffered(Q0, St0),
+ Buffered = gb_trees:update(Pid, Q, Buffered0),
+ St = St0#st{buffered=Buffered},
+ {reply,ok,St};
+handle_call({set_footer,Footer}, _From, St) ->
+ {reply,ok,St#st{html_footer=Footer}};
+handle_call({set_job_name,Name}, _From, St) ->
+ {reply,ok,St#st{job_name=Name}};
+handle_call({set_gl_props,Props}, _From, #st{shared_gl=Shared}=St) ->
+ test_server_gl:set_props(Shared, Props),
+ {reply,ok,St#st{gl_props=Props}};
+handle_call(stop, From, #st{shared_gl=SGL,gls=Gls0}=St0) ->
+ St = St0#st{gls=gb_sets:insert(SGL, Gls0),stopping=From},
+ gc(St),
+ %% Give the users of the surviving group leaders some
+ %% time to finish.
+ erlang:send_after(2000, self(), stop_group_leaders),
+ {noreply,St}.
+
+handle_info({'EXIT',Pid,normal}, #st{gls=Gls0,stopping=From}=St) ->
+ Gls = gb_sets:delete_any(Pid, Gls0),
+    case gb_sets:is_empty(Gls) andalso From =/= undefined of
+ true ->
+ %% No more group leaders left.
+ gen_server:reply(From, ok),
+ {stop,normal,St#st{gls=Gls,stopping=undefined}};
+ false ->
+ %% Wait for more group leaders to finish.
+ {noreply,St#st{gls=Gls}}
+ end;
+handle_info({'EXIT',_Pid,Reason}, _St) ->
+ exit(Reason);
+handle_info(stop_group_leaders, #st{gls=Gls}=St) ->
+ %% Stop the remaining group leaders.
+ [test_server_gl:stop(GL) || GL <- gb_sets:to_list(Gls)],
+ erlang:send_after(2000, self(), kill_group_leaders),
+ {noreply,St};
+handle_info(kill_group_leaders, #st{gls=Gls,stopping=From}=St) ->
+ [exit(GL, kill) || GL <- gb_sets:to_list(Gls)],
+ gen_server:reply(From, ok),
+ {stop,normal,St};
+handle_info(Other, St) ->
+ io:format("Ignoring: ~p\n", [Other]),
+ {noreply,St}.
+
+terminate(_, _) ->
+ ok.
+
+output(From, Tag, Str, #st{io_buffering=Buffered,buffered=Buf0}=St) ->
+ case gb_sets:is_member(From, Buffered) of
+ false ->
+ do_output(Tag, Str, St),
+ St;
+ true ->
+ Q0 = gb_trees:get(From, Buf0),
+ Q = queue:in({Tag,Str}, Q0),
+ Buf = gb_trees:update(From, Q, Buf0),
+ St#st{buffered=Buf}
+ end.
+
+do_output(stdout, Str, #st{job_name=undefined}) ->
+ io:put_chars(Str);
+do_output(stdout, Str0, #st{job_name=Name}) ->
+ Str = io_lib:format("Testing ~s: ~s\n", [Name,Str0]),
+ io:put_chars(Str);
+do_output(Tag, Str, #st{fds=Fds}=St) ->
+ case gb_trees:lookup(Tag, Fds) of
+ none ->
+ S = io_lib:format("\n*** ERROR: ~p, line ~p: No known '~p' log file\n",
+ [?MODULE,?LINE,Tag]),
+ do_output(stdout, [S,Str], St);
+ {value,Fd} ->
+ try
+ io:put_chars(Fd, Str),
+ case Tag of
+ html -> finalise_table(Fd, St);
+ _ -> ok
+ end
+ catch _:Error ->
+ S = io_lib:format("\n*** ERROR: ~p, line ~p: Error writing to "
+ "log file '~p': ~p\n",
+ [?MODULE,?LINE,Tag,Error]),
+ do_output(stdout, [S,Str], St)
+ end
+ end.
+
+finalise_table(Fd, #st{html_footer=Footer}) ->
+ case file:position(Fd, {cur,0}) of
+ {ok,Pos} ->
+ %% We are writing to a seekable file. Finalise so
+ %% we get complete valid (and viewable) HTML code.
+ %% Then rewind to overwrite the finalising code.
+ io:put_chars(Fd, ["\n</table>\n",Footer]),
+ file:position(Fd, Pos);
+ {error,epipe} ->
+ %% The file is not seekable. We cannot erase what
+ %% we've already written --- so the reader will
+ %% have to wait until we're done.
+ ok
+ end.
+
+do_print_buffered(Q0, St) ->
+ Item = queue:get(Q0),
+ Q = queue:drop(Q0),
+ case Item of
+ eot ->
+ Q;
+ {Tag,Str} ->
+ do_output(Tag, Str, St),
+ do_print_buffered(Q, St)
+ end.
+
+gc(#st{gls=Gls0}) ->
+ InUse0 = [begin
+ {group_leader,GL} = process_info(P, group_leader),
+ GL
+ end || P <- processes()],
+ InUse = ordsets:from_list(InUse0),
+ Gls = gb_sets:to_list(Gls0),
+ NotUsed = ordsets:subtract(Gls, InUse),
+ [test_server_gl:stop(Pid) || Pid <- NotUsed],
+ ok.
diff --git a/lib/test_server/src/test_server_node.erl b/lib/test_server/src/test_server_node.erl
index 872f15f2be..b307d93c7d 100644
--- a/lib/test_server/src/test_server_node.erl
+++ b/lib/test_server/src/test_server_node.erl
@@ -26,7 +26,7 @@
%% Test Controller interface
-export([is_release_available/1]).
--export([start_remote_main_target/1,stop/1]).
+-export([stop/1]).
-export([start_tracer_node/2,trace_nodes/2,stop_tracer_node/1]).
-export([start_node/5, stop_node/2]).
-export([kill_nodes/1, nodedown/2]).
@@ -57,79 +57,8 @@ is_release_available(Rel) ->
false
end.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%% Start main target node on remote host
-%%% The target node must not know the controller node via erlang distribution.
-start_remote_main_target(Parameters) ->
- #par{type=TargetType,
- target=TargetHost,
- naming=Naming,
- master=MasterNode,
- cookie=MasterCookie,
- slave_targets=SlaveTargets} = Parameters,
-
- lists:foreach(fun(T) -> maybe_reboot_target({TargetType,T}) end,
- [list_to_atom(TargetHost)|SlaveTargets]),
-
- Cmd0 = get_main_target_start_command(TargetType,TargetHost,Naming,
- MasterNode,MasterCookie),
- Cmd =
- case os:getenv("TEST_SERVER_FRAMEWORK") of
- FW when FW =:= false; FW =:= "undefined" -> Cmd0;
- FW -> Cmd0 ++ " -env TEST_SERVER_FRAMEWORK " ++ FW
- end,
-
- {ok,LSock} = gen_tcp:listen(?MAIN_PORT,[binary,{reuseaddr,true},{packet,2}]),
- case start_target(TargetType,TargetHost,Cmd) of
- {ok,TargetClient,AcceptTimeout} ->
- case gen_tcp:accept(LSock,AcceptTimeout) of
- {ok,Sock} ->
- gen_tcp:close(LSock),
- receive
- {tcp,Sock,Bin} when is_binary(Bin) ->
- case unpack(Bin) of
- error ->
- gen_tcp:close(Sock),
- close_target_client(TargetClient),
- {error,bad_message};
- {ok,{target_info,TI}} ->
- put(test_server_free_targets,SlaveTargets),
- {ok, TI#target_info{where=Sock,
- host=TargetHost,
- naming=Naming,
- master=MasterNode,
- target_client=TargetClient,
- slave_targets=SlaveTargets}}
- end;
- {tcp_closed,Sock} ->
- gen_tcp:close(Sock),
- close_target_client(TargetClient),
- {error,could_not_contact_target}
- after AcceptTimeout ->
- gen_tcp:close(Sock),
- close_target_client(TargetClient),
- {error,timeout}
- end;
- Error ->
- %%! maybe something like kill_target(...)???
- gen_tcp:close(LSock),
- close_target_client(TargetClient),
- {error,{could_not_contact_target,Error}}
- end;
- Error ->
- gen_tcp:close(LSock),
- {error,{could_not_start_target,Error}}
- end.
-
stop(TI) ->
- kill_nodes(TI),
- case TI#target_info.where of
- local -> % there is no remote target to stop
- ok;
- Sock -> % stop remote target
- gen_tcp:close(Sock),
- close_target_client(TI#target_info.target_client)
- end.
+ kill_nodes(TI).
nodedown(Sock, TI) ->
Match = #slave_info{name='$1',socket=Sock,client='$2',_='_'},
@@ -146,14 +75,8 @@ nodedown(Sock, TI) ->
false -> ok
end,
slave_died;
- [] ->
- case TI#target_info.where of
- Sock ->
- %% test_server_ctrl will do the cleanup
- target_died;
- _ ->
- ignore
- end
+ [] ->
+ ok
end.
@@ -167,10 +90,7 @@ start_tracer_node(TraceFile,TI) ->
Match = #slave_info{name='$1',_='_'},
SlaveNodes = lists:map(fun([N]) -> [" ",N] end,
ets:match(slave_tab,Match)),
- TargetNode = case TI#target_info.where of
- local -> node();
- _ -> "test_server@" ++ TI#target_info.host
- end,
+ TargetNode = node(),
Cookie = TI#target_info.cookie,
{ok,LSock} = gen_tcp:listen(0,[binary,{reuseaddr,true},{packet,2}]),
{ok,TracePort} = inet:port(LSock),
@@ -471,129 +391,29 @@ start_node_slave(SlaveName, OptList, From, TI) ->
Ret =
case start_which_node(OptList) of
{error,Reason} -> {{error,Reason},undefined,undefined};
- Host0 -> do_start_node_slave(Host0,SlaveName,Args,Prog,Cleanup,TI)
+ Host0 -> do_start_node_slave(Host0,SlaveName,Args,Prog,Cleanup)
end,
gen_server:reply(From,Ret).
-do_start_node_slave(Host0, SlaveName, Args, Prog, Cleanup, TI) ->
- case TI#target_info.where of
- local ->
- Host =
- case Host0 of
- local -> test_server_sup:hoststr();
- _ -> cast_to_list(Host0)
- end,
- Cmd = Prog ++ " " ++ Args,
- %% Can use slave.erl here because I'm both controller and target
- %% so I will ping the new node anyway
- case slave:start(Host, SlaveName, Args, no_link, Prog) of
- {ok,Nodename} ->
- case Cleanup of
- true -> ets:insert(slave_tab,#slave_info{name=Nodename});
- false -> ok
- end,
- {{ok,Nodename}, Host, Cmd, [], []};
- Ret ->
- {Ret, Host, Cmd}
- end;
-
- _Sock ->
- %% Cannot use slave.erl here because I'm only controller, and will
- %% not ping the new node. Only target shall contact the new node!!
- no_contact_start_slave(Host0,SlaveName,Args,Prog,Cleanup,TI)
- end.
-
-
-
-no_contact_start_slave(Host, Name, Args0, Prog, Cleanup,TI) ->
- Args1 = case string:str(Args0,"-setcookie") of
- 0 -> "-setcookie " ++ TI#target_info.cookie ++ " " ++ Args0;
- _ -> Args0
+do_start_node_slave(Host0, SlaveName, Args, Prog, Cleanup) ->
+ Host =
+ case Host0 of
+ local -> test_server_sup:hoststr();
+ _ -> cast_to_list(Host0)
+ end,
+ Cmd = Prog ++ " " ++ Args,
+ %% Can use slave.erl here because I'm both controller and target
+ %% so I will ping the new node anyway
+ case slave:start(Host, SlaveName, Args, no_link, Prog) of
+ {ok,Nodename} ->
+ case Cleanup of
+ true -> ets:insert(slave_tab,#slave_info{name=Nodename});
+ false -> ok
end,
- Args = TI#target_info.naming ++ " " ++ cast_to_list(Name) ++ " " ++ Args1,
- case Host of
- local ->
- case get(test_server_free_targets) of
- [] ->
- io:format("Starting slave ~p on HOST~n", [Name]),
- TargetType = test_server_sup:get_os_family(),
- Cmd0 = get_slave_node_start_command(TargetType,
- Prog,
- TI#target_info.master),
- Cmd = Cmd0 ++ " " ++ Args,
- do_no_contact_start_slave(TargetType,
- test_server_sup:hoststr(),
- Cmd, Cleanup,TI, false);
- [H|T] ->
- TargetType = TI#target_info.os_family,
- Cmd0 = get_slave_node_start_command(TargetType,
- Prog,
- TI#target_info.master),
- Cmd = Cmd0 ++ " " ++ Args,
- case do_no_contact_start_slave(TargetType,H,Cmd,Cleanup,
- TI,true) of
- {error,remove} ->
- io:format("Cannot start node on ~p, "
- "removing from slave "
- "target list.", [H]),
- put(test_server_free_targets,T),
- no_contact_start_slave(Host,Name,Args,Prog,
- Cleanup,TI);
- {error,keep} ->
- %% H is added to the END OF THE LIST
- %% in order to avoid the same target to
- %% be selected each time
- put(test_server_free_targets,T++[H]),
- no_contact_start_slave(Host,Name,Args,Prog,
- Cleanup,TI);
- R ->
- put(test_server_free_targets,T),
- R
- end
- end;
- _ ->
- TargetType = TI#target_info.os_family,
- Cmd0 = get_slave_node_start_command(TargetType,
- Prog,
- TI#target_info.master),
- Cmd = Cmd0 ++ " " ++ Args,
- do_no_contact_start_slave(TargetType, Host, Cmd, Cleanup, TI, false)
- end.
-
-do_no_contact_start_slave(TargetType,Host0,Cmd0,Cleanup,TI,Retry) ->
- %% Must use TargetType instead of TI#target_info.os_familiy here
- %% because if there were no free_targets we will be starting the
- %% slave node on host which might have a different os_familiy
- Host = cast_to_list(Host0),
- {ok,LSock} = gen_tcp:listen(0,[binary,
- {reuseaddr,true},
- {packet,2}]),
- {ok,WaitPort} = inet:port(LSock),
- Cmd = lists:concat([Cmd0, " -s ", ?MODULE, " node_started ",
- test_server_sup:hoststr(), " ", WaitPort]),
-
- case start_target(TargetType,Host,Cmd) of
- {ok,Client,AcceptTimeout} ->
- case wait_for_node_started(LSock,AcceptTimeout,
- Client,Cleanup,TI,self()) of
- {error,_}=WaitError ->
- if Retry ->
- case maybe_reboot_target(Client) of
- {error,_} -> {error,remove};
- ok -> {error,keep}
- end;
- true ->
- {WaitError,Host,Cmd}
- end;
- {Ok,Warning} ->
- {Ok,Host,Cmd,[],Warning}
- end;
- StartError ->
- gen_tcp:close(LSock),
- if Retry -> {error,remove};
- true -> {{error,{could_not_start_target,StartError}},Host,Cmd}
- end
+ {{ok,Nodename}, Host, Cmd, [], []};
+ Ret ->
+ {Ret, Host, Cmd}
end.
@@ -777,40 +597,10 @@ kill_node(SI,TI) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Platform specific code
-start_target(unix,TargetHost,Cmd0) ->
- Cmd =
- case test_server_sup:hoststr() of
- TargetHost -> Cmd0;
- _ -> lists:concat(["rsh ",TargetHost, " ", Cmd0])
- end,
- open_port({spawn, Cmd}, [stream]),
- {ok,undefined,?ACCEPT_TIMEOUT}.
-
-maybe_reboot_target(_) ->
- {error, cannot_reboot_target}.
-
close_target_client(undefined) ->
ok.
-
-%%
-%% Command for starting main target
-%%
-get_main_target_start_command(unix,_TargetHost,Naming,
- _MasterNode,_MasterCookie) ->
- Prog = pick_erl_program(default),
- Prog ++ " " ++ Naming ++ " test_server" ++
- " -boot start_sasl -sasl errlog_type error"
- " -s test_server start " ++ test_server_sup:hoststr().
-
-%%
-%% Command for starting slave nodes
-%%
-get_slave_node_start_command(unix, Prog, MasterNode) ->
- cast_to_list(Prog) ++ " -detached -master " ++ MasterNode.
-
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% cast_to_list(X) -> string()
%%% X = list() | atom() | void()
diff --git a/lib/test_server/src/test_server_sup.erl b/lib/test_server/src/test_server_sup.erl
index 4a27c1ebae..c7553cccb5 100644
--- a/lib/test_server/src/test_server_sup.erl
+++ b/lib/test_server/src/test_server_sup.erl
@@ -64,13 +64,7 @@ timetrap(Timeout0, ReportTVal, Scale, Pid) ->
true -> ReportTVal end,
MFLs = test_server:get_loc(Pid),
Mon = erlang:monitor(process, Pid),
- Trap =
- case get(test_server_init_or_end_conf) of
- undefined ->
- {timetrap_timeout,TimeToReport,MFLs};
- InitOrEnd ->
- {timetrap_timeout,TimeToReport,MFLs,InitOrEnd}
- end,
+ Trap = {timetrap_timeout,TimeToReport,MFLs},
exit(Pid, Trap),
receive
{'DOWN', Mon, process, Pid, _} ->
@@ -518,8 +512,18 @@ framework_call(Callback,Func,Args,DefaultReturn) ->
end,
case erlang:function_exported(Mod,Func,length(Args)) of
true ->
- put(test_server_loc, {Mod,Func,framework}),
EH = fun(Reason) -> exit({fw_error,{Mod,Func,Reason}}) end,
+ SetTcState = case Func of
+ end_tc -> true;
+ init_tc -> true;
+ _ -> false
+ end,
+ case SetTcState of
+ true ->
+ test_server:set_tc_state({framework,Mod,Func});
+ false ->
+ ok
+ end,
try apply(Mod,Func,Args) of
Result ->
Result
@@ -550,18 +554,6 @@ format_loc([{Mod,LineOrFunc}]) ->
format_loc({Mod,LineOrFunc});
format_loc({Mod,Func}) when is_atom(Func) ->
io_lib:format("{~s,~w}",[package_str(Mod),Func]);
-format_loc({Mod,Line}) when is_integer(Line) ->
- %% ?line macro is used
- ModStr = package_str(Mod),
- case {lists:member(no_src, get(test_server_logopts)),
- lists:reverse(ModStr)} of
- {false,[$E,$T,$I,$U,$S,$_|_]} ->
- io_lib:format("{~s,<a href=\"~s~s#~w\">~w</a>}",
- [ModStr,downcase(ModStr),?src_listing_ext,
- round_to_10(Line),Line]);
- _ ->
- io_lib:format("{~s,~w}",[ModStr,Line])
- end;
format_loc(Loc) ->
io_lib:format("~p",[Loc]).
@@ -576,16 +568,11 @@ format_loc1({Mod,Func,Line}) ->
{false,[$E,$T,$I,$U,$S,$_|_]} ->
io_lib:format("{~s,~w,<a href=\"~s~s#~w\">~w</a>}",
[ModStr,Func,downcase(ModStr),?src_listing_ext,
- round_to_10(Line),Line]);
+ Line,Line]);
_ ->
io_lib:format("{~s,~w,~w}",[ModStr,Func,Line])
end.
-round_to_10(N) when (N rem 10) == 0 ->
- N;
-round_to_10(N) ->
- trunc(N/10)*10.
-
downcase(S) -> downcase(S, []).
downcase([Uc|Rest], Result) when $A =< Uc, Uc =< $Z ->
downcase(Rest, [Uc-$A+$a|Result]);
diff --git a/lib/test_server/src/ts.erl b/lib/test_server/src/ts.erl
index db16b6ecd2..3ddc58fdbc 100644
--- a/lib/test_server/src/ts.erl
+++ b/lib/test_server/src/ts.erl
@@ -260,12 +260,19 @@ run(Testspec, Config) when is_atom(Testspec), is_list(Config) ->
Options=check_test_get_opts(Testspec, Config),
File=atom_to_list(Testspec),
Spec = case code:lib_dir(Testspec) of
- {error, bad_name} when Testspec /= emulator,
- Testspec /= system,
- Testspec /= epmd ->
+ _ when Testspec == emulator;
+ Testspec == system;
+ Testspec == epmd ->
+ File++".spec";
+ {error, bad_name} ->
create_skip_spec(Testspec, tests(Testspec));
- _ ->
- File++".spec"
+ Path ->
+ case file:read_file_info(filename:join(Path,"ebin")) of
+ {ok,_} ->
+ File++".spec";
+ _ ->
+ create_skip_spec(Testspec, tests(Testspec))
+ end
end,
run_test(File, [{spec,[Spec]}], Options);
%% Runs one module in a spec (interactive)
@@ -498,8 +505,60 @@ estone(Opts) when is_list(Opts) -> run(emulator,estone_SUITE,Opts).
cross_cover_analyse([Level]) ->
cross_cover_analyse(Level);
cross_cover_analyse(Level) ->
- test_server_ctrl:cross_cover_analyse(Level).
-
+ Apps = get_last_app_tests(),
+ Modules = get_cross_modules(Apps,[]),
+ test_server_ctrl:cross_cover_analyse(Level,Apps,Modules).
+
+get_last_app_tests() ->
+ AllTests = filelib:wildcard(filename:join(["*","*_test.logs"])),
+ {ok,RE} = re:compile("^[^/]*/[^\.]*\.(.*)_test\.logs$"),
+ get_last_app_tests(AllTests,RE,[]).
+
+get_last_app_tests([Dir|Dirs],RE,Acc) ->
+ NewAcc =
+ case re:run(Dir,RE,[{capture,all,list}]) of
+ {match,[Dir,AppStr]} ->
+ App = list_to_atom(AppStr),
+ case lists:keytake(App,1,Acc) of
+ {value,{App,LastDir},Rest} ->
+ if Dir > LastDir ->
+ [{App,Dir}|Rest];
+ true ->
+ Acc
+ end;
+ false ->
+ [{App,Dir} | Acc]
+ end;
+ _ ->
+ Acc
+ end,
+ get_last_app_tests(Dirs,RE,NewAcc);
+get_last_app_tests([],_,Acc) ->
+ Acc.
+
+get_cross_modules([{App,_}|Apps],Acc) ->
+ Mods = cross_modules(App),
+ get_cross_modules(Apps,lists:umerge(Mods,Acc));
+get_cross_modules([],Acc) ->
+ Acc.
+
+cross_modules(App) ->
+ case default_coverfile(App) of
+ none ->
+ [];
+ File ->
+ case catch file:consult(File) of
+ {ok,CoverSpec} ->
+ case lists:keyfind(cross_apps,1,CoverSpec) of
+ false ->
+ [];
+ {cross_apps,App,Modules} ->
+ lists:usort(Modules)
+ end;
+ _ ->
+ []
+ end
+ end.
%%% Implementation.
diff --git a/lib/test_server/src/ts_run.erl b/lib/test_server/src/ts_run.erl
index 57d1b8806e..741dd483f5 100644
--- a/lib/test_server/src/ts_run.erl
+++ b/lib/test_server/src/ts_run.erl
@@ -368,7 +368,7 @@ make_common_test_args(Args0, Options0, _Vars) ->
io:format("No cover file found for ~p~n",[App]),
[];
{value,{cover,_App,File,_Analyse}} ->
- [{cover,to_list(File)}];
+ [{cover,to_list(File)},{cover_stop,false}];
false ->
[]
end,
diff --git a/lib/test_server/test/Makefile b/lib/test_server/test/Makefile
index a3f9820d7f..afccc28662 100644
--- a/lib/test_server/test/Makefile
+++ b/lib/test_server/test/Makefile
@@ -26,7 +26,8 @@ include $(ERL_TOP)/make/$(TARGET)/otp.mk
MODULES= \
test_server_SUITE \
- test_server_test_lib
+ test_server_test_lib \
+ erl2html2_SUITE
ERL_FILES= $(MODULES:%=%.erl)
diff --git a/lib/test_server/test/erl2html2_SUITE.erl b/lib/test_server/test/erl2html2_SUITE.erl
new file mode 100644
index 0000000000..96175413a1
--- /dev/null
+++ b/lib/test_server/test/erl2html2_SUITE.erl
@@ -0,0 +1,254 @@
+%%%-------------------------------------------------------------------
+%%% @author Siri Hansen <[email protected]>
+%%% @copyright (C) 2012, Siri Hansen
+%%% @doc
+%%%
+%%% @end
+%%% Created : 15 Nov 2012 by Siri Hansen <[email protected]>
+%%%-------------------------------------------------------------------
+-module(erl2html2_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+
+-define(HEADER,
+ ["<!DOCTYPE HTML PUBLIC",
+ "\"-//W3C//DTD HTML 3.2 Final//EN\">\n",
+ "<!-- autogenerated by 'erl2html2' -->\n",
+ "<html>\n",
+ "<head><title>Module ", Src, "</title>\n",
+ "<meta http-equiv=\"cache-control\" ",
+ "content=\"no-cache\">\n",
+ "</head>\n",
+ "<body bgcolor=\"white\" text=\"black\" ",
+ "link=\"blue\" vlink=\"purple\" alink=\"red\">\n"]).
+
+%%--------------------------------------------------------------------
+%% @spec suite() -> Info
+%% Info = [tuple()]
+%% @end
+%%--------------------------------------------------------------------
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[ts_install_cth,test_server_test_lib]}].
+
+%%--------------------------------------------------------------------
+%% @spec init_per_suite(Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% @spec end_per_suite(Config0) -> void() | {save_config,Config1}
+%% Config0 = Config1 = [tuple()]
+%% @end
+%%--------------------------------------------------------------------
+end_per_suite(_Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @spec init_per_group(GroupName, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+init_per_group(_GroupName, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% @spec end_per_group(GroupName, Config0) ->
+%% void() | {save_config,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%% @end
+%%--------------------------------------------------------------------
+end_per_group(_GroupName, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @spec init_per_testcase(TestCase, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% @spec end_per_testcase(TestCase, Config0) ->
+%% void() | {save_config,Config1} | {fail,Reason}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+end_per_testcase(_TestCase, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% @spec groups() -> [Group]
+%% Group = {GroupName,Properties,GroupsAndTestCases}
+%% GroupName = atom()
+%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
+%% TestCase = atom()
+%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}}
+%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+%% repeat_until_any_ok | repeat_until_any_fail
+%% N = integer() | forever
+%% @end
+%%--------------------------------------------------------------------
+groups() ->
+ [].
+
+%%--------------------------------------------------------------------
+%% @spec all() -> GroupsAndTestCases | {skip,Reason}
+%% GroupsAndTestCases = [{group,GroupName} | TestCase]
+%% GroupName = atom()
+%% TestCase = atom()
+%% Reason = term()
+%% @end
+%%--------------------------------------------------------------------
+all() ->
+ [m1].
+
+%%--------------------------------------------------------------------
+%% @spec TestCase() -> Info
+%% Info = [tuple()]
+%% @end
+%%--------------------------------------------------------------------
+m1() ->
+ [].
+
+%%--------------------------------------------------------------------
+%% @spec TestCase(Config0) ->
+%% ok | exit() | {skip,Reason} | {comment,Comment} |
+%% {save_config,Config1} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%% Comment = term()
+%% @end
+%%--------------------------------------------------------------------
+m1(Config) ->
+ {Src,Dst} = convert_module("m1",Config),
+ {true,L} = check_line_numbers(Src,Dst),
+ ok = check_link_targets(Src,Dst,L,[{baz,0}]),
+ ok.
+
+convert_module(Mod,Config) ->
+ DataDir = ?config(data_dir,Config),
+ PrivDir = ?config(priv_dir,Config),
+ Src = filename:join(DataDir,Mod++".erl"),
+ Dst = filename:join(PrivDir,Mod++".erl.html"),
+ io:format("<a href=\"~s\">~s</a>\n",[Src,filename:basename(Src)]),
+ ok = erl2html2:convert(Src, Dst, "<html><body>"),
+ io:format("<a href=\"~s\">~s</a>\n",[Dst,filename:basename(Dst)]),
+ {Src,Dst}.
+
+%% Check that there are the same number of lines in each file, and
+%% that all line numbers are displayed in the dst file.
+check_line_numbers(Src,Dst) ->
+ {ok,SFd} = file:open(Src,[read]),
+ {ok,DFd} = file:open(Dst,[read]),
+ {ok,SN} = count_src_lines(SFd,0),
+ ok = file:close(SFd),
+ {ok,DN} = read_dst_line_numbers(DFd),
+ ok = file:close(DFd),
+ {SN == DN,SN}.
+
+count_src_lines(Fd,N) ->
+ case io:get_line(Fd,"") of
+ eof ->
+ {ok,N};
+ {error,Reason} ->
+ {error,Reason,N};
+ _Line ->
+ count_src_lines(Fd,N+1)
+ end.
+
+read_dst_line_numbers(Fd) ->
+ "<html><body><pre>\n" = io:get_line(Fd,""),
+ read_dst_line_numbers(Fd,0).
+read_dst_line_numbers(Fd,Last) when is_integer(Last) ->
+ case io:get_line(Fd,"") of
+ eof ->
+ {ok,Last};
+ {error,Reason} ->
+ {error,Reason,Last};
+ "</pre>"++_ ->
+ {ok,Last};
+ "</body>"++_ ->
+ {ok,Last};
+ Line ->
+ %% erlang:display(Line),
+ Num = check_line_number(Last,Line,Line),
+ read_dst_line_numbers(Fd,Num)
+ end.
+
+check_line_number(Last,Line,OrigLine) ->
+ case Line of
+ "<a name="++_ ->
+ [$>|Rest] = lists:dropwhile(fun($>) -> false; (_) -> true end,Line),
+ check_line_number(Last,Rest,OrigLine);
+ _ ->
+ [N |_] = string:tokens(Line,":"),
+% erlang:display(N),
+ Num =
+ try list_to_integer(string:strip(N))
+ catch _:_ -> ct:fail({no_line_number_after,Last,OrigLine})
+ end,
+ if Num == Last+1 ->
+ Num;
+ true ->
+ ct:fail({unexpected_integer,Num,Last})
+ end
+ end.
+
+
+%% Check that there is one link target for each line and one for each
+%% function.
+%% The test module has -compile(export_all), so all functions are
+%% found by listing the exported ones.
+check_link_targets(Src,Dst,L,RmFncs) ->
+ Mod = list_to_atom(filename:basename(filename:rootname(Src))),
+ Exports = Mod:module_info(exports)--[{module_info,0},{module_info,1}|RmFncs],
+ {ok,{[],L},_} = xmerl_sax_parser:file(Dst,
+ [{event_fun,fun sax_event/3},
+ {event_state,{Exports,0}}]),
+ ok.
+
+sax_event(Event,_Loc,State) ->
+ sax_event(Event,State).
+
+sax_event({startElement,_Uri,"a",_QN,Attrs},{Exports,PrevLine}) ->
+ {_,_,"name",Name} = lists:keyfind("name",3,Attrs),
+ case catch list_to_integer(Name) of
+ Line when is_integer(Line) ->
+ case PrevLine + 1 of
+ Line ->
+% erlang:display({found_line,Line}),
+ {Exports,Line};
+ Other ->
+ ct:fail({unexpected_line_number_target,Other})
+ end;
+ {'EXIT',_} ->
+ {match,[FStr,AStr]} =
+ re:run(Name,"^(.*)-([0-9]+)$",[{capture,all_but_first,list}]),
+ F = list_to_atom(http_uri:decode(FStr)),
+ A = list_to_integer(AStr),
+% erlang:display({found_fnc,F,A}),
+ A = proplists:get_value(F,Exports),
+ {lists:delete({F,A},Exports),PrevLine}
+ end;
+sax_event(_,State) ->
+ State.
diff --git a/lib/test_server/test/erl2html2_SUITE_data/Makefile.src b/lib/test_server/test/erl2html2_SUITE_data/Makefile.src
new file mode 100644
index 0000000000..942ac0584b
--- /dev/null
+++ b/lib/test_server/test/erl2html2_SUITE_data/Makefile.src
@@ -0,0 +1,2 @@
+all:
+ erlc -Iinclude m1.erl \ No newline at end of file
diff --git a/lib/test_server/test/erl2html2_SUITE_data/header1.hrl b/lib/test_server/test/erl2html2_SUITE_data/header1.hrl
new file mode 100644
index 0000000000..53d1b79ac5
--- /dev/null
+++ b/lib/test_server/test/erl2html2_SUITE_data/header1.hrl
@@ -0,0 +1,4 @@
+baz() ->
+ ok.
+
+-define(MACRO_DEFINING_A_FUNCTION,quux() -> ok).
diff --git a/lib/test_server/test/erl2html2_SUITE_data/include/header2.hrl b/lib/test_server/test/erl2html2_SUITE_data/include/header2.hrl
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/test_server/test/erl2html2_SUITE_data/include/header2.hrl
diff --git a/lib/test_server/test/erl2html2_SUITE_data/m1.erl b/lib/test_server/test/erl2html2_SUITE_data/m1.erl
new file mode 100644
index 0000000000..156f1d0a51
--- /dev/null
+++ b/lib/test_server/test/erl2html2_SUITE_data/m1.erl
@@ -0,0 +1,46 @@
+%% Comment with <html> code &amp; </html>
+%% and also some "quotes" and 'single quotes'
+
+-module(m1).
+
+-compile(export_all).
+
+-include("header1.hrl").
+-include("header2.hrl").
+
+-define(MACRO1,value).
+
+%%% Comment
+foo(x) ->
+ %% Comment
+ ok_x;
+foo(y) ->
+ %% Second clause
+ ok_y.
+
+'quoted_foo'() ->
+ ok.
+
+'quoted_foo_with_"_and_/'() ->
+ ok.
+
+'quoted_foo_with_(_and_)'() ->
+ ok.
+
+'quoted_foo_with_<_and_>'() ->
+ ok.
+
+bar() ->
+ do_something(),
+ok. % indentation error, OTP-9710
+
+%% Function inside macro definition
+?MACRO_DEFINING_A_FUNCTION.
+
+%% Two functions on one line
+quuux() -> ok. quuuux() -> ok.
+
+%% do_something/0 does something
+do_something() ->
+ ?MACRO1.
+%% comments after last line
diff --git a/lib/test_server/test/test_server.cover b/lib/test_server/test/test_server.cover
index c16212567e..052415377d 100644
--- a/lib/test_server/test/test_server.cover
+++ b/lib/test_server/test/test_server.cover
@@ -1,21 +1 @@
{incl_app,test_server,details}.
-
-{excl_mods, test_server, [test_server,
- test_server_ctrl,
- ts_selftest]}.
-
-%% Using incl_mods list here because the test_server might not find
-%% lib_dir for test_server - and so it will not find which modules to
-%% compile.
-{incl_mods, test_server, [erl2html2,
- test_server_node,
- test_server_sup,
- ts,
- ts_autoconf_win32,
- ts_erl_config,
- ts_install,
- ts_lib,
- ts_make,
- ts_run
- ]}.
-
diff --git a/lib/test_server/test/test_server_SUITE.erl b/lib/test_server/test/test_server_SUITE.erl
index cb8cb9da31..95a3423fef 100644
--- a/lib/test_server/test/test_server_SUITE.erl
+++ b/lib/test_server/test/test_server_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -79,7 +79,8 @@ groups() ->
all() ->
[test_server_SUITE, test_server_parallel01_SUITE,
test_server_conf02_SUITE, test_server_conf01_SUITE,
- test_server_skip_SUITE, test_server_shuffle01_SUITE].
+ test_server_skip_SUITE, test_server_shuffle01_SUITE,
+ test_server_break_SUITE].
%%--------------------------------------------------------------------
@@ -115,14 +116,18 @@ test_server_conf02_SUITE(Config) ->
run_test_server_tests("test_server_conf02_SUITE", 26, 0, 12,
12, 0, 0, 0, 0, 26, Config).
+test_server_break_SUITE(Config) ->
+ D = run_test_server_tests("test_server_break_SUITE", 8, 2, 6,
+ 4, 0, 0, 0, 2, 6, Config),
+ D.
run_test_server_tests(SuiteName, NCases, NFail, NExpected, NSucc,
NUsrSkip, NAutoSkip,
NActualSkip, NActualFail, NActualSucc, Config) ->
+ WorkDir = proplists:get_value(work_dir, Config),
ct:log("<a href=\"file://~s\">Test case log files</a>\n",
- [filename:join([proplists:get_value(priv_dir, Config),
- SuiteName++".logs"])]),
+ [filename:join(WorkDir, SuiteName++".logs")]),
Node = proplists:get_value(node, Config),
{ok,_Pid} = rpc:call(Node,test_server_ctrl, start, []),
@@ -139,9 +144,11 @@ run_test_server_tests(SuiteName, NCases, NFail, NExpected, NSucc,
rpc:call(Node,test_server_ctrl, stop, []),
{ok,Data} = test_server_test_lib:parse_suite(
- hd(filelib:wildcard(
- filename:join([proplists:get_value(priv_dir, Config),
- SuiteName++".logs","run*","suite.log"])))),
+ lists:last(
+ lists:sort(
+ filelib:wildcard(
+ filename:join([WorkDir,SuiteName++".logs",
+ "run*","suite.log"]))))),
check([{"Number of cases",NCases,Data#suite.n_cases},
{"Number failed",NFail,Data#suite.n_cases_failed},
{"Number expected",NExpected,Data#suite.n_cases_expected},
diff --git a/lib/test_server/test/test_server_SUITE_data/Makefile.src b/lib/test_server/test/test_server_SUITE_data/Makefile.src
index 332b855df6..ec8ddd78b0 100644
--- a/lib/test_server/test/test_server_SUITE_data/Makefile.src
+++ b/lib/test_server/test/test_server_SUITE_data/Makefile.src
@@ -4,4 +4,5 @@ all:
erlc test_server_conf01_SUITE.erl
erlc test_server_shuffle01_SUITE.erl
erlc test_server_conf02_SUITE.erl
- erlc test_server_skip_SUITE.erl \ No newline at end of file
+ erlc test_server_skip_SUITE.erl
+ erlc test_server_break_SUITE.erl \ No newline at end of file
diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl
index ab25e4ad2f..fc2adcd651 100644
--- a/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl
+++ b/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -433,7 +433,7 @@ start_stop_node(Config) when is_list(Config) ->
?t:comment("WARNING: Node started with {wait,false}"
" is up faster than expected...");
false ->
- wait_for_node(Node4,0),
+ test_server:wait_for_node(Node4),
true = lists:member(Node4,nodes())
end,
@@ -450,16 +450,6 @@ start_stop_node(Config) when is_list(Config) ->
ok.
-
-wait_for_node(Node,Acc) ->
- case net_adm:ping(Node) of
- pang ->
- timer:sleep(100),
- wait_for_node(Node,Acc+100);
- pong ->
- Acc
- end.
-
cleanup_nodes_init(doc) -> ["Test that nodes are terminated when test case"
" is finished unless {cleanup,false} is given."];
cleanup_nodes_init(Config) when is_list(Config) ->
diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl
new file mode 100644
index 0000000000..70e30a3334
--- /dev/null
+++ b/lib/test_server/test/test_server_SUITE_data/test_server_break_SUITE.erl
@@ -0,0 +1,148 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(test_server_break_SUITE).
+
+-export([all/1, init_per_suite/1, end_per_suite/1]).
+-export([init_per_testcase/2, end_per_testcase/2]).
+-export([break_in_init_tc/1,
+ break_in_tc/1,
+ break_in_end_tc/1,
+ break_in_end_tc_after_fail/1,
+ break_in_end_tc_after_abort/1,
+ check_all_breaks/1]).
+
+-include_lib("test_server/include/test_server.hrl").
+
+all(suite) ->
+ [break_in_init_tc,
+ break_in_tc,
+ break_in_end_tc,
+ break_in_end_tc_after_fail,
+ break_in_end_tc_after_abort,
+   check_all_breaks]. % must be the last test - checks result of previous tests
+
+init_per_suite(Config) ->
+ spawn(fun break_and_continue_sup/0),
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_testcase(Case,Config) when Case==break_in_init_tc ->
+ Config1 = init_timetrap(500,Config),
+ break_and_check(Case),
+ Config1;
+init_per_testcase(Case,Config) when Case==check_all_breaks ->
+ init_timetrap({seconds,20},Config);
+init_per_testcase(_Case,Config) ->
+ init_timetrap(500,Config).
+
+init_timetrap(T,Config) ->
+ Dog = ?t:timetrap(T),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(Case,Config) when Case==break_in_end_tc;
+ Case==break_in_end_tc_after_fail;
+ Case==break_in_end_tc_after_abort ->
+ break_and_check(Case),
+ cancel_timetrap(Config);
+end_per_testcase(_Case,Config) ->
+ cancel_timetrap(Config).
+
+cancel_timetrap(Config) ->
+ Dog=?config(watchdog, Config),
+ ?t:timetrap_cancel(Dog),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+
+break_in_init_tc(Config) when is_list(Config) ->
+ ok.
+
+break_in_tc(Config) when is_list(Config) ->
+ break_and_check(break_in_tc),
+ ok.
+
+break_in_end_tc(Config) when is_list(Config) ->
+ ok.
+
+break_in_end_tc_after_fail(Config) when is_list(Config) ->
+ ?t:fail(test_case_should_fail).
+
+break_in_end_tc_after_abort(Config) when is_list(Config) ->
+ ?t:adjusted_sleep(2000). % will cause a timetrap timeout
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
+%% This test case checks that all breaks in previous test cases were
+%% also continued, and that the break lasted as long as expected.
+%% The reason for this is that some of the breaks above are in
+%% end_per_testcase, and failures there will only produce a warning,
+%% not an error - so this is to catch the error for real.
+check_all_breaks(Config) ->
+ break_and_continue_sup ! {done,self()},
+ receive {Breaks,Continued} ->
+ check_all_breaks(Breaks,Continued)
+ end.
+
+check_all_breaks([{From,Case,T,Start}|Breaks],[{From,End}|Continued]) ->
+ Diff = timer:now_diff(End,Start),
+ DiffSec = round(Diff/1000000),
+ TSec = round(T/1000000),
+ if DiffSec==TSec ->
+ ?t:format("Break in ~p successfully continued after ~p second(s)~n",
+ [Case,DiffSec]),
+ check_all_breaks(Breaks,Continued);
+ true ->
+ ?t:format("Faulty duration of break in ~p: continued after ~p second(s)~n",
+ [Case,DiffSec]),
+ ?t:fail({faulty_diff,Case,DiffSec,TSec})
+ end;
+check_all_breaks([],[]) ->
+ ok;
+check_all_breaks(Breaks,Continued) ->
+ %% This is probably a case of a missing continue - i.e. a break
+ %% has been started, but it was never continued.
+ ?t:fail({no_match_in_breaks_and_continued,Breaks,Continued}).
+
+break_and_check(Case) ->
+ break_and_continue_sup ! {break,Case,1000,self()},
+ ?t:break(atom_to_list(Case)),
+ break_and_continue_sup ! {continued,self()},
+ ok.
+
+break_and_continue_sup() ->
+ register(break_and_continue_sup,self()),
+ break_and_continue_loop([],[]).
+
+break_and_continue_loop(Breaks,Continued) ->
+ receive
+ {break,Case,T,From} ->
+ Start = now(),
+ {RealT,_} = timer:tc(?t,adjusted_sleep,[T]),
+ ?t:continue(),
+ break_and_continue_loop([{From,Case,RealT,Start}|Breaks],Continued);
+ {continued,From} ->
+ break_and_continue_loop(Breaks,[{From,now()}|Continued]);
+ {done,From} ->
+ From ! {lists:reverse(Breaks),lists:reverse(Continued)}
+ end.
diff --git a/lib/test_server/test/test_server_test_lib.erl b/lib/test_server/test/test_server_test_lib.erl
index 5ca24f3df7..4e89abf308 100644
--- a/lib/test_server/test/test_server_test_lib.erl
+++ b/lib/test_server/test/test_server_test_lib.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -54,9 +54,13 @@ start_slave(Config,_Level) ->
ok
end,
DataDir = proplists:get_value(data_dir, Config),
- PrivDir = proplists:get_value(priv_dir, Config),
+ %% We would normally use priv_dir for temporary data,
+    %% but the pathnames get too long on Windows.
+ %% Until the run-time system can support long pathnames,
+ %% use the data dir.
+ WorkDir = DataDir,
- %% PrivDir as well as directory of Test Server suites
+ %% WorkDir as well as directory of Test Server suites
%% have to be in code path on Test Server node.
[_ | Parts] = lists:reverse(filename:split(DataDir)),
TSDir = filename:join(lists:reverse(Parts)),
@@ -64,7 +68,7 @@ start_slave(Config,_Level) ->
undefined -> [];
Ds -> Ds
end,
- PathDirs = [PrivDir,TSDir | AddPathDirs],
+ PathDirs = [WorkDir,TSDir | AddPathDirs],
[true = rpc:call(Node, code, add_patha, [D]) || D <- PathDirs],
io:format("Dirs added to code path (on ~w):~n",
[Node]),
@@ -73,13 +77,18 @@ start_slave(Config,_Level) ->
true = rpc:call(Node, os, putenv,
["TEST_SERVER_FRAMEWORK", "undefined"]),
- ok = rpc:call(Node, file, set_cwd, [PrivDir]),
- [{node,Node} | Config]
+ ok = rpc:call(Node, file, set_cwd, [WorkDir]),
+ [{node,Node}, {work_dir,WorkDir} | Config]
end.
post_end_per_testcase(_TC, Config, Return, State) ->
Node = proplists:get_value(node, Config),
- cover:stop(Node),
+ case test_server:is_cover() of
+ true ->
+ cover:flush(Node);
+ false ->
+ ok
+ end,
slave:stop(Node),
{Return, State}.
diff --git a/lib/tools/doc/src/cover.xml b/lib/tools/doc/src/cover.xml
index 683acc025d..a2444ec947 100644
--- a/lib/tools/doc/src/cover.xml
+++ b/lib/tools/doc/src/cover.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>2001</year>
- <year>2011</year>
+ <year>2012</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -104,6 +104,13 @@
remove nodes. The same Cover compiled code will be loaded on each
node, and analysis will collect and sum up coverage data results
from all nodes.</p>
+ <p>To only collect data from remote nodes without stopping
+    <c>cover</c> on those nodes, use <c>cover:flush/1</c>.</p>
+ <p>If the connection to a remote node goes down, the main node
+ will mark it as lost. If the node comes back it will be added
+ again. If the remote node was alive during the disconnected
+    period, cover data from before and during this period will be
+ included in the analysis.</p>
</description>
<funcs>
<func>
@@ -477,6 +484,17 @@
remote nodes is fetched and stored on the main node.</p>
</desc>
</func>
+ <func>
+ <name>flush(Nodes) -> ok | {error,not_main_node}</name>
+ <fsummary>Collect cover data from remote nodes.</fsummary>
+ <type>
+ <v>Nodes = [atom()]</v>
+ </type>
+ <desc>
+        <p>Fetch data from the Cover database on the remote nodes and
+        store it on the main node.</p>
+ </desc>
+ </func>
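
A minimal usage sketch of the flush workflow documented above; the node name n1@host, the module mymod and the helper mymod:run/0 are illustrative only, not part of the cover API:

    %% On the main node: cover-compile a module, then start cover on a peer node.
    {ok,mymod} = cover:compile(mymod),
    {ok,[N1]} = cover:start(['n1@host']),

    %% Exercise the module on the remote node, then pull its counters to the
    %% main node without stopping cover on that node.
    rpc:call(N1, mymod, run, []),
    ok = cover:flush(N1),

    %% Analysis on the main node now includes the flushed remote counts,
    %% while cover keeps collecting on N1.
    {ok,Answer} = cover:analyse(mymod, calls, function).

The updated post_end_per_testcase/4 in test_server_test_lib.erl earlier in this diff uses the same call to collect coverage from a slave node before stopping it.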
</funcs>
<section>
diff --git a/lib/tools/src/cover.erl b/lib/tools/src/cover.erl
index e21bd1b88c..10f14b0a49 100644
--- a/lib/tools/src/cover.erl
+++ b/lib/tools/src/cover.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2001-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2001-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -26,12 +26,17 @@
%% ARCHITECTURE
%% The coverage tool consists of one process on each node involved in
%% coverage analysis. The process is registered as 'cover_server'
-%% (?SERVER). All cover_servers in the distributed system are linked
-%% together. The cover_server on the 'main' node is in charge, and it
-%% traps exits so it can detect nodedown or process crashes on the
-%% remote nodes. This process is implemented by the functions
-%% init_main/1 and main_process_loop/1. The cover_server on the remote
-%% nodes are implemented by the functions init_remote/2 and
+%% (?SERVER). The cover_server on the 'main' node is in charge, and
+%% it monitors the cover_servers on all remote nodes. When it gets a
+%% 'DOWN' message for another cover_server, it marks the node as
+%% 'lost'. If a nodeup is received for a lost node the main node
+%% ensures that the cover compiled modules are loaded again. If the
+%% remote node was alive during the disconnected period, cover data
+%% for this period will also be included in the analysis.
+%%
+%% The cover_server process on the main node is implemented by the
+%% functions init_main/1 and main_process_loop/1. The cover_server on
+%% the remote nodes are implemented by the functions init_remote/2 and
%% remote_process_loop/1.
%%
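
The monitoring scheme described above can be condensed into a small, self-contained sketch; the module name, the loop and its bookkeeping are illustrative stand-ins, not the actual cover_server code:

    -module(monitor_sketch).
    -export([watch/1]).

    %% Watch the registered cover_server on Node. A 'DOWN' message arrives both
    %% when the remote process dies and when the connection to the node is lost.
    watch(Node) ->
        net_kernel:monitor_nodes(true),
        erlang:monitor(process, {cover_server, Node}),
        loop([Node], []).

    loop(Nodes, Lost) ->
        receive
            {'DOWN', _Ref, process, {cover_server, N}, _Info} ->
                %% Mark the node as lost instead of forgetting about it.
                loop(Nodes -- [N], [N|Lost]);
            {nodeup, N} ->
                case lists:member(N, Lost) of
                    true ->
                        %% A real implementation would re-monitor the process
                        %% and reload the cover-compiled code here.
                        loop([N|Nodes], Lost -- [N]);
                    false ->
                        loop(Nodes, Lost)
                end;
            {nodedown, _} ->
                %% Bookkeeping is done when the matching 'DOWN' arrives.
                loop(Nodes, Lost)
        end.

Since monitors, unlike links, are one-directional, a crash of the main node no longer takes the remote cover_servers down with it - which is what the updated distribution test in cover_SUITE checks further down.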
%% TABLES
@@ -81,15 +86,17 @@
export/1, export/2, import/1,
modules/0, imported/0, imported_modules/0, which_nodes/0, is_compiled/1,
reset/1, reset/0,
+ flush/1,
stop/0, stop/1]).
--export([remote_start/1]).
+-export([remote_start/1,get_main_node/0]).
%-export([bump/5]).
-export([transform/4]). % for test purposes
-record(main_state, {compiled=[], % [{Module,File}]
imported=[], % [{Module,File,ImportFile}]
stopper, % undefined | pid()
- nodes=[]}). % [Node]
+ nodes=[], % [Node]
+ lost_nodes=[]}). % [Node]
-record(remote_state, {compiled=[], % [{Module,File}]
main_node}). % atom()
@@ -497,6 +504,19 @@ stop(Node) when is_atom(Node) ->
stop(Nodes) ->
call({stop,remove_myself(Nodes,[])}).
+%% flush(Nodes) -> ok | {error,not_main_node}
+%% Nodes = [Node] | Node
+%% Node = atom()
+flush(Node) when is_atom(Node) ->
+ flush([Node]);
+flush(Nodes) ->
+ call({flush,remove_myself(Nodes,[])}).
+
+%% Used by test_server only. Not documented.
+get_main_node() ->
+ call(get_main_node).
+
%% bump(Module, Function, Arity, Clause, Line)
%% Module = Function = atom()
%% Arity = Clause = Line = integer()
@@ -541,7 +561,10 @@ remote_call(Node,Request) ->
Return =
receive
{'DOWN', Ref, _Type, _Object, _Info} ->
- {error,node_dead};
+ case Request of
+ {remote,stop} -> ok;
+ _ -> {error,node_dead}
+ end;
{?SERVER,Reply} ->
Reply
end,
@@ -569,40 +592,14 @@ init_main(Starter) ->
ets:new(?BINARY_TABLE, [set, named_table]),
ets:new(?COLLECTION_TABLE, [set, public, named_table]),
ets:new(?COLLECTION_CLAUSE_TABLE, [set, public, named_table]),
- process_flag(trap_exit,true),
+ net_kernel:monitor_nodes(true),
Starter ! {?SERVER,started},
main_process_loop(#main_state{}).
main_process_loop(State) ->
receive
{From, {start_nodes,Nodes}} ->
- ThisNode = node(),
- StartedNodes =
- lists:foldl(
- fun(Node,Acc) ->
- case rpc:call(Node,cover,remote_start,[ThisNode]) of
- {ok,RPid} ->
- link(RPid),
- [Node|Acc];
- Error ->
- io:format("Could not start cover on ~w: ~p\n",
- [Node,Error]),
- Acc
- end
- end,
- [],
- Nodes),
-
- %% In case some of the compiled modules have been unloaded they
- %% should not be loaded on the new node.
- {_LoadedModules,Compiled} =
- get_compiled_still_loaded(State#main_state.nodes,
- State#main_state.compiled),
- remote_load_compiled(StartedNodes,Compiled),
-
- State1 =
- State#main_state{nodes = State#main_state.nodes ++ StartedNodes,
- compiled = Compiled},
+ {StartedNodes,State1} = do_start_nodes(Nodes, State),
reply(From, {ok,StartedNodes}),
main_process_loop(State1);
@@ -707,8 +704,13 @@ main_process_loop(State) ->
{From, {stop,Nodes}} ->
remote_collect('_',Nodes,true),
reply(From, ok),
- State1 = State#main_state{nodes=State#main_state.nodes--Nodes},
- main_process_loop(State1);
+ Nodes1 = State#main_state.nodes--Nodes,
+ main_process_loop(State#main_state{nodes=Nodes1});
+
+ {From, {flush,Nodes}} ->
+ remote_collect('_',Nodes,false),
+ reply(From, ok),
+ main_process_loop(State);
{From, stop} ->
lists:foreach(
@@ -788,14 +790,30 @@ main_process_loop(State) ->
end,
main_process_loop(S);
- {'EXIT',Pid,_Reason} ->
- %% Exit is trapped on the main node only, so this will only happen
- %% there. I assume that I'm only linked to cover_servers on remote
- %% nodes, so this must be one of them crashing.
- %% Remove node from list!
- State1 = State#main_state{nodes=State#main_state.nodes--[node(Pid)]},
+ {'DOWN', _MRef, process, {?SERVER,Node}, _Info} ->
+ %% A remote cover_server is down, mark as lost
+ Nodes = State#main_state.nodes--[Node],
+ Lost = [Node|State#main_state.lost_nodes],
+ main_process_loop(State#main_state{nodes=Nodes,lost_nodes=Lost});
+
+ {nodeup,Node} ->
+ State1 =
+ case lists:member(Node,State#main_state.lost_nodes) of
+ true ->
+ sync_compiled(Node,State);
+ false ->
+ State
+ end,
main_process_loop(State1);
+
+ {nodedown,_} ->
+ %% Will be taken care of when 'DOWN' message arrives
+ main_process_loop(State);
+ {From, get_main_node} ->
+ reply(From, node()),
+ main_process_loop(State);
+
get_status ->
io:format("~p~n",[State]),
main_process_loop(State)
@@ -850,7 +868,16 @@ remote_process_loop(State) ->
{remote,stop} ->
reload_originals(State#remote_state.compiled),
unregister(?SERVER),
- remote_reply(State#remote_state.main_node, ok);
+ ok; % not replying since 'DOWN' message will be received anyway
+
+ {remote,get_compiled} ->
+ remote_reply(State#remote_state.main_node,
+ State#remote_state.compiled),
+ remote_process_loop(State);
+
+ {From, get_main_node} ->
+ remote_reply(From, State#remote_state.main_node),
+ remote_process_loop(State);
get_status ->
io:format("~p~n",[State]),
@@ -961,6 +988,36 @@ unload([]) ->
%%%--Handling of remote nodes--------------------------------------------
+do_start_nodes(Nodes, State) ->
+ ThisNode = node(),
+ StartedNodes =
+ lists:foldl(
+ fun(Node,Acc) ->
+ case rpc:call(Node,cover,remote_start,[ThisNode]) of
+ {ok,_RPid} ->
+ erlang:monitor(process,{?SERVER,Node}),
+ [Node|Acc];
+ Error ->
+ io:format("Could not start cover on ~w: ~p\n",
+ [Node,Error]),
+ Acc
+ end
+ end,
+ [],
+ Nodes),
+
+ %% In case some of the compiled modules have been unloaded they
+ %% should not be loaded on the new node.
+ {_LoadedModules,Compiled} =
+ get_compiled_still_loaded(State#main_state.nodes,
+ State#main_state.compiled),
+ remote_load_compiled(StartedNodes,Compiled),
+
+ State1 =
+ State#main_state{nodes = State#main_state.nodes ++ StartedNodes,
+ compiled = Compiled},
+ {StartedNodes, State1}.
+
%% start the cover_server on a remote node
remote_start(MainNode) ->
case whereis(?SERVER) of
@@ -984,6 +1041,30 @@ remote_start(MainNode) ->
{error,{already_started,Pid}}
end.
+%% If a lost node comes back, ensure that the main and remote nodes have
+%% the same cover compiled modules. Note that no action is taken if the
+%% same {Mod,File} exists on both, i.e. code change is not handled!
+sync_compiled(Node,State) ->
+ #main_state{compiled=Compiled0,nodes=Nodes,lost_nodes=Lost}=State,
+ State1 =
+ case remote_call(Node,{remote,get_compiled}) of
+ {error,node_dead} ->
+ {_,S} = do_start_nodes([Node],State),
+ S;
+ {error,_} ->
+ State;
+ RemoteCompiled ->
+ {_,Compiled} = get_compiled_still_loaded(Nodes,Compiled0),
+ Unload = [UM || {UM,_}=U <- RemoteCompiled,
+ false == lists:member(U,Compiled)],
+ remote_unload([Node],Unload),
+ Load = [L || L <- Compiled,
+ false == lists:member(L,RemoteCompiled)],
+ remote_load_compiled([Node],Load),
+ State#main_state{compiled=Compiled, nodes=[Node|Nodes]}
+ end,
+ State1#main_state{lost_nodes=Lost--[Node]}.
+
%% Load a set of cover compiled modules on remote nodes,
%% We do it ?MAX_MODS modules at a time so that we don't
%% run out of memory on the cover_server node.
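
The ?MAX_MODS batching mentioned in the comment above can be sketched as follows; load_in_batches/3 and its LoadFun argument are illustrative stand-ins for the actual remote_load_compiled code, which is not part of this diff:

    %% Hand Mods to LoadFun in chunks of at most Max modules, so that only a
    %% bounded number of module binaries is in flight at any one time.
    load_in_batches(_LoadFun, [], _Max) ->
        ok;
    load_in_batches(LoadFun, Mods, Max) when length(Mods) > Max ->
        {Batch, Rest} = lists:split(Max, Mods),
        LoadFun(Batch),
        load_in_batches(LoadFun, Rest, Max);
    load_in_batches(LoadFun, Mods, _Max) ->
        LoadFun(Mods).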
@@ -1094,7 +1175,6 @@ remove_myself([Node|Nodes],Acc) ->
remove_myself(Nodes,[Node|Acc]);
remove_myself([],Acc) ->
Acc.
-
%%%--Handling of modules state data--------------------------------------
@@ -2254,7 +2334,13 @@ do_reset2([]) ->
do_clear(Module) ->
ets:match_delete(?COVER_CLAUSE_TABLE, {Module,'_'}),
ets:match_delete(?COVER_TABLE, {#bump{module=Module},'_'}),
- ets:match_delete(?COLLECTION_TABLE, {#bump{module=Module},'_'}).
+ case lists:member(?COLLECTION_TABLE, ets:all()) of
+ true ->
+ %% We're on the main node
+ ets:match_delete(?COLLECTION_TABLE, {#bump{module=Module},'_'});
+ false ->
+ ok
+ end.
not_loaded(Module, unloaded, State) ->
do_clear(Module),
@@ -2307,7 +2393,7 @@ pmap(Fun, [E | Rest], Pids, Limit, Cnt, Acc) when Cnt < Limit ->
pmap(Fun, Rest, Pids ++ [Pid], Limit, Cnt + 1, Acc);
pmap(Fun, List, [Pid | Pids], Limit, Cnt, Acc) ->
receive
- {'DOWN', _Ref, process, _, _} ->
+ {'DOWN', _Ref, process, X, _} when is_pid(X) ->
pmap(Fun, List, [Pid | Pids], Limit, Cnt - 1, Acc);
{res, Pid, Res} ->
pmap(Fun, List, Pids, Limit, Cnt, [Res | Acc])
@@ -2316,6 +2402,6 @@ pmap(_Fun, [], [], _Limit, 0, Acc) ->
lists:reverse(Acc);
pmap(Fun, [], [], Limit, Cnt, Acc) ->
receive
- {'DOWN', _Ref, process, _, _} ->
+ {'DOWN', _Ref, process, X, _} when is_pid(X) ->
pmap(Fun, [], [], Limit, Cnt - 1, Acc)
end.
diff --git a/lib/tools/test/cover_SUITE.erl b/lib/tools/test/cover_SUITE.erl
index c2c708d806..3bf1b44af8 100644
--- a/lib/tools/test/cover_SUITE.erl
+++ b/lib/tools/test/cover_SUITE.erl
@@ -23,7 +23,7 @@
init_per_group/2,end_per_group/2]).
-export([start/1, compile/1, analyse/1, misc/1, stop/1,
- distribution/1, export_import/1,
+ distribution/1, reconnect/1, die_and_reconnect/1, export_import/1,
otp_5031/1, eif/1, otp_5305/1, otp_5418/1, otp_6115/1, otp_7095/1,
otp_8188/1, otp_8270/1, otp_8273/1, otp_8340/1]).
@@ -45,7 +45,8 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
case whereis(cover_server) of
undefined ->
- [start, compile, analyse, misc, stop, distribution,
+ [start, compile, analyse, misc, stop,
+ distribution, reconnect, die_and_reconnect,
export_import, otp_5031, eif, otp_5305, otp_5418,
otp_6115, otp_7095, otp_8188, otp_8270, otp_8273,
otp_8340];
@@ -326,14 +327,16 @@ distribution(Config) when is_list(Config) ->
?line {ok,N1} = ?t:start_node(cover_SUITE_distribution1,slave,[]),
?line {ok,N2} = ?t:start_node(cover_SUITE_distribution2,slave,[]),
?line {ok,N3} = ?t:start_node(cover_SUITE_distribution3,slave,[]),
+ ?line {ok,N4} = ?t:start_node(cover_SUITE_distribution4,slave,[]),
%% Check that an already compiled module is loaded on new nodes
?line {ok,f} = cover:compile(f),
- ?line {ok,[_,_,_]} = cover:start(nodes()),
+ ?line {ok,[_,_,_,_]} = cover:start(nodes()),
?line cover_compiled = code:which(f),
?line cover_compiled = rpc:call(N1,code,which,[f]),
?line cover_compiled = rpc:call(N2,code,which,[f]),
?line cover_compiled = rpc:call(N3,code,which,[f]),
+ ?line cover_compiled = rpc:call(N4,code,which,[f]),
%% Check that a node cannot be started twice
?line {ok,[]} = cover:start(N2),
@@ -351,6 +354,7 @@ distribution(Config) when is_list(Config) ->
?line cover_compiled = rpc:call(N1,code,which,[v]),
?line cover_compiled = rpc:call(N2,code,which,[v]),
?line cover_compiled = rpc:call(N3,code,which,[v]),
+ ?line cover_compiled = rpc:call(N4,code,which,[v]),
%% this is lost when the node is killed
?line rpc:call(N3,f,f2,[]),
@@ -385,6 +389,18 @@ distribution(Config) when is_list(Config) ->
%% reset on the remote node(s))
?line check_f_calls(1,1),
+    %% Another check that data is not fetched twice, i.e. after a flush,
+    %% analyse should not add the same data again.
+ ?line rpc:call(N4,f,f2,[]),
+ ?line ok = cover:flush(N4),
+ ?line check_f_calls(1,2),
+
+ %% Check that flush collects data so calls are not lost if node is killed
+ ?line rpc:call(N4,f,f2,[]),
+ ?line ok = cover:flush(N4),
+ ?line rpc:call(N4,erlang,halt,[]),
+ ?line check_f_calls(1,3),
+
%% Check that stop() unloads on all nodes
?line ok = cover:stop(),
?line timer:sleep(100), %% Give nodes time to unload on slow machines.
@@ -393,20 +409,117 @@ distribution(Config) when is_list(Config) ->
?line true = is_unloaded(LocalBeam),
?line true = is_unloaded(N2Beam),
- %% Check that cover_server on remote node dies if main node dies
+ %% Check that cover_server on remote node does not die if main node dies
?line {ok,[N1]} = cover:start(N1),
- ?line true = is_pid(rpc:call(N1,erlang,whereis,[cover_server])),
+ ?line true = is_pid(N1Server = rpc:call(N1,erlang,whereis,[cover_server])),
?line exit(whereis(cover_server),kill),
- ?line timer:sleep(10),
- ?line undefined = rpc:call(N1,erlang,whereis,[cover_server]),
-
+ ?line timer:sleep(100),
+ ?line N1Server = rpc:call(N1,erlang,whereis,[cover_server]),
+
%% Cleanup
?line Files = lsfiles(),
?line remove(files(Files, ".beam")),
?line ?t:stop_node(N1),
?line ?t:stop_node(N2).
-
+%% Test that a lost node is reconnected
+reconnect(Config) ->
+ DataDir = ?config(data_dir, Config),
+ ok = file:set_cwd(DataDir),
+
+ {ok,a} = compile:file(a),
+ {ok,b} = compile:file(b),
+ {ok,f} = compile:file(f),
+
+ {ok,N1} = ?t:start_node(cover_SUITE_reconnect,peer,
+ [{args," -pa " ++ DataDir},{start_cover,false}]),
+ {ok,a} = cover:compile(a),
+ {ok,f} = cover:compile(f),
+ {ok,[N1]} = cover:start(nodes()),
+
+ %% Some calls to check later
+ rpc:call(N1,f,f1,[]),
+ cover:flush(N1),
+ rpc:call(N1,f,f1,[]),
+
+ %% This will cause a call to f:f2() when nodes()==[] on N1
+ rpc:cast(N1,f,call_f2_when_isolated,[]),
+
+ %% Disconnect and check that node is removed from main cover node
+ net_kernel:disconnect(N1),
+ [] = cover:which_nodes(),
+ timer:sleep(500), % allow some time for the f:f2() call
+
+    %% Add one module (b) and remove one module (a)
+ code:purge(a),
+ {module,a} = code:load_file(a),
+ {ok,b} = cover:compile(b),
+ cover_compiled = code:which(b),
+
+ [] = cover:which_nodes(),
+ check_f_calls(1,0), % only the first call - before the flush
+
+ %% Reconnect the node and check that b and f are cover compiled but not a
+ net_kernel:connect_node(N1),
+ timer:sleep(100),
+ [N1] = cover:which_nodes(), % we are reconnected
+ cover_compiled = rpc:call(N1,code,which,[b]),
+ cover_compiled = rpc:call(N1,code,which,[f]),
+ ABeam = rpc:call(N1,code,which,[a]),
+ false = (cover_compiled==ABeam),
+
+ %% Ensure that we have:
+ %% * one f1 call from before the flush,
+ %% * one f1 call from after the flush but before disconnect
+ %% * one f2 call when disconnected
+ check_f_calls(2,1),
+
+ cover:stop(),
+ ?t:stop_node(N1),
+ ok.
+
+%% Test that a lost node is reconnected - even if it has died and been restarted
+die_and_reconnect(Config) ->
+ DataDir = ?config(data_dir, Config),
+ ok = file:set_cwd(DataDir),
+
+ {ok,f} = compile:file(f),
+
+ NodeName = cover_SUITE_die_and_reconnect,
+ {ok,N1} = ?t:start_node(NodeName,peer,
+ [{args," -pa " ++ DataDir},{start_cover,false}]),
+ %% {ok,a} = cover:compile(a),
+ {ok,f} = cover:compile(f),
+ {ok,[N1]} = cover:start(nodes()),
+
+ %% Some calls to check later
+ rpc:call(N1,f,f1,[]),
+ cover:flush(N1),
+ rpc:call(N1,f,f1,[]),
+
+ %% Kill the node
+ rpc:call(N1,erlang,halt,[]),
+ [] = cover:which_nodes(),
+
+ check_f_calls(1,0), % only the first call - before the flush
+
+ %% Restart the node and check that cover reconnects
+ {ok,N1} = ?t:start_node(NodeName,peer,
+ [{args," -pa " ++ DataDir},{start_cover,false}]),
+ timer:sleep(100),
+ [N1] = cover:which_nodes(), % we are reconnected
+ cover_compiled = rpc:call(N1,code,which,[f]),
+
+ %% One more call...
+ rpc:call(N1,f,f1,[]),
+
+ %% Ensure that no more calls are counted
+ check_f_calls(2,0),
+
+ cover:stop(),
+ ?t:stop_node(N1),
+ ok.
+
export_import(suite) -> [];
export_import(Config) when is_list(Config) ->
?line DataDir = ?config(data_dir, Config),
@@ -1238,4 +1351,4 @@ is_unloaded(What) ->
end.
check_f_calls(F1,F2) ->
- {ok,[{{f,f1,0},F1},{{f,f2,0},F2}]} = cover:analyse(f,calls,function).
+ {ok,[{{f,f1,0},F1},{{f,f2,0},F2}|_]} = cover:analyse(f,calls,function).
diff --git a/lib/tools/test/cover_SUITE_data/f.erl b/lib/tools/test/cover_SUITE_data/f.erl
index 1ef8bbdb49..ce2963014a 100644
--- a/lib/tools/test/cover_SUITE_data/f.erl
+++ b/lib/tools/test/cover_SUITE_data/f.erl
@@ -1,5 +1,5 @@
-module(f).
--export([f1/0,f2/0]).
+-export([f1/0,f2/0,call_f2_when_isolated/0]).
f1() ->
f1_line1,
@@ -8,3 +8,12 @@ f1() ->
f2() ->
f2_line1,
f2_line2.
+
+call_f2_when_isolated() ->
+ case nodes() of
+ [] ->
+ f2();
+ _ ->
+ timer:sleep(100),
+ call_f2_when_isolated()
+ end.