Diffstat (limited to 'lib')
-rw-r--r--  lib/asn1/test/asn1_SUITE.erl | 6
-rw-r--r--  lib/common_test/doc/src/cover_chapter.xml | 27
-rw-r--r--  lib/common_test/doc/src/ct_run.xml | 2
-rw-r--r--  lib/common_test/doc/src/run_test_chapter.xml | 5
-rw-r--r--  lib/common_test/src/ct.erl | 5
-rw-r--r--  lib/common_test/src/ct_config.erl | 3
-rw-r--r--  lib/common_test/src/ct_conn_log_h.erl | 6
-rw-r--r--  lib/common_test/src/ct_master.erl | 2
-rw-r--r--  lib/common_test/src/ct_master_logs.erl | 2
-rw-r--r--  lib/common_test/src/ct_netconfc.erl | 33
-rw-r--r--  lib/common_test/src/ct_run.erl | 44
-rw-r--r--  lib/common_test/src/ct_slave.erl | 16
-rw-r--r--  lib/common_test/src/ct_testspec.erl | 4
-rw-r--r--  lib/common_test/src/ct_util.hrl | 1
-rw-r--r--  lib/common_test/src/cth_log_redirect.erl | 2
-rw-r--r--  lib/common_test/test/Makefile | 5
-rw-r--r--  lib/common_test/test/common_test.cover | 10
-rw-r--r--  lib/common_test/test/ct_config_SUITE.erl | 1
-rw-r--r--  lib/common_test/test/ct_config_SUITE_data/config/test/config_static_SUITE.erl | 10
-rw-r--r--  lib/common_test/test/ct_config_info_SUITE.erl | 14
-rw-r--r--  lib/common_test/test/ct_cover_SUITE.erl | 271
-rw-r--r--  lib/common_test/test/ct_cover_SUITE_data/cover_SUITE.erl | 156
-rw-r--r--  lib/common_test/test/ct_cover_SUITE_data/cover_SUITE_data/.gitignore | 0
-rw-r--r--  lib/common_test/test/ct_cover_SUITE_data/cover_test_mod.erl | 4
-rw-r--r--  lib/common_test/test/ct_error_SUITE.erl | 446
-rw-r--r--  lib/common_test/test/ct_error_SUITE_data/error/test/misc_error_1_SUITE.erl | 12
-rw-r--r--  lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_8_SUITE.erl | 258
-rw-r--r--  lib/common_test/test/ct_netconfc_SUITE.erl | 9
-rw-r--r--  lib/common_test/test/ct_test_support.erl | 69
-rw-r--r--  lib/compiler/src/v3_codegen.erl | 1
-rw-r--r--  lib/compiler/test/bs_match_SUITE.erl | 17
-rw-r--r--  lib/compiler/test/receive_SUITE.erl | 4
-rw-r--r--  lib/diameter/.gitignore | 57
-rw-r--r--  lib/diameter/Makefile | 32
-rw-r--r--  lib/diameter/Makefile.in | 88
-rw-r--r--  lib/diameter/aclocal.m4 | 65
-rwxr-xr-x  lib/diameter/autoconf/config.guess | 1519
-rwxr-xr-x  lib/diameter/autoconf/config.sub | 1630
-rwxr-xr-x  lib/diameter/autoconf/install-sh | 519
-rw-r--r--  lib/diameter/configure.in | 137
-rw-r--r--  lib/diameter/doc/.gitignore | 4
-rw-r--r--  lib/diameter/doc/src/Makefile | 10
-rw-r--r--  lib/diameter/doc/src/diameter.xml | 103
-rw-r--r--  lib/diameter/doc/src/diameter_app.xml | 251
-rw-r--r--  lib/diameter/doc/src/diameter_transport.xml | 42
-rw-r--r--  lib/diameter/make/release_targets.mk | 92
-rw-r--r--  lib/diameter/make/rules.mk.in | 193
-rw-r--r--  lib/diameter/make/subdir.mk | 53
-rw-r--r--  lib/diameter/make/target.mk | 33
-rw-r--r--  lib/diameter/src/Makefile | 9
-rw-r--r--  lib/diameter/src/base/diameter.erl | 16
-rw-r--r--  lib/diameter/src/base/diameter_capx.erl | 6
-rw-r--r--  lib/diameter/src/base/diameter_codec.erl | 3
-rw-r--r--  lib/diameter/src/base/diameter_config.erl | 83
-rw-r--r--  lib/diameter/src/base/diameter_peer.erl | 2
-rw-r--r--  lib/diameter/src/base/diameter_peer_fsm.erl | 82
-rw-r--r--  lib/diameter/src/base/diameter_reg.erl | 255
-rw-r--r--  lib/diameter/src/base/diameter_service.erl | 735
-rw-r--r--  lib/diameter/src/base/diameter_session.erl | 14
-rw-r--r--  lib/diameter/src/base/diameter_watchdog.erl | 144
-rw-r--r--  lib/diameter/test/Makefile | 9
-rw-r--r--  lib/diameter/test/diameter_traffic_SUITE.erl | 102
-rw-r--r--  lib/diameter/test/diameter_watchdog_SUITE.erl | 711
-rw-r--r--  lib/inets/doc/src/httpd.xml | 8
-rw-r--r--  lib/inets/src/http_server/httpd_conf.erl | 15
-rw-r--r--  lib/inets/src/http_server/httpd_request_handler.erl | 60
-rw-r--r--  lib/inets/src/inets_app/inets.appup.src | 14
-rw-r--r--  lib/inets/test/httpd_basic_SUITE.erl | 16
-rw-r--r--  lib/inets/test/inets_app_test.erl | 16
-rw-r--r--  lib/inets/vsn.mk | 2
-rw-r--r--  lib/jinterface/java_src/com/ericsson/otp/erlang/OtpEpmd.java | 2
-rw-r--r--  lib/kernel/doc/src/inet.xml | 9
-rw-r--r--  lib/kernel/test/code_SUITE.erl | 4
-rw-r--r--  lib/kernel/test/global_SUITE.erl | 119
-rw-r--r--  lib/kernel/test/interactive_shell_SUITE.erl | 213
-rw-r--r--  lib/kernel/test/kernel.cover | 2
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE.erl | 2
-rw-r--r--  lib/public_key/asn1/AuthenticationFramework.asn1 | 367
-rw-r--r--  lib/public_key/asn1/InformationFramework.asn1 | 682
-rw-r--r--  lib/public_key/asn1/Makefile | 12
-rw-r--r--  lib/public_key/asn1/OTP-PUB-KEY.set.asn | 2
-rw-r--r--  lib/public_key/asn1/PKCS-10.asn1 | 70
-rw-r--r--  lib/public_key/asn1/PKCS-7.asn1 | 387
-rw-r--r--  lib/public_key/asn1/PKIX1Explicit88.asn1 | 5
-rw-r--r--  lib/public_key/asn1/SelectedAttributeTypes.asn1 | 1575
-rw-r--r--  lib/public_key/asn1/UsefulDefinitions.asn1 | 234
-rw-r--r--  lib/public_key/doc/src/cert_records.xml | 43
-rw-r--r--  lib/public_key/doc/src/introduction.xml | 14
-rw-r--r--  lib/public_key/doc/src/public_key.xml | 4
-rw-r--r--  lib/public_key/include/public_key.hrl | 2
-rw-r--r--  lib/public_key/src/pubkey_pem.erl | 19
-rw-r--r--  lib/public_key/test/public_key_SUITE.erl | 39
-rw-r--r--  lib/public_key/test/public_key_SUITE_data/pkcs7_cert.pem | 23
-rw-r--r--  lib/public_key/vsn.mk | 2
-rw-r--r--  lib/ssh/doc/src/ssh.xml | 9
-rw-r--r--  lib/ssh/src/ssh.appup.src | 6
-rw-r--r--  lib/ssh/src/ssh.erl | 78
-rw-r--r--  lib/ssh/src/ssh_auth.erl | 67
-rw-r--r--  lib/ssh/src/ssh_connection_handler.erl | 28
-rw-r--r--  lib/ssh/src/ssh_connection_manager.erl | 43
-rw-r--r--  lib/ssh/src/ssh_io.erl | 61
-rw-r--r--  lib/ssh/src/ssh_transport.erl | 2
-rw-r--r--  lib/ssh/vsn.mk | 2
-rw-r--r--  lib/ssl/src/ssl.appup.src | 15
-rw-r--r--  lib/ssl/src/ssl.erl | 8
-rw-r--r--  lib/ssl/src/ssl_connection.erl | 237
-rw-r--r--  lib/ssl/vsn.mk | 2
-rw-r--r--  lib/stdlib/test/dict_SUITE.erl | 20
-rw-r--r--  lib/stdlib/test/dict_test_lib.erl | 55
-rw-r--r--  lib/stdlib/test/epp_SUITE.erl | 9
-rw-r--r--  lib/stdlib/test/io_proto_SUITE.erl | 163
-rw-r--r--  lib/stdlib/test/sets_SUITE.erl | 342
-rw-r--r--  lib/stdlib/test/sets_test_lib.erl | 82
-rw-r--r--  lib/stdlib/test/stdlib.cover | 15
-rw-r--r--  lib/test_server/doc/src/test_server.xml | 14
-rw-r--r--  lib/test_server/src/test_server.erl | 1030
-rw-r--r--  lib/test_server/src/test_server_ctrl.erl | 727
-rw-r--r--  lib/test_server/src/test_server_h.erl | 5
-rw-r--r--  lib/test_server/src/test_server_internal.hrl | 4
-rw-r--r--  lib/test_server/src/test_server_io.erl | 8
-rw-r--r--  lib/test_server/src/test_server_node.erl | 256
-rw-r--r--  lib/test_server/src/test_server_sup.erl | 32
-rw-r--r--  lib/test_server/src/ts.erl | 73
-rw-r--r--  lib/test_server/src/ts_run.erl | 2
-rw-r--r--  lib/test_server/test/test_server.cover | 20
-rw-r--r--  lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl | 14
-rw-r--r--  lib/test_server/test/test_server_test_lib.erl | 9
-rw-r--r--  lib/tools/doc/src/cover.xml | 20
-rw-r--r--  lib/tools/src/cover.erl | 188
-rw-r--r--  lib/tools/test/cover_SUITE.erl | 133
-rw-r--r--  lib/tools/test/cover_SUITE_data/f.erl | 11
131 files changed, 8335 insertions, 7861 deletions
diff --git a/lib/asn1/test/asn1_SUITE.erl b/lib/asn1/test/asn1_SUITE.erl
index 501157b9b4..79c7bf1476 100644
--- a/lib/asn1/test/asn1_SUITE.erl
+++ b/lib/asn1/test/asn1_SUITE.erl
@@ -249,8 +249,7 @@ init_per_testcase(Func, Config) ->
[{case_dir, CaseDir}, {watchdog, Dog}|Config].
end_per_testcase(_Func, Config) ->
- code:del_path(?config(case_dir, Config)),
- test_server:timetrap_cancel(?config(watchdog, Config)).
+ code:del_path(?config(case_dir, Config)).
%%------------------------------------------------------------------------------
%% Test runners
@@ -1065,6 +1064,9 @@ test_modified_x420(Config) ->
asn1_test_lib:compile_all(Files, Config, [der]),
test_modified_x420:test_io(Config).
+
+testX420() ->
+ [{timetrap,{minutes,90}}].
testX420(Config) ->
test(Config, fun testX420/3, [ber, ber_bin, ber_bin_v2]).
testX420(Config, Rule, Opts) ->
diff --git a/lib/common_test/doc/src/cover_chapter.xml b/lib/common_test/doc/src/cover_chapter.xml
index 803a71de07..b2e64bfff0 100644
--- a/lib/common_test/doc/src/cover_chapter.xml
+++ b/lib/common_test/doc/src/cover_chapter.xml
@@ -109,6 +109,33 @@
</section>
<section>
+ <marker id="cover_stop"></marker>
+ <title>Stopping the cover tool when tests are completed</title>
+ <p>By default the Cover tool is automatically stopped when the
+ tests are completed. This causes the original (non-cover-compiled)
+ modules to be loaded back into the test node. If a process at this
+ point is still running old code of any of the cover-compiled
+ modules, i.e. it has not made a fully qualified function call
+ after the cover compilation, the process is killed. To avoid this,
+ the <c>cover_stop</c> option can be set to <c>false</c>. The
+ modules then stay cover compiled, so this is only recommended if
+ the Erlang node(s) under test are terminated after the test is
+ completed, or if cover can be stopped manually.</p>
+
+ <p>The option can be set by using the <c>-cover_stop</c> flag with
+ <c>ct_run</c>, by adding <c>{cover_stop,true|false}</c> to the
+ Opts argument to <c><seealso
+ marker="ct#run_test-1">ct:run_test/1</seealso></c>, or by adding
+ a <c>cover_stop</c> term in your test specification (see chapter
+ about <seealso
+ marker="run_test_chapter#test_specifications">test
+ specifications</seealso>).</p>
+
+ </section>
+
+ <section>
<title>The cover specification file</title>
<p>These are the terms allowed in a cover specification file:</p>
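As a concrete illustration of the text added above, here is a minimal, hedged sketch of the three ways to set the new cover_stop option. The cover spec file "my.cover" and the "test" directory are made-up names; the command-line flag, the ct:run_test/1 option and the test specification term are the ones introduced in this change:

    %% 1) On the ct_run command line:
    %%      ct_run -dir test -cover my.cover -cover_stop false
    %%
    %% 2) From Erlang, as part of the Opts list to ct:run_test/1:
    ct:run_test([{dir,"test"},
                 {cover,"my.cover"},
                 {cover_stop,false}]).
    %%
    %% 3) As terms in a test specification file:
    %%      {cover, "my.cover"}.
    %%      {cover_stop, false}.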
diff --git a/lib/common_test/doc/src/ct_run.xml b/lib/common_test/doc/src/ct_run.xml
index 9cc5495af7..da18640df7 100644
--- a/lib/common_test/doc/src/ct_run.xml
+++ b/lib/common_test/doc/src/ct_run.xml
@@ -104,6 +104,7 @@
[-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]
[-stylesheet CSSFile]
[-cover CoverCfgFile]
+ [-cover_stop Bool]
[-event_handler EvHandler1 EvHandler2 .. EvHandlerN] |
[-event_handler_init EvHandler1 InitArg1 and
EvHandler2 InitArg2 and .. EvHandlerN InitArgN]
@@ -138,6 +139,7 @@
[-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]
[-stylesheet CSSFile]
[-cover CoverCfgFile]
+ [-cover_stop Bool]
[-event_handler EvHandler1 EvHandler2 .. EvHandlerN] |
[-event_handler_init EvHandler1 InitArg1 and
EvHandler2 InitArg2 and .. EvHandlerN InitArgN]
diff --git a/lib/common_test/doc/src/run_test_chapter.xml b/lib/common_test/doc/src/run_test_chapter.xml
index ea62df27cc..b5b914d506 100644
--- a/lib/common_test/doc/src/run_test_chapter.xml
+++ b/lib/common_test/doc/src/run_test_chapter.xml
@@ -153,6 +153,8 @@
<item><c><![CDATA[-stylesheet <css_file>]]></c>, points out a user HTML style sheet (see below).</item>
<item><c><![CDATA[-cover <cover_cfg_file>]]></c>, to perform code coverage test (see
<seealso marker="cover_chapter#cover">Code Coverage Analysis</seealso>).</item>
+ <item><c><![CDATA[-cover_stop <bool>]]></c>, to specify if the cover tool shall be stopped after the test is completed (see
+ <seealso marker="cover_chapter#cover_stop">Code Coverage Analysis</seealso>).</item>
<item><c><![CDATA[-event_handler <event_handlers>]]></c>, to install
<seealso marker="event_handler_chapter#event_handling">event handlers</seealso>.</item>
<item><c><![CDATA[-event_handler_init <event_handlers>]]></c>, to install
@@ -495,6 +497,9 @@
{cover, CoverSpecFile}.
{cover, NodeRefs, CoverSpecFile}.
+ {cover_stop, Bool}.
+ {cover_stop, NodeRefs, Bool}.
+
{include, IncludeDirs}.
{include, NodeRefs, IncludeDirs}.
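The cover_stop specification term above also has a per-node form. A hedged sketch (node alias, host name and cover file are hypothetical) of how the 2- and 3-tuple variants could be combined in a test specification:

    {node, node1, ct_node@host1}.
    {cover, "my.cover"}.            %% applies to all nodes
    {cover_stop, false}.            %% applies to all nodes
    {cover_stop, node1, true}.      %% overrides the setting for node1 only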
diff --git a/lib/common_test/src/ct.erl b/lib/common_test/src/ct.erl
index 5014309c0f..6ecd0a022c 100644
--- a/lib/common_test/src/ct.erl
+++ b/lib/common_test/src/ct.erl
@@ -148,7 +148,7 @@ run(TestDirs) ->
%%% {config,CfgFiles} | {userconfig, UserConfig} |
%%% {allow_user_terms,Bool} | {logdir,LogDir} |
%%% {silent_connections,Conns} | {stylesheet,CSSFile} |
-%%% {cover,CoverSpecFile} | {step,StepOpts} |
+%%% {cover,CoverSpecFile} | {cover_stop,Bool} | {step,StepOpts} |
%%% {event_handler,EventHandlers} | {include,InclDirs} |
%%% {auto_compile,Bool} | {create_priv_dir,CreatePrivDir} |
%%% {multiply_timetraps,M} | {scale_timetraps,Bool} |
@@ -987,8 +987,9 @@ get_testdata(Key) ->
end.
%%%-----------------------------------------------------------------
-%%% @spec abort_current_testcase(Reason) -> ok | {error,no_testcase_running}
+%%% @spec abort_current_testcase(Reason) -> ok | {error,ErrorReason}
%%% Reason = term()
+%%% ErrorReason = no_testcase_running | parallel_group
%%%
%%% @doc <p>When calling this function, the currently executing test case will be aborted.
%%% It is the user's responsibility to know for sure which test case is currently
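A small usage sketch matching the return values now listed in the spec above (the delay and the abort reason are made up; ct:abort_current_testcase/1 is the documented call):

    %% Hedged sketch: abort the currently running test case after 10 seconds.
    spawn(fun() ->
                  timer:sleep(10000),
                  case ct:abort_current_testcase(took_too_long) of
                      ok                          -> ok;
                      {error,no_testcase_running} -> ok;
                      {error,parallel_group}      -> ok
                  end
          end).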
diff --git a/lib/common_test/src/ct_config.erl b/lib/common_test/src/ct_config.erl
index 30bf5925c0..06a8e12f55 100644
--- a/lib/common_test/src/ct_config.erl
+++ b/lib/common_test/src/ct_config.erl
@@ -532,7 +532,8 @@ do_require(Name,Key) ->
case get_key_from_name(Name) of
{error,_} ->
allocate(Name,Key);
- {ok,Key} ->
+ {ok,NameKey} when NameKey == Key;
+ is_tuple(Key) andalso element(1,Key) == NameKey ->
%% already allocated - check that it has all required subkeys
R = make_ref(),
case get_config(Key,R,[]) of
diff --git a/lib/common_test/src/ct_conn_log_h.erl b/lib/common_test/src/ct_conn_log_h.erl
index bf27238121..d7bd18606b 100644
--- a/lib/common_test/src/ct_conn_log_h.erl
+++ b/lib/common_test/src/ct_conn_log_h.erl
@@ -64,10 +64,16 @@ do_open_files([],Acc) ->
handle_event({_Type, GL, _Msg}, State) when node(GL) /= node() ->
{ok, State};
handle_event({_Type,_GL,{Pid,{ct_connection,Action,ConnName},Report}},State) ->
+ %% NOTE: if the format of this event is changed
+ %% ({ct_connection,Action,ConnName}) then remember to change
+ %% test_server_h:report_receiver as well!!!
Info = conn_info(Pid,#conn_log{name=ConnName,action=Action}),
write_report(now(),Info,Report,State),
{ok, State};
handle_event({_Type,_GL,{Pid,Info=#conn_log{},Report}},State) ->
+ %% NOTE: if the format of this event is changed
+ %% (Info=#conn_log{}) then remember to change
+ %% test_server_h:report_receiver as well!!!
write_report(now(),conn_info(Pid,Info),Report,State),
{ok, State};
handle_event({error_report,_,{Pid,_,[{ct_connection,ConnName}|R]}},State) ->
diff --git a/lib/common_test/src/ct_master.erl b/lib/common_test/src/ct_master.erl
index 99bec3ea09..f29eba605c 100644
--- a/lib/common_test/src/ct_master.erl
+++ b/lib/common_test/src/ct_master.erl
@@ -51,7 +51,7 @@
%%% {testcase,Cases} | {spec,TestSpecs} | {allow_user_terms,Bool} |
%%% {logdir,LogDir} | {event_handler,EventHandlers} |
%%% {silent_connections,Conns} | {cover,CoverSpecFile} |
-%%% {userconfig, UserCfgFiles}
+%%% {cover_stop,Bool} | {userconfig, UserCfgFiles}
%%% CfgFiles = string() | [string()]
%%% TestDirs = string() | [string()]
%%% Suites = atom() | [atom()]
diff --git a/lib/common_test/src/ct_master_logs.erl b/lib/common_test/src/ct_master_logs.erl
index d76288feef..84f175c0a9 100644
--- a/lib/common_test/src/ct_master_logs.erl
+++ b/lib/common_test/src/ct_master_logs.erl
@@ -204,7 +204,7 @@ open_ct_master_log(Dir) ->
{ok,Fd} = file:open(FullName,[write]),
io:put_chars(Fd,header("Common Test Master Log", {[],[1,2],[]})),
%% maybe add config info here later
- io:put_chars(config_table([])),
+ io:put_chars(Fd,config_table([])),
io:put_chars(Fd,
"<style>\n"
"div.ct_internal { background:lightgrey; color:black }\n"
diff --git a/lib/common_test/src/ct_netconfc.erl b/lib/common_test/src/ct_netconfc.erl
index 52fe9599ce..294b82bff6 100644
--- a/lib/common_test/src/ct_netconfc.erl
+++ b/lib/common_test/src/ct_netconfc.erl
@@ -968,7 +968,7 @@ close_session(Client) ->
%% @end
%%----------------------------------------------------------------------
close_session(Client, Timeout) ->
- call(Client,{send_rpc_op, close_session, [], Timeout}).
+ call(Client,{send_rpc_op, close_session, [], Timeout}, true).
%%----------------------------------------------------------------------
@@ -1121,17 +1121,38 @@ close(Client) ->
%% Internal functions
%%----------------------------------------------------------------------
call(Client, Msg) ->
- call(Client, Msg, infinity).
-call(Client, Msg, Timeout) ->
+ call(Client, Msg, infinity, false).
+call(Client, Msg, Timeout) when is_integer(Timeout); Timeout==infinity ->
+ call(Client, Msg, Timeout, false);
+call(Client, Msg, WaitStop) when is_boolean(WaitStop) ->
+ call(Client, Msg, infinity, WaitStop).
+call(Client, Msg, Timeout, WaitStop) ->
case get_handle(Client) of
{ok,Pid} ->
case ct_gen_conn:call(Pid,Msg,Timeout) of
- {error,{process_down,Client,noproc}} ->
+ {error,{process_down,Pid,noproc}} ->
{error,no_such_client};
- {error,{process_down,Client,normal}} ->
+ {error,{process_down,Pid,normal}} when WaitStop ->
+ %% This will happen when server closes connection
+ %% before the client received the rpc-reply on
+ %% close-session.
+ ok;
+ {error,{process_down,Pid,normal}} ->
{error,closed};
- {error,{process_down,Client,Reason}} ->
+ {error,{process_down,Pid,Reason}} ->
{error,{closed,Reason}};
+ Other when WaitStop ->
+ MRef = erlang:monitor(process,Pid),
+ receive
+ {'DOWN',MRef,process,Pid,Normal} when Normal==normal;
+ Normal==noproc ->
+ Other;
+ {'DOWN',MRef,process,Pid,Reason} ->
+ {error,{{closed,Reason},Other}}
+ after Timeout ->
+ erlang:demonitor(MRef, [flush]),
+ {error,{timeout,Other}}
+ end;
Other ->
Other
end;
diff --git a/lib/common_test/src/ct_run.erl b/lib/common_test/src/ct_run.erl
index 4a6a3cdcac..ed7778c25a 100644
--- a/lib/common_test/src/ct_run.erl
+++ b/lib/common_test/src/ct_run.erl
@@ -58,6 +58,7 @@
vts,
shell,
cover,
+ cover_stop,
coverspec,
step,
logdir,
@@ -245,6 +246,7 @@ script_start1(Parent, Args) ->
Vts = get_start_opt(vts, true, Args),
Shell = get_start_opt(shell, true, Args),
Cover = get_start_opt(cover, fun([CoverFile]) -> ?abs(CoverFile) end, Args),
+ CoverStop = get_start_opt(cover_stop, fun([CS]) -> list_to_atom(CS) end, Args),
LogDir = get_start_opt(logdir, fun([LogD]) -> LogD end, Args),
LogOpts = get_start_opt(logopts, fun(Os) -> [list_to_atom(O) || O <- Os] end,
[], Args),
@@ -329,7 +331,8 @@ script_start1(Parent, Args) ->
end,
StartOpts = #opts{label = Label, profile = Profile,
- vts = Vts, shell = Shell, cover = Cover,
+ vts = Vts, shell = Shell,
+ cover = Cover, cover_stop = CoverStop,
logdir = LogDir, logopts = LogOpts,
basic_html = BasicHtml,
verbosity = Verbosity,
@@ -416,6 +419,9 @@ script_start2(StartOpts = #opts{vts = undefined,
Cover =
choose_val(StartOpts#opts.cover,
SpecStartOpts#opts.cover),
+ CoverStop =
+ choose_val(StartOpts#opts.cover_stop,
+ SpecStartOpts#opts.cover_stop),
MultTT =
choose_val(StartOpts#opts.multiply_timetraps,
SpecStartOpts#opts.multiply_timetraps),
@@ -475,6 +481,7 @@ script_start2(StartOpts = #opts{vts = undefined,
profile = Profile,
testspecs = Specs,
cover = Cover,
+ cover_stop = CoverStop,
logdir = LogDir,
logopts = AllLogOpts,
basic_html = BasicHtml,
@@ -723,6 +730,7 @@ script_usage() ->
"\n\t[-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]"
"\n\t[-stylesheet CSSFile]"
"\n\t[-cover CoverCfgFile]"
+ "\n\t[-cover_stop Bool]"
"\n\t[-event_handler EvHandler1 EvHandler2 .. EvHandlerN]"
"\n\t[-ct_hooks CTHook1 CTHook2 .. CTHookN]"
"\n\t[-include InclDir1 InclDir2 .. InclDirN]"
@@ -745,6 +753,7 @@ script_usage() ->
"\n\t[-silent_connections [ConnType1 ConnType2 .. ConnTypeN]]"
"\n\t[-stylesheet CSSFile]"
"\n\t[-cover CoverCfgFile]"
+ "\n\t[-cover_stop Bool]"
"\n\t[-event_handler EvHandler1 EvHandler2 .. EvHandlerN]"
"\n\t[-ct_hooks CTHook1 CTHook2 .. CTHookN]"
"\n\t[-include InclDir1 InclDir2 .. InclDirN]"
@@ -938,6 +947,7 @@ run_test2(StartOpts) ->
%% code coverage
Cover = get_start_opt(cover,
fun(CoverFile) -> ?abs(CoverFile) end, StartOpts),
+ CoverStop = get_start_opt(cover_stop, value, StartOpts),
%% timetrap manipulation
MultiplyTT = get_start_opt(multiply_timetraps, value, 1, StartOpts),
@@ -1000,7 +1010,8 @@ run_test2(StartOpts) ->
Step = get_start_opt(step, value, StartOpts),
Opts = #opts{label = Label, profile = Profile,
- cover = Cover, step = Step, logdir = LogDir,
+ cover = Cover, cover_stop = CoverStop,
+ step = Step, logdir = LogDir,
logopts = LogOpts, basic_html = BasicHtml,
config = CfgFiles,
verbosity = Verbosity,
@@ -1063,6 +1074,8 @@ run_spec_file(Relaxed,
AllConfig = merge_vals([CfgFiles, SpecOpts#opts.config]),
Cover = choose_val(Opts#opts.cover,
SpecOpts#opts.cover),
+ CoverStop = choose_val(Opts#opts.cover_stop,
+ SpecOpts#opts.cover_stop),
MultTT = choose_val(Opts#opts.multiply_timetraps,
SpecOpts#opts.multiply_timetraps),
ScaleTT = choose_val(Opts#opts.scale_timetraps,
@@ -1103,6 +1116,7 @@ run_spec_file(Relaxed,
Opts1 = Opts#opts{label = Label,
profile = Profile,
cover = Cover,
+ cover_stop = CoverStop,
logdir = which(logdir, LogDir),
logopts = AllLogOpts,
stylesheet = Stylesheet,
@@ -1374,6 +1388,7 @@ get_data_for_node(#testspec{label = Labels,
verbosity = VLvls,
silent_connections = SilentConnsList,
cover = CoverFs,
+ cover_stop = CoverStops,
config = Cfgs,
userconfig = UsrCfgs,
event_handler = EvHs,
@@ -1405,6 +1420,7 @@ get_data_for_node(#testspec{label = Labels,
SCs -> SCs
end,
Cover = proplists:get_value(Node, CoverFs),
+ CoverStop = proplists:get_value(Node, CoverStops),
MT = proplists:get_value(Node, MTs),
ST = proplists:get_value(Node, STs),
CreatePrivDir = proplists:get_value(Node, PDs),
@@ -1423,6 +1439,7 @@ get_data_for_node(#testspec{label = Labels,
verbosity = Verbosity,
silent_connections = SilentConns,
cover = Cover,
+ cover_stop = CoverStop,
config = ConfigFiles,
event_handlers = EvHandlers,
ct_hooks = FiltCTHooks,
@@ -1576,14 +1593,7 @@ do_run(Tests, Misc, LogDir, LogOpts) when is_list(Misc),
StepOpts ->
#opts{step = StepOpts}
end,
- Opts1 =
- case proplists:get_value(cover, Misc) of
- undefined ->
- Opts;
- CoverFile ->
- Opts#opts{cover = CoverFile}
- end,
- do_run(Tests, [], Opts1#opts{logdir = LogDir}, []);
+ do_run(Tests, [], Opts#opts{logdir = LogDir}, []);
do_run(Tests, Skip, Opts, Args) when is_record(Opts, opts) ->
#opts{label = Label, profile = Profile, cover = Cover,
@@ -1617,7 +1627,13 @@ do_run(Tests, Skip, Opts, Args) when is_record(Opts, opts) ->
{error,Reason} ->
exit({error,Reason});
CoverSpec ->
- Opts#opts{coverspec = CoverSpec}
+ CoverStop =
+ case Opts#opts.cover_stop of
+ undefined -> true;
+ Stop -> Stop
+ end,
+ Opts#opts{coverspec = CoverSpec,
+ cover_stop = CoverStop}
end
end,
%% This env variable is used by test_server to determine
@@ -2120,7 +2136,8 @@ do_run_test(Tests, Skip, Opts) ->
%% tell test_server which modules should be cover compiled
%% note that actual compilation is done when tests start
test_server_ctrl:cover(CovApp, CovFile, CovExcl, CovIncl,
- CovCross, CovExport, CovLevel),
+ CovCross, CovExport, CovLevel,
+ Opts#opts.cover_stop),
%% save cover data (used e.g. to add nodes dynamically)
ct_util:set_testdata({cover,CovData}),
%% start cover on specified nodes
@@ -2584,6 +2601,9 @@ merge_arguments([LogDir={logdir,_}|Args], Merged) ->
merge_arguments([CoverFile={cover,_}|Args], Merged) ->
merge_arguments(Args, handle_arg(replace, CoverFile, Merged));
+merge_arguments([CoverStop={cover_stop,_}|Args], Merged) ->
+ merge_arguments(Args, handle_arg(replace, CoverStop, Merged));
+
merge_arguments([{'case',TC}|Args], Merged) ->
merge_arguments(Args, handle_arg(merge, {testcase,TC}, Merged));
diff --git a/lib/common_test/src/ct_slave.erl b/lib/common_test/src/ct_slave.erl
index aa3413fa89..cb05423497 100644
--- a/lib/common_test/src/ct_slave.erl
+++ b/lib/common_test/src/ct_slave.erl
@@ -1,7 +1,7 @@
%%--------------------------------------------------------------------
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -311,6 +311,13 @@ do_start(Host, Node, Options) ->
StartupTimeout = Options#options.startup_timeout,
Result = case wait_for_node_alive(ENode, BootTimeout) of
pong->
+ case test_server:is_cover() of
+ true ->
+ MainCoverNode = cover:get_main_node(),
+ rpc:call(MainCoverNode,cover,start,[ENode]);
+ false ->
+ ok
+ end,
call_functions(ENode, Functions2),
receive
{node_started, ENode}->
@@ -423,6 +430,13 @@ wait_for_node_alive(Node, N) ->
% call init:stop on a remote node
do_stop(ENode) ->
+ case test_server:is_cover() of
+ true ->
+ MainCoverNode = cover:get_main_node(),
+ rpc:call(MainCoverNode,cover,flush,[ENode]);
+ false ->
+ ok
+ end,
spawn(ENode, init, stop, []),
wait_for_node_dead(ENode, 5).
diff --git a/lib/common_test/src/ct_testspec.erl b/lib/common_test/src/ct_testspec.erl
index a8b67d0329..0221b87d49 100644
--- a/lib/common_test/src/ct_testspec.erl
+++ b/lib/common_test/src/ct_testspec.erl
@@ -903,6 +903,8 @@ handle_data(logdir,Node,Dir,Spec) ->
[{Node,ref2dir(Dir,Spec)}];
handle_data(cover,Node,File,Spec) ->
[{Node,get_absfile(File,Spec)}];
+handle_data(cover_stop,Node,Stop,_Spec) ->
+ [{Node,Stop}];
handle_data(include,Node,Dirs=[D|_],Spec) when is_list(D) ->
[{Node,ref2dir(Dir,Spec)} || Dir <- Dirs];
handle_data(include,Node,Dir=[Ch|_],Spec) when is_integer(Ch) ->
@@ -1258,6 +1260,8 @@ valid_terms() ->
{node,3},
{cover,2},
{cover,3},
+ {cover_stop,2},
+ {cover_stop,3},
{config,2},
{config,3},
{config,4},
diff --git a/lib/common_test/src/ct_util.hrl b/lib/common_test/src/ct_util.hrl
index 196b5e46d0..c9c6514fa4 100644
--- a/lib/common_test/src/ct_util.hrl
+++ b/lib/common_test/src/ct_util.hrl
@@ -38,6 +38,7 @@
verbosity=[],
silent_connections=[],
cover=[],
+ cover_stop=[],
config=[],
userconfig=[],
event_handler=[],
diff --git a/lib/common_test/src/cth_log_redirect.erl b/lib/common_test/src/cth_log_redirect.erl
index 77f57c6195..78ae70f37e 100644
--- a/lib/common_test/src/cth_log_redirect.erl
+++ b/lib/common_test/src/cth_log_redirect.erl
@@ -54,7 +54,7 @@ post_init_per_group(_Group, _Config, Result, State) ->
post_end_per_testcase(_TC, _Config, Result, State) ->
%% Make sure that the event queue is flushed
%% before ending this test case.
- gen_event:call(error_logger, ?MODULE, flush),
+ gen_event:call(error_logger, ?MODULE, flush, 300000),
{Result, State}.
pre_end_per_group(Group, Config, {ct_log, Group}) ->
diff --git a/lib/common_test/test/Makefile b/lib/common_test/test/Makefile
index 64e2cb6507..3de33688da 100644
--- a/lib/common_test/test/Makefile
+++ b/lib/common_test/test/Makefile
@@ -54,7 +54,8 @@ MODULES= \
ct_shell_SUITE \
ct_system_error_SUITE \
ct_snmp_SUITE \
- ct_group_leader_SUITE
+ ct_group_leader_SUITE \
+ ct_cover_SUITE
ERL_FILES= $(MODULES:%=%.erl)
@@ -108,7 +109,7 @@ release_spec: opt
release_tests_spec:
$(INSTALL_DIR) "$(RELSYSDIR)"
$(INSTALL_DATA) $(ERL_FILES) $(COVERFILE) "$(RELSYSDIR)"
- $(INSTALL_DATA) common_test.spec "$(RELSYSDIR)"
+ $(INSTALL_DATA) common_test.spec common_test.cover "$(RELSYSDIR)"
chmod -R u+w "$(RELSYSDIR)"
@tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -)
diff --git a/lib/common_test/test/common_test.cover b/lib/common_test/test/common_test.cover
new file mode 100644
index 0000000000..66697854ea
--- /dev/null
+++ b/lib/common_test/test/common_test.cover
@@ -0,0 +1,10 @@
+%% -*- erlang -*-
+{incl_app,common_test,details}.
+{cross_apps,common_test,[erl2html2,
+ test_server,
+ test_server_ctrl,
+ test_server_gl,
+ test_server_h,
+ test_server_io,
+ test_server_node,
+ test_server_sup]}.
diff --git a/lib/common_test/test/ct_config_SUITE.erl b/lib/common_test/test/ct_config_SUITE.erl
index 0b1abae757..1e1df908db 100644
--- a/lib/common_test/test/ct_config_SUITE.erl
+++ b/lib/common_test/test/ct_config_SUITE.erl
@@ -251,6 +251,7 @@ expected_events(config_static_SUITE)->
?sok(test_alias_tclocal_nested,{14,0,{2,1}}),
?sok(test_alias_tclocal_nested_backward_compat,{15,0,{2,1}}),
?sok(test_alias_tclocal_nested_backward_compat_subvals,{16,0,{2,1}}),
+ ?sok(test_config_same_name_already_in_use,{17,0,{2,1}}),
{?eh,tc_start,{config_static_SUITE,end_per_suite}},
{?eh,tc_done,{config_static_SUITE,end_per_suite,ok}},
{?eh,test_done,{'DEF','STOP_TIME'}},
diff --git a/lib/common_test/test/ct_config_SUITE_data/config/test/config_static_SUITE.erl b/lib/common_test/test/ct_config_SUITE_data/config/test/config_static_SUITE.erl
index 2e1ad651e8..19f1dab4af 100644
--- a/lib/common_test/test/ct_config_SUITE_data/config/test/config_static_SUITE.erl
+++ b/lib/common_test/test/ct_config_SUITE_data/config/test/config_static_SUITE.erl
@@ -64,7 +64,8 @@ all() -> [test_get_config_simple, test_get_config_nested,
test_shadow_all,test_element,test_shadow_all_element,
test_internal_deep, test_alias_tclocal_nested,
test_alias_tclocal_nested_backward_compat,
- test_alias_tclocal_nested_backward_compat_subvals
+ test_alias_tclocal_nested_backward_compat_subvals,
+ test_config_same_name_already_in_use
].
init_per_testcase(_,Config) ->
@@ -124,6 +125,13 @@ test_config_name_already_in_use2(_) ->
ct:fail("Test should've been skipped, you shouldn't see this!"),
ok.
+
+test_config_same_name_already_in_use() ->
+ [].
+test_config_same_name_already_in_use(_) ->
+ ok = ct:require(x2,{gen_cfg,c}),
+ ok = ct:require(x2,{gen_cfg,c}).
+
%% test aliases
test_alias_tclocal() ->
[{require,newalias,gen_cfg}].
diff --git a/lib/common_test/test/ct_config_info_SUITE.erl b/lib/common_test/test/ct_config_info_SUITE.erl
index 40da377ee5..10fe8286dd 100644
--- a/lib/common_test/test/ct_config_info_SUITE.erl
+++ b/lib/common_test/test/ct_config_info_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -123,8 +123,7 @@ test_events(config_info) ->
{?eh,tc_done,{config_info_1_SUITE,init_per_suite,ok}},
[{?eh,tc_start,{config_info_1_SUITE,{init_per_group,g1,[]}}},
- {?eh,tc_done,{config_info_1_SUITE,
- {init_per_group,unknown,[]},
+ {?eh,tc_done,{config_info_1_SUITE,{init_per_group,g1,[]},
{failed,{timetrap_timeout,350}}}},
{?eh,tc_auto_skip,{config_info_1_SUITE,t11,
{failed,{config_info_1_SUITE,init_per_group,{timetrap_timeout,350}}}}},
@@ -136,14 +135,12 @@ test_events(config_info) ->
{?eh,tc_done,{config_info_1_SUITE,{init_per_group,g2,[]},ok}},
{?eh,tc_done,{config_info_1_SUITE,t21,ok}},
{?eh,tc_start,{config_info_1_SUITE,{end_per_group,g2,[]}}},
- {?eh,tc_done,{config_info_1_SUITE,
- {end_per_group,unknown,[]},
+ {?eh,tc_done,{config_info_1_SUITE,{end_per_group,g2,[]},
{failed,{timetrap_timeout,450}}}}],
[{?eh,tc_start,{config_info_1_SUITE,{init_per_group,g3,[]}}},
{?eh,tc_done,{config_info_1_SUITE,{init_per_group,g3,[]},ok}},
[{?eh,tc_start,{config_info_1_SUITE,{init_per_group,g4,[]}}},
- {?eh,tc_done,{config_info_1_SUITE,
- {init_per_group,unknown,[]},
+ {?eh,tc_done,{config_info_1_SUITE,{init_per_group,g4,[]},
{failed,{timetrap_timeout,400}}}},
{?eh,tc_auto_skip,{config_info_1_SUITE,t41,
{failed,{config_info_1_SUITE,init_per_group,
@@ -164,8 +161,7 @@ test_events(config_info) ->
{?eh,tc_done,{config_info_1_SUITE,{init_per_group,g5,[]},ok}},
{?eh,tc_done,{config_info_1_SUITE,t51,ok}},
{?eh,tc_start,{config_info_1_SUITE,{end_per_group,g5,[]}}},
- {?eh,tc_done,{config_info_1_SUITE,
- {end_per_group,unknown,[]},
+ {?eh,tc_done,{config_info_1_SUITE,{end_per_group,g5,[]},
{failed,{timetrap_timeout,400}}}}],
{?eh,tc_start,{config_info_1_SUITE,{end_per_group,g3,[]}}},
{?eh,tc_done,{config_info_1_SUITE,{end_per_group,g3,[]},ok}}],
diff --git a/lib/common_test/test/ct_cover_SUITE.erl b/lib/common_test/test/ct_cover_SUITE.erl
new file mode 100644
index 0000000000..bebfce70d0
--- /dev/null
+++ b/lib/common_test/test/ct_cover_SUITE.erl
@@ -0,0 +1,271 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%%-------------------------------------------------------------------
+%%% File: ct_cover_SUITE
+%%%
+%%% Description:
+%%% Test code cover analysis support
+%%%
+%%%-------------------------------------------------------------------
+-module(ct_cover_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+
+-define(eh, ct_test_support_eh).
+-define(suite, cover_SUITE).
+-define(mod, cover_test_mod).
+
+%%--------------------------------------------------------------------
+%% TEST SERVER CALLBACK FUNCTIONS
+%%--------------------------------------------------------------------
+
+%%--------------------------------------------------------------------
+%% Description: Since Common Test starts another Test Server
+%% instance, the tests need to be performed on a separate node (or
+%% there will be clashes with logging processes etc).
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ case test_server:is_cover() of
+ true ->
+ {skip,"Test server is running cover already - skipping"};
+ false ->
+ ct_test_support:init_per_suite(Config)
+ end.
+
+end_per_suite(Config) ->
+ ct_test_support:end_per_suite(Config).
+
+init_per_testcase(TestCase, Config) ->
+ ct_test_support:init_per_testcase(TestCase, Config).
+
+end_per_testcase(TestCase, Config) ->
+ Node = fullname(existing_node),
+ case lists:member(Node,nodes()) of
+ true -> rpc:call(Node,erlang,halt,[]);
+ false -> ok
+ end,
+ ct_test_support:end_per_testcase(TestCase, Config).
+
+suite() -> [{ct_hooks,[ts_install_cth]}].
+
+all() ->
+ [
+ default,
+ cover_stop_true,
+ cover_stop_false,
+ slave,
+ slave_start_slave,
+ cover_node_option,
+ ct_cover_add_remove_nodes,
+ otp_9956
+ ].
+
+%%--------------------------------------------------------------------
+%% TEST CASES
+%%--------------------------------------------------------------------
+
+%% Check that cover is collected from test node
+%% Also check that cover is by default stopped after test is completed
+default(Config) ->
+ {ok,Events} = run_test(default,Config),
+ false = check_cover(Config),
+ check_calls(Events,1),
+ ok.
+
+%% Check that cover is stopped when cover_stop option is set to true
+cover_stop_true(Config) ->
+ {ok,_Events} = run_test(cover_stop_true,[{cover_stop,true}],Config),
+ false = check_cover(Config).
+
+%% Check that cover is not stopped when cover_stop option is set to false
+cover_stop_false(Config) ->
+ {ok,_Events} = run_test(cover_stop_false,[{cover_stop,false}],Config),
+ {true,[],[?mod]} = check_cover(Config),
+ CTNode = proplists:get_value(ct_node, Config),
+ ok = rpc:call(CTNode,cover,stop,[]),
+ false = check_cover(Config),
+ ok.
+
+%% Let test node start a slave node - check that cover is collected
+%% from both nodes
+slave(Config) ->
+ {ok,Events} = run_test(slave,slave,[],Config),
+ check_calls(Events,2),
+ ok.
+
+%% Let test node start a slave node which in turn starts another slave
+%% node - check that cover is collected from all three nodes
+slave_start_slave(Config) ->
+ {ok,Events} = run_test(slave_start_slave,slave_start_slave,[],Config),
+ check_calls(Events,3),
+ ok.
+
+%% Start a slave node before test starts - the node is listed in cover
+%% spec file.
+%% Check that cover is collected from test node and slave node.
+cover_node_option(Config) ->
+ {ok, HostStr}=inet:gethostname(),
+ Host = list_to_atom(HostStr),
+ DataDir = ?config(data_dir,Config),
+ {ok,Node} = ct_slave:start(Host,existing_node,
+ [{erl_flags,"-pa " ++ DataDir}]),
+ false = check_cover(Node),
+ CoverSpec = default_cover_file_content() ++ [{nodes,[Node]}],
+ CoverFile = create_cover_file(cover_node_option,CoverSpec,Config),
+ {ok,Events} = run_test(cover_node_option,cover_node_option,
+ [{cover,CoverFile}],Config),
+ check_calls(Events,2),
+ {ok,Node} = ct_slave:stop(existing_node),
+ ok.
+
+%% Test ct_cover:add_nodes/1 and ct_cover:remove_nodes/1
+%% Check that cover is collected from added node
+ct_cover_add_remove_nodes(Config) ->
+ {ok, HostStr}=inet:gethostname(),
+ Host = list_to_atom(HostStr),
+ DataDir = ?config(data_dir,Config),
+ {ok,Node} = ct_slave:start(Host,existing_node,
+ [{erl_flags,"-pa " ++ DataDir}]),
+ false = check_cover(Node),
+ {ok,Events} = run_test(ct_cover_add_remove_nodes,ct_cover_add_remove_nodes,
+ [],Config),
+ check_calls(Events,2),
+ {ok,Node} = ct_slave:stop(existing_node),
+ ok.
+
+%% Test that the test suite itself can be cover compiled and that
+%% data_dir is set correctly (OTP-9956)
+otp_9956(Config) ->
+ CoverFile = create_cover_file(otp_9956,[{incl_mods,[?suite]}],Config),
+ {ok,Events} = run_test(otp_9956,otp_9956,[{cover,CoverFile}],Config),
+ check_calls(Events,{?suite,otp_9956,1},1),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% HELP FUNCTIONS
+%%%-----------------------------------------------------------------
+run_test(Label,Config) ->
+ run_test(Label,[],Config).
+run_test(Label,ExtraOpts,Config) ->
+ run_test(Label,default,ExtraOpts,Config).
+run_test(Label,Testcase,ExtraOpts,Config) ->
+ DataDir = ?config(data_dir, Config),
+ Suite = filename:join(DataDir, ?suite),
+ CoverFile =
+ case proplists:get_value(cover,ExtraOpts) of
+ undefined ->
+ create_default_cover_file(Label,Config);
+ CF ->
+ CF
+ end,
+ RestOpts = lists:keydelete(cover,1,ExtraOpts),
+ {Opts,ERPid} = setup([{suite,Suite},{testcase,Testcase},
+ {cover,CoverFile},{label,Label}] ++ RestOpts, Config),
+ execute(Label, Testcase, Opts, ERPid, Config).
+
+setup(Test, Config) ->
+ Opts0 = ct_test_support:get_opts(Config),
+ Level = ?config(trace_level, Config),
+ EvHArgs = [{cbm,ct_test_support},{trace_level,Level}],
+ Opts = Opts0 ++ [{event_handler,{?eh,EvHArgs}}|Test],
+ ERPid = ct_test_support:start_event_receiver(Config),
+ {Opts,ERPid}.
+
+execute(Name, Testcase, Opts, ERPid, Config) ->
+ ok = ct_test_support:run(Opts, Config),
+ Events = ct_test_support:get_events(ERPid, Config),
+
+ ct_test_support:log_events(Name,
+ reformat(Events, ?eh),
+ ?config(priv_dir, Config),
+ Opts),
+ TestEvents = events_to_check(Testcase),
+ R = ct_test_support:verify_events(TestEvents, Events, Config),
+ {R,Events}.
+
+reformat(Events, EH) ->
+ ct_test_support:reformat(Events, EH).
+
+events_to_check(Testcase) ->
+ OneTest =
+ [{?eh,start_logging,{'DEF','RUNDIR'}}] ++
+ [{?eh,tc_done,{?suite,Testcase,ok}}] ++
+ [{?eh,stop_logging,[]}],
+
+ %% 2 tests (ct:run_test + script_start) is default
+ OneTest ++ OneTest.
+
+check_cover(Config) when is_list(Config) ->
+ CTNode = proplists:get_value(ct_node, Config),
+ check_cover(CTNode);
+check_cover(Node) when is_atom(Node) ->
+ case rpc:call(Node,test_server,is_cover,[]) of
+ true ->
+ {true,
+ rpc:call(Node,cover,which_nodes,[]),
+ rpc:call(Node,cover,modules,[])};
+ false ->
+ false
+ end.
+
+%% Check that each coverlog includes N calls to ?mod:foo/0
+check_calls(Events,N) ->
+ check_calls(Events,{?mod,foo,0},N).
+check_calls(Events,MFA,N) ->
+ CoverLogs =
+ [filename:join(filename:dirname(TCLog),"all.coverdata") ||
+ {ct_test_support_eh,
+ {event,tc_logfile,_Node,
+ {{?suite,init_per_suite},TCLog}}} <- Events],
+ do_check_logs(CoverLogs,MFA,N).
+
+do_check_logs([CoverLog|CoverLogs],{Mod,_,_} = MFA,N) ->
+ {ok,_} = cover:start(),
+ ok = cover:import(CoverLog),
+ {ok,Calls} = cover:analyse(Mod,calls,function),
+ ok = cover:stop(),
+ {MFA,N} = lists:keyfind(MFA,1,Calls),
+ do_check_logs(CoverLogs,MFA,N);
+do_check_logs([],_,_) ->
+ ok.
+
+fullname(Name) ->
+ {ok,Host} = inet:gethostname(),
+ list_to_atom(atom_to_list(Name) ++ "@" ++ Host).
+
+default_cover_file_content() ->
+ [{incl_mods,[?mod]}].
+
+create_default_cover_file(Filename,Config) ->
+ create_cover_file(Filename,default_cover_file_content(),Config).
+
+create_cover_file(Filename,Terms,Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ File = filename:join(PrivDir,Filename) ++ ".cover",
+ {ok,Fd} = file:open(File,[write]),
+ lists:foreach(fun(Term) ->
+ file:write(Fd,io_lib:format("~p.~n",[Term]))
+ end,Terms),
+ ok = file:close(Fd),
+ File.
diff --git a/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE.erl b/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE.erl
new file mode 100644
index 0000000000..fdc3323f0a
--- /dev/null
+++ b/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE.erl
@@ -0,0 +1,156 @@
+%%--------------------------------------------------------------------
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+%%----------------------------------------------------------------------
+%% File: cover_SUITE.erl
+%%
+%% Description:
+%% This file contains the test cases for the code coverage support
+%%
+%% @author Support
+%% @doc Test of code coverage support in common_test
+%% @end
+%%----------------------------------------------------------------------
+%%----------------------------------------------------------------------
+-module(cover_SUITE).
+-include_lib("common_test/include/ct.hrl").
+
+-compile(export_all).
+
+%% Default timetrap timeout (set in init_per_testcase).
+-define(default_timeout, ?t:minutes(1)).
+
+suite() ->
+ [].
+
+all() ->
+ [].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(Config) ->
+ Config.
+
+init_per_testcase(_Case, Config) ->
+ Dog = test_server:timetrap(?default_timeout),
+ [{watchdog, Dog}|Config].
+
+end_per_testcase(Case, Config) ->
+ %% try apply(?MODULE,Case,[cleanup,Config])
+ %% catch error:undef -> ok
+ %% end,
+
+ kill_slaves(Case,nodes()),
+ Dog=?config(watchdog, Config),
+ test_server:timetrap_cancel(Dog),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+break(_Config) ->
+ test_server:break(""),
+ ok.
+
+default(Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ cover_test_mod:foo(),
+ ok.
+
+slave(Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ cover_test_mod:foo(),
+ N1 = nodename(slave,1),
+ {ok,Node} = ct_slave:start(N1),
+ cover_compiled = rpc:call(Node,code,which,[cover_test_mod]),
+ rpc:call(Node,cover_test_mod,foo,[]),
+ {ok,Node} = ct_slave:stop(N1),
+ ok.
+
+slave_start_slave(Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ cover_test_mod:foo(),
+ N1 = nodename(slave_start_slave,1),
+ N2 = nodename(slave_start_slave,2),
+ {ok,Node} = ct_slave:start(N1),
+ cover_compiled = rpc:call(Node,code,which,[cover_test_mod]),
+ rpc:call(Node,cover_test_mod,foo,[]),
+ {ok,Node2} = rpc:call(Node,ct_slave,start,[N2]),
+ rpc:call(Node2,cover_test_mod,foo,[]),
+ {ok,Node2} = rpc:call(Node,ct_slave,stop,[N2]),
+ {ok,Node} = ct_slave:stop(N1),
+ ok.
+
+cover_node_option(Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ cover_test_mod:foo(),
+ Node = fullname(existing_node),
+ cover_compiled = rpc:call(Node,code,which,[cover_test_mod]),
+ rpc:call(Node,cover_test_mod,foo,[]),
+ ok.
+
+ct_cover_add_remove_nodes(Config) ->
+ cover_compiled = code:which(cover_test_mod),
+ cover_test_mod:foo(),
+ Node = fullname(existing_node),
+ Beam = rpc:call(Node,code,which,[cover_test_mod]),
+ false = (Beam == cover_compiled),
+
+ rpc:call(Node,cover_test_mod,foo,[]), % should not be collected
+ {ok,[Node]} = ct_cover:add_nodes([Node]),
+ cover_compiled = rpc:call(Node,code,which,[cover_test_mod]),
+ rpc:call(Node,cover_test_mod,foo,[]), % should be collected
+ ok = ct_cover:remove_nodes([Node]),
+ rpc:call(Node,cover_test_mod,foo,[]), % should not be collected
+
+ Beam = rpc:call(Node,code,which,[cover_test_mod]),
+
+ ok.
+
+otp_9956(Config) ->
+ cover_compiled = code:which(?MODULE),
+ DataDir = ?config(data_dir,Config),
+ absolute = filename:pathtype(DataDir),
+ true = filelib:is_dir(DataDir),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Internal
+nodename(Case,N) ->
+ list_to_atom(nodeprefix(Case) ++ integer_to_list(N)).
+
+nodeprefix(Case) ->
+ atom_to_list(?MODULE) ++ "_" ++ atom_to_list(Case) ++ "_node".
+
+
+fullname(Name) ->
+ {ok,Host} = inet:gethostname(),
+ list_to_atom(atom_to_list(Name) ++ "@" ++ Host).
+
+kill_slaves(Case, [Node|Nodes]) ->
+ Prefix = nodeprefix(Case),
+ case lists:prefix(Prefix,atom_to_list(Node)) of
+ true ->
+ rpc:call(Node,erlang,halt,[]);
+ _ ->
+ ok
+ end,
+ kill_slaves(Case,Nodes);
+kill_slaves(_,[]) ->
+ ok.
diff --git a/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE_data/.gitignore b/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE_data/.gitignore
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/lib/common_test/test/ct_cover_SUITE_data/cover_SUITE_data/.gitignore
diff --git a/lib/common_test/test/ct_cover_SUITE_data/cover_test_mod.erl b/lib/common_test/test/ct_cover_SUITE_data/cover_test_mod.erl
new file mode 100644
index 0000000000..d4f69452c3
--- /dev/null
+++ b/lib/common_test/test/ct_cover_SUITE_data/cover_test_mod.erl
@@ -0,0 +1,4 @@
+-module(cover_test_mod).
+-compile(export_all).
+foo() ->
+ ok.
diff --git a/lib/common_test/test/ct_error_SUITE.erl b/lib/common_test/test/ct_error_SUITE.erl
index 338e76264e..6d90b29f41 100644
--- a/lib/common_test/test/ct_error_SUITE.erl
+++ b/lib/common_test/test/ct_error_SUITE.erl
@@ -61,7 +61,7 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[cfg_error, lib_error, no_compile, timetrap_end_conf,
timetrap_normal, timetrap_extended, timetrap_parallel,
- timetrap_fun, misc_errors].
+ timetrap_fun, timetrap_fun_group, misc_errors].
groups() ->
[].
@@ -251,6 +251,24 @@ timetrap_fun(Config) when is_list(Config) ->
%%%-----------------------------------------------------------------
%%%
+timetrap_fun_group(Config) when is_list(Config) ->
+ DataDir = ?config(data_dir, Config),
+ Join = fun(D, S) -> filename:join(D, "error/test/"++S) end,
+ Suites = [Join(DataDir, "timetrap_8_SUITE")],
+ {Opts,ERPid} = setup([{suite,Suites}], Config),
+ ok = ct_test_support:run(Opts, Config),
+ Events = ct_test_support:get_events(ERPid, Config),
+
+ ct_test_support:log_events(timetrap_fun_group,
+ reformat(Events, ?eh),
+ ?config(priv_dir, Config),
+ Opts),
+
+ TestEvents = events_to_check(timetrap_fun_group),
+ ok = ct_test_support:verify_events(TestEvents, Events, Config).
+
+%%%-----------------------------------------------------------------
+%%%
misc_errors(Config) when is_list(Config) ->
DataDir = ?config(data_dir, Config),
Join = fun(D, S) -> filename:join(D, "error/test/"++S) end,
@@ -429,8 +447,7 @@ test_events(cfg_error) ->
{'EXIT',{init_per_group_fails,g1}}}}}}],
[{?eh,tc_start,{cfg_error_8_SUITE,{init_per_group,g2,[]}}},
- {?eh,tc_done,{cfg_error_8_SUITE,
- {init_per_group,unknown,[]},
+ {?eh,tc_done,{cfg_error_8_SUITE,{init_per_group,g2,[]},
{failed,{timetrap_timeout,2000}}}},
{?eh,tc_auto_skip,{cfg_error_8_SUITE,tc1,
{failed,{cfg_error_8_SUITE,init_per_group,
@@ -500,7 +517,7 @@ test_events(cfg_error) ->
{?eh,tc_done,{cfg_error_8_SUITE,tc1,ok}},
{?eh,test_stats,{9,0,{0,14}}},
{?eh,tc_start,{cfg_error_8_SUITE,{end_per_group,g12,[]}}},
- {?eh,tc_done,{cfg_error_8_SUITE,{end_per_group,unknown,[]},
+ {?eh,tc_done,{cfg_error_8_SUITE,{end_per_group,g12,[]},
{failed,{timetrap_timeout,2000}}}}],
{?eh,tc_start,{cfg_error_8_SUITE,end_per_suite}},
@@ -971,11 +988,423 @@ test_events(timetrap_fun) ->
{?eh,stop_logging,[]}
];
+test_events(timetrap_fun_group) ->
+ [
+ {?eh,start_logging,{'DEF','RUNDIR'}},
+ {?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
+ {?eh,start_info,{1,1,58}},
+ {?eh,tc_start,{timetrap_8_SUITE,init_per_suite}},
+ {?eh,tc_done,{timetrap_8_SUITE,init_per_suite,ok}},
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g0,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g0,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{0,1,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{0,2,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g0,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g0,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g1,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g1,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{0,3,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{0,4,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g1,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g1,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g2,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g2,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc1}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{0,5,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{0,6,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g2,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g2,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g3,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g3,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc4}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc4,
+ {failed,{timetrap_timeout,{'$approx',2000}}}}},
+ {?eh,test_stats,{0,7,{0,0}}},
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g1,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g1,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{0,8,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{0,9,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g1,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g1,[]},ok}}],
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g2,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g2,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc1}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{0,10,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{0,11,{0,0}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g2,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g2,[]},ok}}],
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g3,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g3,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g4,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g4,[]},
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{0,11,{0,1}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{0,11,{0,2}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g5,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g5,[]},
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{0,11,{0,3}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{0,11,{0,4}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g6,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g6,[]},
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}},
+ {?eh,test_stats,{0,11,{0,5}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}},
+ {?eh,test_stats,{0,11,{0,6}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g7,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g7,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{1,11,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g7,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g7,[]},
+ {user_timetrap_error,{kaboom,'_'}}}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g8,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g8,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{2,11,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g8,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g8,[]},
+ {failed,{timetrap_timeout,{'$approx',500}}}}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g9,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g9,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{3,11,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,test_stats,{3,12,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g9,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g9,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g10,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g10,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,test_stats,{3,13,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{4,13,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g10,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g10,[]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,g11,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,g11,[]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc3}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc3,
+ {failed,{timetrap_timeout,{'$approx',4000}}}}},
+ {?eh,test_stats,{4,14,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,15,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,g11,[]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,g11,[]},ok}}],
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg0,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg0,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{4,16,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,17,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg0,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg0,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg1,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg1,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{4,18,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,19,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg1,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg1,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg2,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg2,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc1}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{4,20,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,21,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg2,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg2,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg3,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg3,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc4}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc4,
+ {failed,{timetrap_timeout,{'$approx',2000}}}}},
+ {?eh,test_stats,{4,22,{0,6}}},
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg1,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg1,[parallel]},
+ ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{4,23,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,24,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg1,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg1,[parallel]},
+ ok}}]},
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg2,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg2,[parallel]},
+ ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc1}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{4,25,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{4,26,{0,6}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg2,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg2,[parallel]},
+ ok}}]},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg3,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg3,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg4,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg4,[parallel]},
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{4,26,{0,7}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{4,26,{0,8}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg5,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg5,[parallel]},
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{4,26,{0,9}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}},
+ {?eh,test_stats,{4,26,{0,10}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {user_timetrap_error,{kaboom,'_'}}}}}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg6,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg6,[parallel]},
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}},
+ {?eh,test_stats,{4,26,{0,11}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}},
+ {?eh,test_stats,{4,26,{0,12}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,end_per_group,
+ {failed,{timetrap_8_SUITE,init_per_group,
+ {timetrap_timeout,'_'}}}}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg7,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg7,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{5,26,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg7,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg7,[parallel]},
+ {user_timetrap_error,{kaboom,'_'}}}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg8,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg8,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{6,26,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg8,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg8,[parallel]},
+ {failed,{timetrap_timeout,{'$approx',500}}}}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg9,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg9,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {user_timetrap_error,{kaboom,'_'}}}},
+ %% Due to parallelism, only the final test_stats event in the group is checked
+ {?eh,test_stats,{7,27,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg9,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg9,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg10,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg10,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ %% Due to parallelism, only the final test_stats event in the group is checked
+ {?eh,test_stats,{8,28,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg10,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg10,[parallel]},ok}}]},
+
+ {parallel,
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,pg11,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,pg11,[parallel]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc3}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc3,
+ {failed,{timetrap_timeout,{'$approx',4000}}}}},
+ {?eh,test_stats,{8,29,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc2}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_timeout,{'$approx',500}}}}},
+ {?eh,test_stats,{8,30,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,pg11,[parallel]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,pg11,[parallel]},ok}}]},
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,sg1,[sequence]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,sg1,[sequence]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{9,30,{0,12}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {user_timetrap_error,{kaboom,'_'}}}},
+ {?eh,test_stats,{9,31,{0,12}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_8_SUITE,tc0}}}},
+ {?eh,test_stats,{9,31,{0,13}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,tc0}}}},
+ {?eh,test_stats,{9,31,{0,14}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,sg1,[sequence]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,sg1,[sequence]},ok}}],
+
+ [{?eh,tc_start,{timetrap_8_SUITE,{init_per_group,sg2,[sequence]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{init_per_group,sg2,[sequence]},ok}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc5}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc5,ok}},
+ {?eh,test_stats,{10,31,{0,14}}},
+ {?eh,tc_start,{timetrap_8_SUITE,tc0}},
+ {?eh,tc_done,{timetrap_8_SUITE,tc0,
+ {failed,{timetrap_timeout,{'$approx',1000}}}}},
+ {?eh,test_stats,{10,32,{0,14}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc1,
+ {failed,{timetrap_8_SUITE,tc0}}}},
+ {?eh,test_stats,{10,32,{0,15}}},
+ {?eh,tc_auto_skip,{timetrap_8_SUITE,tc2,
+ {failed,{timetrap_8_SUITE,tc0}}}},
+ {?eh,test_stats,{10,32,{0,16}}},
+ {?eh,tc_start,{timetrap_8_SUITE,{end_per_group,sg2,[sequence]}}},
+ {?eh,tc_done,{timetrap_8_SUITE,{end_per_group,sg2,[sequence]},ok}}],
+
+ {?eh,tc_start,{timetrap_8_SUITE,end_per_suite}},
+ {?eh,tc_done,{timetrap_8_SUITE,end_per_suite,ok}},
+ {?eh,test_done,{'DEF','STOP_TIME'}},
+ {?eh,stop_logging,[]}
+ ];
+
test_events(misc_errors) ->
[
{?eh,start_logging,{'DEF','RUNDIR'}},
{?eh,test_start,{'DEF',{'START_TIME','LOGDIR'}}},
- {?eh,start_info,{1,1,7}},
+ {?eh,start_info,{1,1,9}},
{?eh,tc_start,{misc_error_1_SUITE,ct_fail_1}},
{?eh,tc_done,{misc_error_1_SUITE,ct_fail_1,
{failed,{error,{test_case_failed,{error,this_is_expected}}}}}},
@@ -1002,7 +1431,12 @@ test_events(misc_errors) ->
{?eh,tc_start,{misc_error_1_SUITE,killed_by_signal_2}},
{?eh,tc_done,{misc_error_1_SUITE,killed_by_signal_2,
{failed,testcase_aborted_or_killed}}},
- {?eh,test_stats,{0,7,{0,0}}},
+ {parallel,
+ [{?eh,tc_start,{misc_error_1_SUITE,p1}},
+ {?eh,tc_done,{misc_error_1_SUITE,p1,ok}},
+ {?eh,tc_start,{misc_error_1_SUITE,p2}},
+ {?eh,tc_done,{misc_error_1_SUITE,p2,ok}}]},
+ {?eh,test_stats,{2,7,{0,0}}},
{?eh,test_done,{'DEF','STOP_TIME'}},
{?eh,stop_logging,[]}
].
diff --git a/lib/common_test/test/ct_error_SUITE_data/error/test/misc_error_1_SUITE.erl b/lib/common_test/test/ct_error_SUITE_data/error/test/misc_error_1_SUITE.erl
index 99c3ed05ec..61f3fa7e59 100644
--- a/lib/common_test/test/ct_error_SUITE_data/error/test/misc_error_1_SUITE.erl
+++ b/lib/common_test/test/ct_error_SUITE_data/error/test/misc_error_1_SUITE.erl
@@ -96,7 +96,7 @@ end_per_testcase(_TestCase, _Config) ->
%% N = integer() | forever
%%--------------------------------------------------------------------
groups() ->
- [].
+ [{p,[parallel],[p1,p2]}].
%%--------------------------------------------------------------------
%% Function: all() -> GroupsAndTestCases | {skip,Reason}
@@ -107,7 +107,8 @@ groups() ->
%%--------------------------------------------------------------------
all() ->
[ct_fail_1, ct_fail_2, ct_fail_3, ts_fail_1, ts_fail_2,
- killed_by_signal_1, killed_by_signal_2].
+ killed_by_signal_1, killed_by_signal_2,
+ {group,p}].
ct_fail_1(_) ->
ct:fail({error,this_is_expected}),
@@ -152,3 +153,10 @@ killed_by_signal_2(_) ->
end),
ct:sleep(1000),
exit(this_should_not_be_seen).
+
+p1(_) ->
+ {error,parallel_group} = ct:abort_current_testcase(aborted),
+ ok.
+
+p2(_) ->
+ receive after 1000 -> ok end.
diff --git a/lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_8_SUITE.erl b/lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_8_SUITE.erl
new file mode 100644
index 0000000000..ff138f38b5
--- /dev/null
+++ b/lib/common_test/test/ct_error_SUITE_data/error/test/timetrap_8_SUITE.erl
@@ -0,0 +1,258 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2012. All Rights Reserved.
+%%
+%% The contents of this file are subject to the Erlang Public License,
+%% Version 1.1, (the "License"); you may not use this file except in
+%% compliance with the License. You should have received a copy of the
+%% Erlang Public License along with this software. If not, it can be
+%% retrieved online at http://www.erlang.org/.
+%%
+%% Software distributed under the License is distributed on an "AS IS"
+%% basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+%% the License for the specific language governing rights and limitations
+%% under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(timetrap_8_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+
+-define(TO, 4).
+
+%%--------------------------------------------------------------------
+%% Function: suite() -> Info
+%% Info = [tuple()]
+%%--------------------------------------------------------------------
+suite() ->
+ [{timetrap,{timetrap_utils,timetrap_val,[{seconds,?TO}]}}].
+
+%%--------------------------------------------------------------------
+%% Function: init_per_suite(Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_suite(Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_suite(Config0) -> void() | {save_config,Config1}
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_suite(_Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: init_per_group(GroupName, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_group(G6, Config) when G6==g6; G6==pg6 ->
+ ct:sleep({seconds,1}),
+ Config;
+init_per_group(_GroupName, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_group(GroupName, Config0) ->
+%% void() | {save_config,Config1}
+%% GroupName = atom()
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_group(G7or8, _Config) when G7or8==g7; G7or8==pg7; G7or8==g8; G7or8==pg8 ->
+ ct:sleep({seconds,5}),
+ ok;
+end_per_group(_GroupName, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: init_per_testcase(TestCase, Config0) ->
+%% Config1 | {skip,Reason} | {skip_and_save,Reason,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%% Reason = term()
+%%--------------------------------------------------------------------
+init_per_testcase(_, Config) ->
+ Config.
+
+%%--------------------------------------------------------------------
+%% Function: end_per_testcase(TestCase, Config0) ->
+%% void() | {save_config,Config1}
+%% TestCase = atom()
+%% Config0 = Config1 = [tuple()]
+%%--------------------------------------------------------------------
+end_per_testcase(_, _Config) ->
+ ok.
+
+%%--------------------------------------------------------------------
+%% Function: groups() -> [Group]
+%% Group = {GroupName,Properties,GroupsAndTestCases}
+%% GroupName = atom()
+%% Properties = [parallel | sequence | Shuffle | {RepeatType,N}]
+%% GroupsAndTestCases = [Group | {group,GroupName} | TestCase]
+%% TestCase = atom()
+%% Shuffle = shuffle | {shuffle,{integer(),integer(),integer()}}
+%% RepeatType = repeat | repeat_until_all_ok | repeat_until_all_fail |
+%% repeat_until_any_ok | repeat_until_any_fail
+%% N = integer() | forever
+%%--------------------------------------------------------------------
+groups() ->
+ [
+ {g0,[],[tc0,tc2]}, % group overrides suite and tc overrides group
+ {g1,[],[tc0,tc2]}, % group overrides suite and tc overrides group
+ {g2,[],[tc1,tc2]}, % tc overrides group
+ {g3,[],[tc4,{group,g1},{group,g2}]}, % subgroup overrides group
+ {g4,[],[tc0,tc2]}, % exit during init_per_group
+ {g5,[],[tc0,tc2]}, % exit during init_per_group
+ {g6,[],[tc0,tc2]}, % timeout during init_per_group
+ {g7,[],[tc5]}, % exit during end_per_group
+ {g8,[],[tc5]}, % timeout during end_per_group
+ {g9,[],[tc5,tc0]}, % exit during testcase
+ {g10,[],[tc0,tc5]}, % exit during testcase
+ {g11,[],[tc3,tc2]}, % suite is valid if nothing else is specified
+ {pg0,[parallel],[tc0,tc2]}, % group overrides suite and tc overrides group
+ {pg1,[parallel],[tc0,tc2]}, % group overrides suite and tc overrides group
+ {pg2,[parallel],[tc1,tc2]}, % tc overrides group
+ {pg3,[parallel],[tc4,{group,pg1},{group,pg2}]}, % subgroup overrides group
+ {pg4,[parallel],[tc0,tc2]}, % exit during init_per_group
+ {pg5,[parallel],[tc0,tc2]}, % exit during init_per_group
+ {pg6,[parallel],[tc0,tc2]}, % timeout during init_per_group
+ {pg7,[parallel],[tc5]}, % exit during end_per_group
+ {pg8,[parallel],[tc5]}, % timeout during end_per_group
+ {pg9,[parallel],[tc5,tc0]}, % exit during testcase
+ {pg10,[parallel],[tc0,tc5]},% exit during testcase
+ {pg11,[parallel],[tc3,tc2]},% suite is valid if nothing else is specified
+ {sg1,[sequence],[tc5,tc0,tc1,tc2]}, % exit during sequential testcase
+ {sg2,[sequence],[tc5,tc0,tc1,tc2]}].% timeout during sequential testcase
+
+group(g0) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[{seconds,1}]}}];
+group(g1) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(1000) end}];
+group(g2) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(3000) end}];
+group(g3) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(2000) end}];
+group(g4) ->
+ [{timetrap,{timetrap_utils,timetrap_exit,[kaboom]}}];
+group(g5) ->
+ [{timetrap,fun() -> exit(kaboom) end}];
+group(g6) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[500]}}];
+group(g7) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(g8) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[500]}}];
+group(g9) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(g10) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(g11) ->
+ [];
+group(pg0) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[{seconds,1}]}}];
+group(pg1) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(1000) end}];
+group(pg2) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(3000) end}];
+group(pg3) ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(2000) end}];
+group(pg4) ->
+ [{timetrap,{timetrap_utils,timetrap_exit,[kaboom]}}];
+group(pg5) ->
+ [{timetrap,fun() -> exit(kaboom) end}];
+group(pg6) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[500]}}];
+group(pg7) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(pg8) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[500]}}];
+group(pg9) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(pg10) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(pg11) ->
+ [];
+group(sg1) ->
+ [{timetrap,fun() -> ct:sleep(1000),exit(kaboom) end}];
+group(sg2) ->
+ [{timetrap,{timetrap_utils,timetrap_val,[{seconds,1}]}}].
+
+
+%%--------------------------------------------------------------------
+%% Function: all() -> GroupsAndTestCases | {skip,Reason}
+%% GroupsAndTestCases = [{group,GroupName} | TestCase]
+%% GroupName = atom()
+%% TestCase = atom()
+%% Reason = term()
+%%--------------------------------------------------------------------
+all() ->
+ [
+ {group,g0},
+ {group,g1},
+ {group,g2},
+ {group,g3},
+ {group,g4},
+ {group,g5},
+ {group,g6},
+ {group,g7},
+ {group,g8},
+ {group,g9},
+ {group,g10},
+ {group,g11},
+ {group,pg0},
+ {group,pg1},
+ {group,pg2},
+ {group,pg3},
+ {group,pg4},
+ {group,pg5},
+ {group,pg6},
+ {group,pg7},
+ {group,pg8},
+ {group,pg9},
+ {group,pg10},
+ {group,pg11},
+ {group,sg1},
+ {group,sg2}].
+
+
+
+tc0(_) ->
+ ct:comment("TO set by group"),
+ ct:sleep({seconds,5}),
+ ok.
+
+tc1() ->
+ [{timetrap,{timetrap_utils,timetrap_val,[1000]}}].
+tc1(_) ->
+ ct:comment("TO after 1 sec"),
+ ct:sleep({seconds,2}),
+ ok.
+
+tc2() ->
+ [{timetrap,fun() -> timetrap_utils:timetrap_val(500) end}].
+tc2(_) ->
+ ct:comment("TO after 0.5 sec"),
+ ct:sleep({seconds,2}),
+ ok.
+
+tc3(_) ->
+ ct:comment(io_lib:format("TO after ~w sec", [?TO])),
+ ct:sleep({seconds,5}),
+ ok.
+
+tc4(_) ->
+ ct:comment("TO set by group"),
+ ct:sleep({seconds,5}),
+ ok.
+
+tc5(_) ->
+ ct:comment("No TO in this testcase, maybe later"),
+ ok.
diff --git a/lib/common_test/test/ct_netconfc_SUITE.erl b/lib/common_test/test/ct_netconfc_SUITE.erl
index e6e8d5b09c..3042a924fe 100644
--- a/lib/common_test/test/ct_netconfc_SUITE.erl
+++ b/lib/common_test/test/ct_netconfc_SUITE.erl
@@ -44,7 +44,12 @@
%%--------------------------------------------------------------------
init_per_suite(Config) ->
Config1 = ct_test_support:init_per_suite(Config),
- Config1.
+ case application:load(crypto) of
+ {error,Reason} ->
+ {skip, Reason};
+ _ ->
+ Config1
+ end.
end_per_suite(Config) ->
ct_test_support:end_per_suite(Config).
@@ -108,7 +113,7 @@ reformat(Events, EH) ->
%%%-----------------------------------------------------------------
%%% TEST EVENTS
%%%-----------------------------------------------------------------
-events_to_check(Test,Config) ->
+events_to_check(default,Config) ->
{module,_} = code:load_abs(filename:join(?config(data_dir,Config),
netconfc1_SUITE)),
TCs = netconfc1_SUITE:all(),
diff --git a/lib/common_test/test/ct_test_support.erl b/lib/common_test/test/ct_test_support.erl
index 80cca4a1cc..901e53c71d 100644
--- a/lib/common_test/test/ct_test_support.erl
+++ b/lib/common_test/test/ct_test_support.erl
@@ -117,7 +117,10 @@ end_per_suite(Config) ->
CTNode = proplists:get_value(ct_node, Config),
PrivDir = proplists:get_value(priv_dir, Config),
true = rpc:call(CTNode, code, del_path, [filename:join(PrivDir,"")]),
- cover:stop(CTNode),
+ case test_server:is_cover() of
+ true -> cover:flush(CTNode);
+ false -> ok
+ end,
slave:stop(CTNode),
ok.
@@ -149,7 +152,10 @@ end_per_testcase(_TestCase, Config) ->
case wait_for_ct_stop(CTNode) of
%% Common test was not stopped, so we restart the node.
false ->
- cover:stop(CTNode),
+ case test_server:is_cover() of
+ true -> cover:flush(CTNode);
+ false -> ok
+ end,
slave:stop(CTNode),
start_slave(Config,proplists:get_value(trace_level,Config)),
{fail, "Could not stop common_test"};
@@ -612,8 +618,11 @@ locate({parallel,TEvs}, Node, Evs, Config) ->
fun({EH,#event{name=tc_auto_skip,
node=EvNode,
data={Mod,end_per_group,Reason}}}) when
- EH == TEH, EvNode == Node, Mod == M, Reason == R ->
- false;
+ EH == TEH, EvNode == Node, Mod == M ->
+ case match_data(R, Reason) of
+ match -> false;
+ _ -> true
+ end;
({EH,#event{name=stop_logging,
node=EvNode,data=_}}) when
EH == TEH, EvNode == Node ->
@@ -627,23 +636,12 @@ locate({parallel,TEvs}, Node, Evs, Config) ->
[_AutoSkip | RemEvs2] ->
{Done,RemEvs2,length(RemEvs2)}
end;
- %% match other event than test case
- (TEv={TEH,N,D}, Acc) when D == '_' ->
- case [E || E={EH,#event{name=Name,
- node=EvNode,
- data=_}} <- Evs1,
- EH == TEH, EvNode == Node, Name == N] of
- [] ->
- exit({unmatched,TEv});
- _ ->
- test_server:format("Found ~p!", [TEv]),
- Acc
- end;
(TEv={TEH,N,D}, Acc) ->
case [E || E={EH,#event{name=Name,
node=EvNode,
data=Data}} <- Evs1,
- EH == TEH, EvNode == Node, Name == N, Data == D] of
+ EH == TEH, EvNode == Node, Name == N,
+ match == match_data(D,Data)] of
[] ->
exit({unmatched,TEv});
_ ->
@@ -1002,33 +1000,39 @@ locate({TEH,Name,Data}, Node, [{TEH,#event{name=Name,
data = EvData,
node = Node}}|Evs],
Config) ->
- try match_data(Data, EvData) of
+ case match_data(Data, EvData) of
match ->
- {Config,Evs}
- catch _:_ ->
+ {Config,Evs};
+ _ ->
nomatch
end;
locate({_TEH,_Name,_Data}, _Node, [_|_Evs], _Config) ->
nomatch.
-match_data(D,D) ->
+match_data(Data, EvData) ->
+ try do_match_data(Data, EvData)
+ catch _:_ ->
+ nomatch
+ end.
+
+do_match_data(D,D) ->
match;
-match_data('_',_) ->
+do_match_data('_',_) ->
match;
-match_data(Fun,Data) when is_function(Fun) ->
+do_match_data(Fun,Data) when is_function(Fun) ->
Fun(Data);
-match_data('$proplist',Proplist) ->
- match_data(
+do_match_data('$proplist',Proplist) ->
+ do_match_data(
fun(List) ->
lists:foreach(fun({_,_}) -> ok end,List)
end,Proplist);
-match_data([H1|MatchT],[H2|ValT]) ->
- match_data(H1,H2),
- match_data(MatchT,ValT);
-match_data(Tuple1,Tuple2) when is_tuple(Tuple1),is_tuple(Tuple2) ->
- match_data(tuple_to_list(Tuple1),tuple_to_list(Tuple2));
-match_data([],[]) ->
+do_match_data([H1|MatchT],[H2|ValT]) ->
+ do_match_data(H1,H2),
+ do_match_data(MatchT,ValT);
+do_match_data(Tuple1,Tuple2) when is_tuple(Tuple1),is_tuple(Tuple2) ->
+ do_match_data(tuple_to_list(Tuple1),tuple_to_list(Tuple2));
+do_match_data([],[]) ->
match.
result_match({SkipOrFail,{ErrorInd,{Why,'_'}}},
@@ -1043,6 +1047,9 @@ result_match({failed,{timetrap_timeout,{'$approx',Num}}},
Value =< trunc(Num+0.02*Num) -> true;
true -> false
end;
+result_match({user_timetrap_error,{Why,'_'}},
+ {user_timetrap_error,{Why,_Stack}}) ->
+ true;
result_match(Result, Result) ->
true;
result_match(_, _) ->
diff --git a/lib/compiler/src/v3_codegen.erl b/lib/compiler/src/v3_codegen.erl
index 3b73269545..6a13495523 100644
--- a/lib/compiler/src/v3_codegen.erl
+++ b/lib/compiler/src/v3_codegen.erl
@@ -379,6 +379,7 @@ bsm_rename_ctx(#l{ke={test,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={bif,_,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={gc_bif,_,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={set,_,_}}=L, _, _, _) -> L;
+bsm_rename_ctx(#l{ke={call,_,_,_}}=L, _, _, _) -> L;
bsm_rename_ctx(#l{ke={block,_}}=L, Old, _, false) ->
%% This block is not inside a protected. The match context variable cannot
%% possibly be live inside the block.
diff --git a/lib/compiler/test/bs_match_SUITE.erl b/lib/compiler/test/bs_match_SUITE.erl
index 1bef409be0..bcceb889ed 100644
--- a/lib/compiler/test/bs_match_SUITE.erl
+++ b/lib/compiler/test/bs_match_SUITE.erl
@@ -802,12 +802,29 @@ matching_and_andalso(Config) when is_list(Config) ->
?line {'EXIT',{function_clause,_}} = (catch matching_and_andalso_1(<<1,2,3>>, -8)),
?line {'EXIT',{function_clause,_}} = (catch matching_and_andalso_1(<<1,2,3>>, blurf)),
?line {'EXIT',{function_clause,_}} = (catch matching_and_andalso_1(<<1,2,3>>, 19)),
+
+ {"abc",<<"xyz">>} = matching_and_andalso_2("abc", <<"-xyz">>),
+ {"abc",<<"">>} = matching_and_andalso_2("abc", <<($a-1)>>),
+ {"abc",<<"">>} = matching_and_andalso_2("abc", <<($z+1)>>),
+ {"abc",<<"">>} = matching_and_andalso_2("abc", <<($A-1)>>),
+ {"abc",<<"">>} = matching_and_andalso_2("abc", <<($Z+1)>>),
+ error = matching_and_andalso_2([], <<>>),
+ error = matching_and_andalso_2([], <<$A>>),
+ error = matching_and_andalso_2([], <<$Z>>),
+ error = matching_and_andalso_2([], <<$a>>),
+ error = matching_and_andalso_2([], <<$z>>),
ok.
matching_and_andalso_1(<<Bitmap/binary>>, K)
when is_integer(K) andalso size(Bitmap) >= K andalso 0 < K ->
ok.
+matching_and_andalso_2(Datetime, <<H,T/binary>>)
+ when not ((H >= $a) andalso (H =< $z)) andalso
+ not ((H >= $A) andalso (H =< $Z)) ->
+ {Datetime,T};
+matching_and_andalso_2(_, _) -> error.
+
%% Thanks to Tomas Stejskal.
otp_7188(Config) when is_list(Config) ->
MP3 = <<84,65,71,68,117,154,105,232,107,121,0,0,0,0,0,0,0,0,0,0,
diff --git a/lib/compiler/test/receive_SUITE.erl b/lib/compiler/test/receive_SUITE.erl
index 55aaa0e5d9..82c823b789 100644
--- a/lib/compiler/test/receive_SUITE.erl
+++ b/lib/compiler/test/receive_SUITE.erl
@@ -43,8 +43,8 @@ all() ->
[{group,p}].
groups() ->
- {p,test_lib:parallel(),
- [recv,coverage,otp_7980,ref_opt,export]}.
+ [{p,test_lib:parallel(),
+ [recv,coverage,otp_7980,ref_opt,export]}].
init_per_suite(Config) ->
diff --git a/lib/diameter/.gitignore b/lib/diameter/.gitignore
index 5afcbedc23..8b13789179 100644
--- a/lib/diameter/.gitignore
+++ b/lib/diameter/.gitignore
@@ -1,58 +1 @@
-# Match at any level.
-*~
-autom4te.cache
-
-# Compiler derivatives
-#
-# Do not use too creative wildcards.
-# Those might ignore files that should not be ignored.
-
-i686-pc-linux-gnu
-x86_64-unknown-linux-gnu
-i386-apple-darwin[0-9]*.[0-9]*.[0-9]*
-sparc-sun-solaris[0-9]*.[0-9]*
-i386-pc-solaris[0-9]*.[0-9]*
-i386-unknown-freebsd[0-9]*.[0-9]*
-tile-tilera-linux-gnu
-powerpc-unknown-linux-gnu
-
-# Mac OS X
-a.out.dSYM/
-
-# Anchored from $DIAMETER_TOP
-/config.log
-/config.status
-
-/Makefile
-/configure
-
-
-# General patterns for applications in lib.
-#
-# Assume that all test/Emakefiles are generated.
-#
-# Any application with a checked-in test/Emakefile should
-# use a negative pattern in its own .gitignore.
-
-#
-# Files generated by configure.
-#
-
-/configure
-/config.log
-/config.status
-
-
-#
-# Generated documentation. (ie. not doc/src)
-#
-
-/doc/[^s]*
-
-
-#
-# Files generated when building/running tests
-#
-
-/test/*.log
diff --git a/lib/diameter/Makefile b/lib/diameter/Makefile
new file mode 100644
index 0000000000..9961d627cf
--- /dev/null
+++ b/lib/diameter/Makefile
@@ -0,0 +1,32 @@
+#
+# %CopyrightBegin%
+#
+# Copyright Ericsson AB 2010-2012. All Rights Reserved.
+#
+# The contents of this file are subject to the Erlang Public License,
+# Version 1.1, (the "License"); you may not use this file except in
+# compliance with the License. You should have received a copy of the
+# Erlang Public License along with this software. If not, it can be
+# retrieved online at http://www.erlang.org/.
+#
+# Software distributed under the License is distributed on an "AS IS"
+# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
+# the License for the specific language governing rights and limitations
+# under the License.
+#
+# %CopyrightEnd%
+
+include $(ERL_TOP)/make/target.mk
+include $(ERL_TOP)/make/$(TARGET)/otp.mk
+include vsn.mk
+include subdirs.mk
+
+SUB_DIRECTORIES = $(SUB_DIRS) doc/src
+SPECIAL_TARGETS =
+
+include $(ERL_TOP)/make/otp_subdir.mk
+
+info:
+ @echo "APP_VSN = $(APP_VSN)"
+
+.PHONY: info
diff --git a/lib/diameter/Makefile.in b/lib/diameter/Makefile.in
deleted file mode 100644
index cf38c26045..0000000000
--- a/lib/diameter/Makefile.in
+++ /dev/null
@@ -1,88 +0,0 @@
-#
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2010-2011. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-
-ifneq ($(ERL_TOP),)
-include $(ERL_TOP)/make/target.mk
-include $(ERL_TOP)/make/$(TARGET)/otp.mk
-else
-include $(DIAMETER_TOP)/make/target.mk
-include $(DIAMETER_TOP)/make/$(TARGET)/rules.mk
-endif
-
-# ----------------------------------------------------
-# Application version
-# ----------------------------------------------------
-include vsn.mk
-VSN=$(DIAMETER_VSN)
-
-DIAMETER_TOP = @DIAMETER_TOP@
-
-
-# ----------------------------------------------------
-# Common Macros
-# ----------------------------------------------------
-
-include subdirs.mk
-
-SUB_DIRECTORIES = $(SUB_DIRS) doc/src
-
-SPECIAL_TARGETS =
-
-ifneq ($(ERL_TOP),)
-ifneq ($(PREFIX),)
-CONFIGURE_OPTS += --prefix=$(PREFIX)
-endif
-endif
-
-
-# ----------------------------------------------------
-# Default Subdir Targets
-# ----------------------------------------------------
-ifneq ($(ERL_TOP),)
-include $(ERL_TOP)/make/otp_subdir.mk
-else
-include $(DIAMETER_TOP)/make/subdir.mk
-endif
-
-.PHONY: reconf conf info version dialyzer
-
-reconf:
- autoconf
-
-conf: do_configure
-
-do_configure: configure
- ./configure $(CONFIGURE_OPTS)
-
-configure: configure.in
- autoconf
-
-info:
- @echo "APP_VSN: $(APP_VSN)"
- @echo "DIAMETER_VSN: $(DIAMETER_VSN)"
-
-version:
- @echo "$(VSN)"
-
-
-dialyzer:
- (cd ./ebin; \
- dialyzer --build_plt \
- --output_plt ../priv/diameter.plt \
- -r ../../diameter/ebin \
- --verbose)
diff --git a/lib/diameter/aclocal.m4 b/lib/diameter/aclocal.m4
deleted file mode 100644
index 2abb47dba2..0000000000
--- a/lib/diameter/aclocal.m4
+++ /dev/null
@@ -1,65 +0,0 @@
-dnl
-dnl %CopyrightBegin%
-dnl
-dnl Copyright Ericsson AB 1998-2011. All Rights Reserved.
-dnl
-dnl The contents of this file are subject to the Erlang Public License,
-dnl Version 1.1, (the "License"); you may not use this file except in
-dnl compliance with the License. You should have received a copy of the
-dnl Erlang Public License along with this software. If not, it can be
-dnl retrieved online at http://www.erlang.org/.
-dnl
-dnl Software distributed under the License is distributed on an "AS IS"
-dnl basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-dnl the License for the specific language governing rights and limitations
-dnl under the License.
-dnl
-dnl %CopyrightEnd%
-dnl
-
-dnl
-dnl aclocal.m4
-dnl
-dnl Local macros used in configure.in. The Local Macros which
-dnl could/should be part of autoconf are prefixed LM_, macros specific
-dnl to the Erlang system are prefixed ERL_.
-dnl
-
-dnl ----------------------------------------------------------------------
-dnl
-dnl LM_PROG_INSTALL_DIR
-dnl
-dnl Figure out how to create directories with parents.
-dnl (In my opinion INSTALL_DIR is a bad name, MKSUBDIRS or something is better)
-dnl
-dnl We prefer 'install -d', but use 'mkdir -p' if it exists.
-dnl If none of these methods works, we give up.
-dnl
-
-
-AC_DEFUN(LM_PROG_INSTALL_DIR,
-[AC_CACHE_CHECK(how to create a directory including parents,
-ac_cv_prog_mkdir_p,
-[
-temp_name_base=config.$$
-temp_name=$temp_name_base/x/y/z
-$INSTALL -d $temp_name >/dev/null 2>&1
-ac_cv_prog_mkdir_p=none
-if test -d $temp_name; then
- ac_cv_prog_mkdir_p="$INSTALL -d"
-else
- mkdir -p $temp_name >/dev/null 2>&1
- if test -d $temp_name; then
- ac_cv_prog_mkdir_p="mkdir -p"
- fi
-fi
-rm -fr $temp_name_base
-])
-
-case "${ac_cv_prog_mkdir_p}" in
- none) AC_MSG_ERROR(don't know how create directories with parents) ;;
- *) INSTALL_DIR="$ac_cv_prog_mkdir_p" AC_SUBST(INSTALL_DIR) ;;
-esac
-])
-
-
diff --git a/lib/diameter/autoconf/config.guess b/lib/diameter/autoconf/config.guess
deleted file mode 100755
index 38a833903b..0000000000
--- a/lib/diameter/autoconf/config.guess
+++ /dev/null
@@ -1,1519 +0,0 @@
-#! /bin/sh
-# Attempt to guess a canonical system name.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
-# Inc.
-
-timestamp='2007-05-17'
-
-# This file is free software; you can redistribute it and/or modify it
-# under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful, but
-# WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-# General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Originally written by Per Bothner <[email protected]>.
-# Please send patches to <[email protected]>. Submit a context
-# diff and a properly formatted ChangeLog entry.
-#
-# This script attempts to guess a canonical system name similar to
-# config.sub. If it succeeds, it prints the system name on stdout, and
-# exits with 0. Otherwise, it exits with 1.
-#
-# The plan is that this can be called by configure scripts if you
-# don't specify an explicit build system type.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION]
-
-Output the configuration name of the system \`$me' is run on.
-
-Operation modes:
- -h, --help print this help, then exit
- -t, --time-stamp print date of last modification, then exit
- -v, --version print version number, then exit
-
-Report bugs and patches to <[email protected]>."
-
-version="\
-GNU config.guess ($timestamp)
-
-Originally written by Per Bothner.
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
-Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
- case $1 in
- --time-stamp | --time* | -t )
- echo "$timestamp" ; exit ;;
- --version | -v )
- echo "$version" ; exit ;;
- --help | --h* | -h )
- echo "$usage"; exit ;;
- -- ) # Stop option processing
- shift; break ;;
- - ) # Use stdin as input.
- break ;;
- -* )
- echo "$me: invalid option $1$help" >&2
- exit 1 ;;
- * )
- break ;;
- esac
-done
-
-if test $# != 0; then
- echo "$me: too many arguments$help" >&2
- exit 1
-fi
-
-trap 'exit 1' 1 2 15
-
-# CC_FOR_BUILD -- compiler used by this script. Note that the use of a
-# compiler to aid in system detection is discouraged as it requires
-# temporary files to be created and, as you can see below, it is a
-# headache to deal with in a portable fashion.
-
-# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still
-# use `HOST_CC' if defined, but it is deprecated.
-
-# Portable tmp directory creation inspired by the Autoconf team.
-
-set_cc_for_build='
-trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ;
-trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ;
-: ${TMPDIR=/tmp} ;
- { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } ||
- { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } ||
- { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } ||
- { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ;
-dummy=$tmp/dummy ;
-tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ;
-case $CC_FOR_BUILD,$HOST_CC,$CC in
- ,,) echo "int x;" > $dummy.c ;
- for c in cc gcc c89 c99 ; do
- if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then
- CC_FOR_BUILD="$c"; break ;
- fi ;
- done ;
- if test x"$CC_FOR_BUILD" = x ; then
- CC_FOR_BUILD=no_compiler_found ;
- fi
- ;;
- ,,*) CC_FOR_BUILD=$CC ;;
- ,*,*) CC_FOR_BUILD=$HOST_CC ;;
-esac ; set_cc_for_build= ;'
-
-# This is needed to find uname on a Pyramid OSx when run in the BSD universe.
-# ([email protected] 1994-08-24)
-if (test -f /.attbin/uname) >/dev/null 2>&1 ; then
- PATH=$PATH:/.attbin ; export PATH
-fi
-
-UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown
-UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown
-UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown
-UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown
-
-# Note: order is significant - the case branches are not exclusive.
-
-case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in
- *:NetBSD:*:*)
- # NetBSD (nbsd) targets should (where applicable) match one or
- # more of the tupples: *-*-netbsdelf*, *-*-netbsdaout*,
- # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently
- # switched to ELF, *-*-netbsd* would select the old
- # object file format. This provides both forward
- # compatibility and a consistent mechanism for selecting the
- # object file format.
- #
- # Note: NetBSD doesn't particularly care about the vendor
- # portion of the name. We always set it to "unknown".
- sysctl="sysctl -n hw.machine_arch"
- UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \
- /usr/sbin/$sysctl 2>/dev/null || echo unknown)`
- case "${UNAME_MACHINE_ARCH}" in
- armeb) machine=armeb-unknown ;;
- arm*) machine=arm-unknown ;;
- sh3el) machine=shl-unknown ;;
- sh3eb) machine=sh-unknown ;;
- sh5el) machine=sh5le-unknown ;;
- *) machine=${UNAME_MACHINE_ARCH}-unknown ;;
- esac
- # The Operating System including object format, if it has switched
- # to ELF recently, or will in the future.
- case "${UNAME_MACHINE_ARCH}" in
- arm*|i386|m68k|ns32k|sh3*|sparc|vax)
- eval $set_cc_for_build
- if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \
- | grep __ELF__ >/dev/null
- then
- # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout).
- # Return netbsd for either. FIX?
- os=netbsd
- else
- os=netbsdelf
- fi
- ;;
- *)
- os=netbsd
- ;;
- esac
- # The OS release
- # Debian GNU/NetBSD machines have a different userland, and
- # thus, need a distinct triplet. However, they do not need
- # kernel version information, so it can be replaced with a
- # suitable tag, in the style of linux-gnu.
- case "${UNAME_VERSION}" in
- Debian*)
- release='-gnu'
- ;;
- *)
- release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'`
- ;;
- esac
- # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM:
- # contains redundant information, the shorter form:
- # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used.
- echo "${machine}-${os}${release}"
- exit ;;
- *:OpenBSD:*:*)
- UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'`
- echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE}
- exit ;;
- *:ekkoBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE}
- exit ;;
- *:SolidBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE}
- exit ;;
- macppc:MirBSD:*:*)
- echo powerpc-unknown-mirbsd${UNAME_RELEASE}
- exit ;;
- *:MirBSD:*:*)
- echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE}
- exit ;;
- alpha:OSF1:*:*)
- case $UNAME_RELEASE in
- *4.0)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'`
- ;;
- *5.*)
- UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'`
- ;;
- esac
- # According to Compaq, /usr/sbin/psrinfo has been available on
- # OSF/1 and Tru64 systems produced since 1995. I hope that
- # covers most systems running today. This code pipes the CPU
- # types through head -n 1, so we only detect the type of CPU 0.
- ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1`
- case "$ALPHA_CPU_TYPE" in
- "EV4 (21064)")
- UNAME_MACHINE="alpha" ;;
- "EV4.5 (21064)")
- UNAME_MACHINE="alpha" ;;
- "LCA4 (21066/21068)")
- UNAME_MACHINE="alpha" ;;
- "EV5 (21164)")
- UNAME_MACHINE="alphaev5" ;;
- "EV5.6 (21164A)")
- UNAME_MACHINE="alphaev56" ;;
- "EV5.6 (21164PC)")
- UNAME_MACHINE="alphapca56" ;;
- "EV5.7 (21164PC)")
- UNAME_MACHINE="alphapca57" ;;
- "EV6 (21264)")
- UNAME_MACHINE="alphaev6" ;;
- "EV6.7 (21264A)")
- UNAME_MACHINE="alphaev67" ;;
- "EV6.8CB (21264C)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.8AL (21264B)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.8CX (21264D)")
- UNAME_MACHINE="alphaev68" ;;
- "EV6.9A (21264/EV69A)")
- UNAME_MACHINE="alphaev69" ;;
- "EV7 (21364)")
- UNAME_MACHINE="alphaev7" ;;
- "EV7.9 (21364A)")
- UNAME_MACHINE="alphaev79" ;;
- esac
- # A Pn.n version is a patched version.
- # A Vn.n version is a released version.
- # A Tn.n version is a released field test version.
- # A Xn.n version is an unreleased experimental baselevel.
- # 1.2 uses "1.2" for uname -r.
- echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- exit ;;
- Alpha\ *:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # Should we change UNAME_MACHINE based on the output of uname instead
- # of the specific Alpha model?
- echo alpha-pc-interix
- exit ;;
- 21064:Windows_NT:50:3)
- echo alpha-dec-winnt3.5
- exit ;;
- Amiga*:UNIX_System_V:4.0:*)
- echo m68k-unknown-sysv4
- exit ;;
- *:[Aa]miga[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-amigaos
- exit ;;
- *:[Mm]orph[Oo][Ss]:*:*)
- echo ${UNAME_MACHINE}-unknown-morphos
- exit ;;
- *:OS/390:*:*)
- echo i370-ibm-openedition
- exit ;;
- *:z/VM:*:*)
- echo s390-ibm-zvmoe
- exit ;;
- *:OS400:*:*)
- echo powerpc-ibm-os400
- exit ;;
- arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*)
- echo arm-acorn-riscix${UNAME_RELEASE}
- exit ;;
- arm:riscos:*:*|arm:RISCOS:*:*)
- echo arm-unknown-riscos
- exit ;;
- SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*)
- echo hppa1.1-hitachi-hiuxmpp
- exit ;;
- Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*)
- # [email protected] (Earle F. Ake) contributed MIS and NILE.
- if test "`(/bin/universe) 2>/dev/null`" = att ; then
- echo pyramid-pyramid-sysv3
- else
- echo pyramid-pyramid-bsd
- fi
- exit ;;
- NILE*:*:*:dcosx)
- echo pyramid-pyramid-svr4
- exit ;;
- DRS?6000:unix:4.0:6*)
- echo sparc-icl-nx6
- exit ;;
- DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*)
- case `/usr/bin/uname -p` in
- sparc) echo sparc-icl-nx7; exit ;;
- esac ;;
- sun4H:SunOS:5.*:*)
- echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*)
- echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- i86pc:SunOS:5.*:* | ix86xen:SunOS:5.*:*)
- echo i386-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:6*:*)
- # According to config.sub, this is the proper way to canonicalize
- # SunOS6. Hard to guess exactly what SunOS6 will be like, but
- # it's likely to be more like Solaris than SunOS4.
- echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- sun4*:SunOS:*:*)
- case "`/usr/bin/arch -k`" in
- Series*|S4*)
- UNAME_RELEASE=`uname -v`
- ;;
- esac
- # Japanese Language versions have a version number like `4.1.3-JL'.
- echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'`
- exit ;;
- sun3*:SunOS:*:*)
- echo m68k-sun-sunos${UNAME_RELEASE}
- exit ;;
- sun*:*:4.2BSD:*)
- UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null`
- test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3
- case "`/bin/arch`" in
- sun3)
- echo m68k-sun-sunos${UNAME_RELEASE}
- ;;
- sun4)
- echo sparc-sun-sunos${UNAME_RELEASE}
- ;;
- esac
- exit ;;
- aushp:SunOS:*:*)
- echo sparc-auspex-sunos${UNAME_RELEASE}
- exit ;;
- # The situation for MiNT is a little confusing. The machine name
- # can be virtually everything (everything which is not
- # "atarist" or "atariste" at least should have a processor
- # > m68000). The system name ranges from "MiNT" over "FreeMiNT"
- # to the lowercase version "mint" (or "freemint"). Finally
- # the system name "TOS" denotes a system which is actually not
- # MiNT. But MiNT is downward compatible to TOS, so this should
- # be no problem.
- atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*)
- echo m68k-atari-mint${UNAME_RELEASE}
- exit ;;
- milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*)
- echo m68k-milan-mint${UNAME_RELEASE}
- exit ;;
- hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*)
- echo m68k-hades-mint${UNAME_RELEASE}
- exit ;;
- *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*)
- echo m68k-unknown-mint${UNAME_RELEASE}
- exit ;;
- m68k:machten:*:*)
- echo m68k-apple-machten${UNAME_RELEASE}
- exit ;;
- powerpc:machten:*:*)
- echo powerpc-apple-machten${UNAME_RELEASE}
- exit ;;
- RISC*:Mach:*:*)
- echo mips-dec-mach_bsd4.3
- exit ;;
- RISC*:ULTRIX:*:*)
- echo mips-dec-ultrix${UNAME_RELEASE}
- exit ;;
- VAX*:ULTRIX*:*:*)
- echo vax-dec-ultrix${UNAME_RELEASE}
- exit ;;
- 2020:CLIX:*:* | 2430:CLIX:*:*)
- echo clipper-intergraph-clix${UNAME_RELEASE}
- exit ;;
- mips:*:*:UMIPS | mips:*:*:RISCos)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
-#ifdef __cplusplus
-#include <stdio.h> /* for printf() prototype */
- int main (int argc, char *argv[]) {
-#else
- int main (argc, argv) int argc; char *argv[]; {
-#endif
- #if defined (host_mips) && defined (MIPSEB)
- #if defined (SYSTYPE_SYSV)
- printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0);
- #endif
- #if defined (SYSTYPE_SVR4)
- printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0);
- #endif
- #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD)
- printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0);
- #endif
- #endif
- exit (-1);
- }
-EOF
- $CC_FOR_BUILD -o $dummy $dummy.c &&
- dummyarg=`echo "${UNAME_RELEASE}" | sed -n 's/\([0-9]*\).*/\1/p'` &&
- SYSTEM_NAME=`$dummy $dummyarg` &&
- { echo "$SYSTEM_NAME"; exit; }
- echo mips-mips-riscos${UNAME_RELEASE}
- exit ;;
- Motorola:PowerMAX_OS:*:*)
- echo powerpc-motorola-powermax
- exit ;;
- Motorola:*:4.3:PL8-*)
- echo powerpc-harris-powermax
- exit ;;
- Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*)
- echo powerpc-harris-powermax
- exit ;;
- Night_Hawk:Power_UNIX:*:*)
- echo powerpc-harris-powerunix
- exit ;;
- m88k:CX/UX:7*:*)
- echo m88k-harris-cxux7
- exit ;;
- m88k:*:4*:R4*)
- echo m88k-motorola-sysv4
- exit ;;
- m88k:*:3*:R3*)
- echo m88k-motorola-sysv3
- exit ;;
- AViiON:dgux:*:*)
- # DG/UX returns AViiON for all architectures
- UNAME_PROCESSOR=`/usr/bin/uname -p`
- if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ]
- then
- if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \
- [ ${TARGET_BINARY_INTERFACE}x = x ]
- then
- echo m88k-dg-dgux${UNAME_RELEASE}
- else
- echo m88k-dg-dguxbcs${UNAME_RELEASE}
- fi
- else
- echo i586-dg-dgux${UNAME_RELEASE}
- fi
- exit ;;
- M88*:DolphinOS:*:*) # DolphinOS (SVR3)
- echo m88k-dolphin-sysv3
- exit ;;
- M88*:*:R3*:*)
- # Delta 88k system running SVR3
- echo m88k-motorola-sysv3
- exit ;;
- XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3)
- echo m88k-tektronix-sysv3
- exit ;;
- Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD)
- echo m68k-tektronix-bsd
- exit ;;
- *:IRIX*:*:*)
- echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'`
- exit ;;
- ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX.
- echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id
- exit ;; # Note that: echo "'`uname -s`'" gives 'AIX '
- i*86:AIX:*:*)
- echo i386-ibm-aix
- exit ;;
- ia64:AIX:*:*)
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
- else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
- fi
- echo ${UNAME_MACHINE}-ibm-aix${IBM_REV}
- exit ;;
- *:AIX:2:3)
- if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <sys/systemcfg.h>
-
- main()
- {
- if (!__power_pc())
- exit(1);
- puts("powerpc-ibm-aix3.2.5");
- exit(0);
- }
-EOF
- if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy`
- then
- echo "$SYSTEM_NAME"
- else
- echo rs6000-ibm-aix3.2.5
- fi
- elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then
- echo rs6000-ibm-aix3.2.4
- else
- echo rs6000-ibm-aix3.2
- fi
- exit ;;
- *:AIX:*:[45])
- IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'`
- if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then
- IBM_ARCH=rs6000
- else
- IBM_ARCH=powerpc
- fi
- if [ -x /usr/bin/oslevel ] ; then
- IBM_REV=`/usr/bin/oslevel`
- else
- IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE}
- fi
- echo ${IBM_ARCH}-ibm-aix${IBM_REV}
- exit ;;
- *:AIX:*:*)
- echo rs6000-ibm-aix
- exit ;;
- ibmrt:4.4BSD:*|romp-ibm:BSD:*)
- echo romp-ibm-bsd4.4
- exit ;;
- ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and
- echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to
- exit ;; # report: romp-ibm BSD 4.3
- *:BOSX:*:*)
- echo rs6000-bull-bosx
- exit ;;
- DPX/2?00:B.O.S.:*:*)
- echo m68k-bull-sysv3
- exit ;;
- 9000/[34]??:4.3bsd:1.*:*)
- echo m68k-hp-bsd
- exit ;;
- hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*)
- echo m68k-hp-bsd4.4
- exit ;;
- 9000/[34678]??:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- case "${UNAME_MACHINE}" in
- 9000/31? ) HP_ARCH=m68000 ;;
- 9000/[34]?? ) HP_ARCH=m68k ;;
- 9000/[678][0-9][0-9])
- if [ -x /usr/bin/getconf ]; then
- sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null`
- sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null`
- case "${sc_cpu_version}" in
- 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0
- 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1
- 532) # CPU_PA_RISC2_0
- case "${sc_kernel_bits}" in
- 32) HP_ARCH="hppa2.0n" ;;
- 64) HP_ARCH="hppa2.0w" ;;
- '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20
- esac ;;
- esac
- fi
- if [ "${HP_ARCH}" = "" ]; then
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
-
- #define _HPUX_SOURCE
- #include <stdlib.h>
- #include <unistd.h>
-
- int main ()
- {
- #if defined(_SC_KERNEL_BITS)
- long bits = sysconf(_SC_KERNEL_BITS);
- #endif
- long cpu = sysconf (_SC_CPU_VERSION);
-
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1"); break;
- case CPU_PA_RISC2_0:
- #if defined(_SC_KERNEL_BITS)
- switch (bits)
- {
- case 64: puts ("hppa2.0w"); break;
- case 32: puts ("hppa2.0n"); break;
- default: puts ("hppa2.0"); break;
- } break;
- #else /* !defined(_SC_KERNEL_BITS) */
- puts ("hppa2.0"); break;
- #endif
- default: puts ("hppa1.0"); break;
- }
- exit (0);
- }
-EOF
- (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy`
- test -z "$HP_ARCH" && HP_ARCH=hppa
- fi ;;
- esac
- if [ ${HP_ARCH} = "hppa2.0w" ]
- then
- eval $set_cc_for_build
-
- # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating
- # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler
- # generating 64-bit code. GNU and HP use different nomenclature:
- #
- # $ CC_FOR_BUILD=cc ./config.guess
- # => hppa2.0w-hp-hpux11.23
- # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess
- # => hppa64-hp-hpux11.23
-
- if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) |
- grep __LP64__ >/dev/null
- then
- HP_ARCH="hppa2.0w"
- else
- HP_ARCH="hppa64"
- fi
- fi
- echo ${HP_ARCH}-hp-hpux${HPUX_REV}
- exit ;;
- ia64:HP-UX:*:*)
- HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'`
- echo ia64-hp-hpux${HPUX_REV}
- exit ;;
- 3050*:HI-UX:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <unistd.h>
- int
- main ()
- {
- long cpu = sysconf (_SC_CPU_VERSION);
- /* The order matters, because CPU_IS_HP_MC68K erroneously returns
- true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct
- results, however. */
- if (CPU_IS_PA_RISC (cpu))
- {
- switch (cpu)
- {
- case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break;
- case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break;
- case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break;
- default: puts ("hppa-hitachi-hiuxwe2"); break;
- }
- }
- else if (CPU_IS_HP_MC68K (cpu))
- puts ("m68k-hitachi-hiuxwe2");
- else puts ("unknown-hitachi-hiuxwe2");
- exit (0);
- }
-EOF
- $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
- echo unknown-hitachi-hiuxwe2
- exit ;;
- 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* )
- echo hppa1.1-hp-bsd
- exit ;;
- 9000/8??:4.3bsd:*:*)
- echo hppa1.0-hp-bsd
- exit ;;
- *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*)
- echo hppa1.0-hp-mpeix
- exit ;;
- hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* )
- echo hppa1.1-hp-osf
- exit ;;
- hp8??:OSF1:*:*)
- echo hppa1.0-hp-osf
- exit ;;
- i*86:OSF1:*:*)
- if [ -x /usr/sbin/sysversion ] ; then
- echo ${UNAME_MACHINE}-unknown-osf1mk
- else
- echo ${UNAME_MACHINE}-unknown-osf1
- fi
- exit ;;
- parisc*:Lites*:*:*)
- echo hppa1.1-hp-lites
- exit ;;
- C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*)
- echo c1-convex-bsd
- exit ;;
- C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*)
- echo c34-convex-bsd
- exit ;;
- C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*)
- echo c38-convex-bsd
- exit ;;
- C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*)
- echo c4-convex-bsd
- exit ;;
- CRAY*Y-MP:*:*:*)
- echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*[A-Z]90:*:*:*)
- echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \
- | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \
- -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \
- -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*TS:*:*:*)
- echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*T3E:*:*:*)
- echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- CRAY*SV1:*:*:*)
- echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- *:UNICOS/mp:*:*)
- echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/'
- exit ;;
- F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*)
- FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'`
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'`
- echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
- 5000:UNIX_System_V:4.*:*)
- FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'`
- FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'`
- echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}"
- exit ;;
- i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*)
- echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE}
- exit ;;
- sparc*:BSD/OS:*:*)
- echo sparc-unknown-bsdi${UNAME_RELEASE}
- exit ;;
- *:BSD/OS:*:*)
- echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE}
- exit ;;
- *:FreeBSD:*:*)
- case ${UNAME_MACHINE} in
- pc98)
- echo i386-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- amd64)
- echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- *)
- echo ${UNAME_MACHINE}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;;
- esac
- exit ;;
- i*:CYGWIN*:*)
- echo ${UNAME_MACHINE}-pc-cygwin
- exit ;;
- *:MINGW*:*)
- echo ${UNAME_MACHINE}-pc-mingw32
- exit ;;
- i*:windows32*:*)
- # uname -m includes "-pc" on this system.
- echo ${UNAME_MACHINE}-mingw32
- exit ;;
- i*:PW*:*)
- echo ${UNAME_MACHINE}-pc-pw32
- exit ;;
- *:Interix*:[3456]*)
- case ${UNAME_MACHINE} in
- x86)
- echo i586-pc-interix${UNAME_RELEASE}
- exit ;;
- EM64T | authenticamd)
- echo x86_64-unknown-interix${UNAME_RELEASE}
- exit ;;
- esac ;;
- [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*)
- echo i${UNAME_MACHINE}-pc-mks
- exit ;;
- i*:Windows_NT*:* | Pentium*:Windows_NT*:*)
- # How do we know it's Interix rather than the generic POSIX subsystem?
- # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we
- # UNAME_MACHINE based on the output of uname instead of i386?
- echo i586-pc-interix
- exit ;;
- i*:UWIN*:*)
- echo ${UNAME_MACHINE}-pc-uwin
- exit ;;
- amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*)
- echo x86_64-unknown-cygwin
- exit ;;
- p*:CYGWIN*:*)
- echo powerpcle-unknown-cygwin
- exit ;;
- prep*:SunOS:5.*:*)
- echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'`
- exit ;;
- *:GNU:*:*)
- # the GNU system
- echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'`
- exit ;;
- *:GNU/*:*:*)
- # other systems with GNU libc and userland
- echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu
- exit ;;
- i*86:Minix:*:*)
- echo ${UNAME_MACHINE}-pc-minix
- exit ;;
- arm*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- avr32*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- cris:Linux:*:*)
- echo cris-axis-linux-gnu
- exit ;;
- crisv32:Linux:*:*)
- echo crisv32-axis-linux-gnu
- exit ;;
- frv:Linux:*:*)
- echo frv-unknown-linux-gnu
- exit ;;
- ia64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- m32r*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- m68*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- mips:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips
- #undef mipsel
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mipsel
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- mips64:Linux:*:*)
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #undef CPU
- #undef mips64
- #undef mips64el
- #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL)
- CPU=mips64el
- #else
- #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB)
- CPU=mips64
- #else
- CPU=
- #endif
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^CPU/{
- s: ::g
- p
- }'`"
- test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; }
- ;;
- or32:Linux:*:*)
- echo or32-unknown-linux-gnu
- exit ;;
- ppc:Linux:*:*)
- echo powerpc-unknown-linux-gnu
- exit ;;
- ppc64:Linux:*:*)
- echo powerpc64-unknown-linux-gnu
- exit ;;
- alpha:Linux:*:*)
- case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in
- EV5) UNAME_MACHINE=alphaev5 ;;
- EV56) UNAME_MACHINE=alphaev56 ;;
- PCA56) UNAME_MACHINE=alphapca56 ;;
- PCA57) UNAME_MACHINE=alphapca56 ;;
- EV6) UNAME_MACHINE=alphaev6 ;;
- EV67) UNAME_MACHINE=alphaev67 ;;
- EV68*) UNAME_MACHINE=alphaev68 ;;
- esac
- objdump --private-headers /bin/sh | grep ld.so.1 >/dev/null
- if test "$?" = 0 ; then LIBC="libc1" ; else LIBC="" ; fi
- echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC}
- exit ;;
- parisc:Linux:*:* | hppa:Linux:*:*)
- # Look for CPU level
- case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in
- PA7*) echo hppa1.1-unknown-linux-gnu ;;
- PA8*) echo hppa2.0-unknown-linux-gnu ;;
- *) echo hppa-unknown-linux-gnu ;;
- esac
- exit ;;
- parisc64:Linux:*:* | hppa64:Linux:*:*)
- echo hppa64-unknown-linux-gnu
- exit ;;
- s390:Linux:*:* | s390x:Linux:*:*)
- echo ${UNAME_MACHINE}-ibm-linux
- exit ;;
- sh64*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- sh*:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- sparc:Linux:*:* | sparc64:Linux:*:*)
- echo ${UNAME_MACHINE}-unknown-linux-gnu
- exit ;;
- tile:Linux:*:*)
- echo tile-unknown-linux-gnu
- exit ;;
- vax:Linux:*:*)
- echo ${UNAME_MACHINE}-dec-linux-gnu
- exit ;;
- x86_64:Linux:*:*)
- echo x86_64-unknown-linux-gnu
- exit ;;
- xtensa:Linux:*:*)
- echo xtensa-unknown-linux-gnu
- exit ;;
- i*86:Linux:*:*)
- # The BFD linker knows what the default object file format is, so
- # first see if it will tell us. cd to the root directory to prevent
- # problems with other programs or directories called `ld' in the path.
- # Set LC_ALL=C to ensure ld outputs messages in English.
- ld_supported_targets=`cd /; LC_ALL=C ld --help 2>&1 \
- | sed -ne '/supported targets:/!d
- s/[ ][ ]*/ /g
- s/.*supported targets: *//
- s/ .*//
- p'`
- case "$ld_supported_targets" in
- elf32-i386)
- TENTATIVE="${UNAME_MACHINE}-pc-linux-gnu"
- ;;
- a.out-i386-linux)
- echo "${UNAME_MACHINE}-pc-linux-gnuaout"
- exit ;;
- coff-i386)
- echo "${UNAME_MACHINE}-pc-linux-gnucoff"
- exit ;;
- "")
- # Either a pre-BFD a.out linker (linux-gnuoldld) or
- # one that does not give us useful --help.
- echo "${UNAME_MACHINE}-pc-linux-gnuoldld"
- exit ;;
- esac
- # Determine whether the default compiler is a.out or elf
- eval $set_cc_for_build
- sed 's/^ //' << EOF >$dummy.c
- #include <features.h>
- #ifdef __ELF__
- # ifdef __GLIBC__
- # if __GLIBC__ >= 2
- LIBC=gnu
- # else
- LIBC=gnulibc1
- # endif
- # else
- LIBC=gnulibc1
- # endif
- #else
- #if defined(__INTEL_COMPILER) || defined(__PGI) || defined(__SUNPRO_C) || defined(__SUNPRO_CC)
- LIBC=gnu
- #else
- LIBC=gnuaout
- #endif
- #endif
- #ifdef __dietlibc__
- LIBC=dietlibc
- #endif
-EOF
- eval "`$CC_FOR_BUILD -E $dummy.c 2>/dev/null | sed -n '
- /^LIBC/{
- s: ::g
- p
- }'`"
- test x"${LIBC}" != x && {
- echo "${UNAME_MACHINE}-pc-linux-${LIBC}"
- exit
- }
- test x"${TENTATIVE}" != x && { echo "${TENTATIVE}"; exit; }
- ;;
- i*86:DYNIX/ptx:4*:*)
- # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there.
- # earlier versions are messed up and put the nodename in both
- # sysname and nodename.
- echo i386-sequent-sysv4
- exit ;;
- i*86:UNIX_SV:4.2MP:2.*)
- # Unixware is an offshoot of SVR4, but it has its own version
- # number series starting with 2...
- # I am not positive that other SVR4 systems won't match this,
- # I just have to hope. -- rms.
- # Use sysv4.2uw... so that sysv4* matches it.
- echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION}
- exit ;;
- i*86:OS/2:*:*)
- # If we were able to find `uname', then EMX Unix compatibility
- # is probably installed.
- echo ${UNAME_MACHINE}-pc-os2-emx
- exit ;;
- i*86:XTS-300:*:STOP)
- echo ${UNAME_MACHINE}-unknown-stop
- exit ;;
- i*86:atheos:*:*)
- echo ${UNAME_MACHINE}-unknown-atheos
- exit ;;
- i*86:syllable:*:*)
- echo ${UNAME_MACHINE}-pc-syllable
- exit ;;
- i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.0*:*)
- echo i386-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- i*86:*DOS:*:*)
- echo ${UNAME_MACHINE}-pc-msdosdjgpp
- exit ;;
- i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*)
- UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'`
- if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then
- echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL}
- else
- echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL}
- fi
- exit ;;
- i*86:*:5:[678]*)
- # UnixWare 7.x, OpenUNIX and OpenServer 6.
- case `/bin/uname -X | grep "^Machine"` in
- *486*) UNAME_MACHINE=i486 ;;
- *Pentium) UNAME_MACHINE=i586 ;;
- *Pent*|*Celeron) UNAME_MACHINE=i686 ;;
- esac
- echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION}
- exit ;;
- i*86:*:3.2:*)
- if test -f /usr/options/cb.name; then
- UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name`
- echo ${UNAME_MACHINE}-pc-isc$UNAME_REL
- elif /bin/uname -X 2>/dev/null >/dev/null ; then
- UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')`
- (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486
- (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \
- && UNAME_MACHINE=i586
- (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \
- && UNAME_MACHINE=i686
- (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \
- && UNAME_MACHINE=i686
- echo ${UNAME_MACHINE}-pc-sco$UNAME_REL
- else
- echo ${UNAME_MACHINE}-pc-sysv32
- fi
- exit ;;
- pc:*:*:*)
- # Left here for compatibility:
- # uname -m prints for DJGPP always 'pc', but it prints nothing about
- # the processor, so we play safe by assuming i386.
- echo i386-pc-msdosdjgpp
- exit ;;
- Intel:Mach:3*:*)
- echo i386-pc-mach3
- exit ;;
- paragon:*:*:*)
- echo i860-intel-osf1
- exit ;;
- i860:*:4.*:*) # i860-SVR4
- if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then
- echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4
- else # Add other i860-SVR4 vendors below as they are discovered.
- echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4
- fi
- exit ;;
- mini*:CTIX:SYS*5:*)
- # "miniframe"
- echo m68010-convergent-sysv
- exit ;;
- mc68k:UNIX:SYSTEM5:3.51m)
- echo m68k-convergent-sysv
- exit ;;
- M680?0:D-NIX:5.3:*)
- echo m68k-diab-dnix
- exit ;;
- M68*:*:R3V[5678]*:*)
- test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;;
- 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0)
- OS_REL=''
- test -r /etc/.relid \
- && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid`
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4.3${OS_REL}; exit; }
- /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \
- && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;;
- 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*)
- /bin/uname -p 2>/dev/null | grep 86 >/dev/null \
- && { echo i486-ncr-sysv4; exit; } ;;
- m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*)
- echo m68k-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- mc68030:UNIX_System_V:4.*:*)
- echo m68k-atari-sysv4
- exit ;;
- TSUNAMI:LynxOS:2.*:*)
- echo sparc-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- rs6000:LynxOS:2.*:*)
- echo rs6000-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.0*:*)
- echo powerpc-unknown-lynxos${UNAME_RELEASE}
- exit ;;
- SM[BE]S:UNIX_SV:*:*)
- echo mips-dde-sysv${UNAME_RELEASE}
- exit ;;
- RM*:ReliantUNIX-*:*:*)
- echo mips-sni-sysv4
- exit ;;
- RM*:SINIX-*:*:*)
- echo mips-sni-sysv4
- exit ;;
- *:SINIX-*:*:*)
- if uname -p 2>/dev/null >/dev/null ; then
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
- echo ${UNAME_MACHINE}-sni-sysv4
- else
- echo ns32k-sni-sysv
- fi
- exit ;;
- PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort
- echo i586-unisys-sysv4
- exit ;;
- *:UNIX_System_V:4*:FTX*)
- # From Gerald Hewes <[email protected]>.
- # How about differentiating between stratus architectures? -djm
- echo hppa1.1-stratus-sysv4
- exit ;;
- *:*:*:FTX*)
- echo i860-stratus-sysv4
- exit ;;
- i*86:VOS:*:*)
- echo ${UNAME_MACHINE}-stratus-vos
- exit ;;
- *:VOS:*:*)
- echo hppa1.1-stratus-vos
- exit ;;
- mc68*:A/UX:*:*)
- echo m68k-apple-aux${UNAME_RELEASE}
- exit ;;
- news*:NEWS-OS:6*:*)
- echo mips-sony-newsos6
- exit ;;
- R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*)
- if [ -d /usr/nec ]; then
- echo mips-nec-sysv${UNAME_RELEASE}
- else
- echo mips-unknown-sysv${UNAME_RELEASE}
- fi
- exit ;;
- BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only.
- echo powerpc-be-beos
- exit ;;
- BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only.
- echo powerpc-apple-beos
- exit ;;
- BePC:BeOS:*:*) # BeOS running on Intel PC compatible.
- echo i586-pc-beos
- exit ;;
- SX-4:SUPER-UX:*:*)
- echo sx4-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-5:SUPER-UX:*:*)
- echo sx5-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-6:SUPER-UX:*:*)
- echo sx6-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-7:SUPER-UX:*:*)
- echo sx7-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-8:SUPER-UX:*:*)
- echo sx8-nec-superux${UNAME_RELEASE}
- exit ;;
- SX-8R:SUPER-UX:*:*)
- echo sx8r-nec-superux${UNAME_RELEASE}
- exit ;;
- Power*:Rhapsody:*:*)
- echo powerpc-apple-rhapsody${UNAME_RELEASE}
- exit ;;
- *:Rhapsody:*:*)
- echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE}
- exit ;;
- *:Darwin:*:*)
- UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown
- case $UNAME_PROCESSOR in
- unknown) UNAME_PROCESSOR=powerpc ;;
- esac
- echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE}
- exit ;;
- *:procnto*:*:* | *:QNX:[0123456789]*:*)
- UNAME_PROCESSOR=`uname -p`
- if test "$UNAME_PROCESSOR" = "x86"; then
- UNAME_PROCESSOR=i386
- UNAME_MACHINE=pc
- fi
- echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE}
- exit ;;
- *:QNX:*:4*)
- echo i386-pc-qnx
- exit ;;
- NSE-?:NONSTOP_KERNEL:*:*)
- echo nse-tandem-nsk${UNAME_RELEASE}
- exit ;;
- NSR-?:NONSTOP_KERNEL:*:*)
- echo nsr-tandem-nsk${UNAME_RELEASE}
- exit ;;
- *:NonStop-UX:*:*)
- echo mips-compaq-nonstopux
- exit ;;
- BS2000:POSIX*:*:*)
- echo bs2000-siemens-sysv
- exit ;;
- DS/*:UNIX_System_V:*:*)
- echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE}
- exit ;;
- *:Plan9:*:*)
- # "uname -m" is not consistent, so use $cputype instead. 386
- # is converted to i386 for consistency with other x86
- # operating systems.
- if test "$cputype" = "386"; then
- UNAME_MACHINE=i386
- else
- UNAME_MACHINE="$cputype"
- fi
- echo ${UNAME_MACHINE}-unknown-plan9
- exit ;;
- *:TOPS-10:*:*)
- echo pdp10-unknown-tops10
- exit ;;
- *:TENEX:*:*)
- echo pdp10-unknown-tenex
- exit ;;
- KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*)
- echo pdp10-dec-tops20
- exit ;;
- XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*)
- echo pdp10-xkl-tops20
- exit ;;
- *:TOPS-20:*:*)
- echo pdp10-unknown-tops20
- exit ;;
- *:ITS:*:*)
- echo pdp10-unknown-its
- exit ;;
- SEI:*:*:SEIUX)
- echo mips-sei-seiux${UNAME_RELEASE}
- exit ;;
- *:DragonFly:*:*)
- echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`
- exit ;;
- *:*VMS:*:*)
- UNAME_MACHINE=`(uname -p) 2>/dev/null`
- case "${UNAME_MACHINE}" in
- A*) echo alpha-dec-vms ; exit ;;
- I*) echo ia64-dec-vms ; exit ;;
- V*) echo vax-dec-vms ; exit ;;
- esac ;;
- *:XENIX:*:SysV)
- echo i386-pc-xenix
- exit ;;
- i*86:skyos:*:*)
- echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//'
- exit ;;
- i*86:rdos:*:*)
- echo ${UNAME_MACHINE}-pc-rdos
- exit ;;
-esac
-
-#echo '(No uname command or uname output not recognized.)' 1>&2
-#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2
-
-eval $set_cc_for_build
-cat >$dummy.c <<EOF
-#ifdef _SEQUENT_
-# include <sys/types.h>
-# include <sys/utsname.h>
-#endif
-main ()
-{
-#if defined (sony)
-#if defined (MIPSEB)
- /* BFD wants "bsd" instead of "newsos". Perhaps BFD should be changed,
- I don't know.... */
- printf ("mips-sony-bsd\n"); exit (0);
-#else
-#include <sys/param.h>
- printf ("m68k-sony-newsos%s\n",
-#ifdef NEWSOS4
- "4"
-#else
- ""
-#endif
- ); exit (0);
-#endif
-#endif
-
-#if defined (__arm) && defined (__acorn) && defined (__unix)
- printf ("arm-acorn-riscix\n"); exit (0);
-#endif
-
-#if defined (hp300) && !defined (hpux)
- printf ("m68k-hp-bsd\n"); exit (0);
-#endif
-
-#if defined (NeXT)
-#if !defined (__ARCHITECTURE__)
-#define __ARCHITECTURE__ "m68k"
-#endif
- int version;
- version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`;
- if (version < 4)
- printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version);
- else
- printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version);
- exit (0);
-#endif
-
-#if defined (MULTIMAX) || defined (n16)
-#if defined (UMAXV)
- printf ("ns32k-encore-sysv\n"); exit (0);
-#else
-#if defined (CMU)
- printf ("ns32k-encore-mach\n"); exit (0);
-#else
- printf ("ns32k-encore-bsd\n"); exit (0);
-#endif
-#endif
-#endif
-
-#if defined (__386BSD__)
- printf ("i386-pc-bsd\n"); exit (0);
-#endif
-
-#if defined (sequent)
-#if defined (i386)
- printf ("i386-sequent-dynix\n"); exit (0);
-#endif
-#if defined (ns32000)
- printf ("ns32k-sequent-dynix\n"); exit (0);
-#endif
-#endif
-
-#if defined (_SEQUENT_)
- struct utsname un;
-
- uname(&un);
-
- if (strncmp(un.version, "V2", 2) == 0) {
- printf ("i386-sequent-ptx2\n"); exit (0);
- }
- if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */
- printf ("i386-sequent-ptx1\n"); exit (0);
- }
- printf ("i386-sequent-ptx\n"); exit (0);
-
-#endif
-
-#if defined (vax)
-# if !defined (ultrix)
-# include <sys/param.h>
-# if defined (BSD)
-# if BSD == 43
- printf ("vax-dec-bsd4.3\n"); exit (0);
-# else
-# if BSD == 199006
- printf ("vax-dec-bsd4.3reno\n"); exit (0);
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# endif
-# else
- printf ("vax-dec-bsd\n"); exit (0);
-# endif
-# else
- printf ("vax-dec-ultrix\n"); exit (0);
-# endif
-#endif
-
-#if defined (alliant) && defined (i860)
- printf ("i860-alliant-bsd\n"); exit (0);
-#endif
-
- exit (1);
-}
-EOF
-
-$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` &&
- { echo "$SYSTEM_NAME"; exit; }
-
-# Apollos put the system type in the environment.
-
-test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; }
-
-# Convex versions that predate uname can use getsysinfo(1)
-
-if [ -x /usr/convex/getsysinfo ]
-then
- case `getsysinfo -f cpu_type` in
- c1*)
- echo c1-convex-bsd
- exit ;;
- c2*)
- if getsysinfo -f scalar_acc
- then echo c32-convex-bsd
- else echo c2-convex-bsd
- fi
- exit ;;
- c34*)
- echo c34-convex-bsd
- exit ;;
- c38*)
- echo c38-convex-bsd
- exit ;;
- c4*)
- echo c4-convex-bsd
- exit ;;
- esac
-fi
-
-cat >&2 <<EOF
-$0: unable to guess system type
-
-This script, last modified $timestamp, has failed to recognize
-the operating system you are using. It is advised that you
-download the most up to date version of the config scripts from
-
- http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.guess
-and
- http://savannah.gnu.org/cgi-bin/viewcvs/*checkout*/config/config/config.sub
-
-If the version you run ($0) is already up to date, please
-send the following data and any information you think might be
-pertinent to <config-patches@gnu.org> in order to provide the needed
-information to handle your system.
-
-config.guess timestamp = $timestamp
-
-uname -m = `(uname -m) 2>/dev/null || echo unknown`
-uname -r = `(uname -r) 2>/dev/null || echo unknown`
-uname -s = `(uname -s) 2>/dev/null || echo unknown`
-uname -v = `(uname -v) 2>/dev/null || echo unknown`
-
-/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null`
-/bin/uname -X = `(/bin/uname -X) 2>/dev/null`
-
-hostinfo = `(hostinfo) 2>/dev/null`
-/bin/universe = `(/bin/universe) 2>/dev/null`
-/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null`
-/bin/arch = `(/bin/arch) 2>/dev/null`
-/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null`
-/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null`
-
-UNAME_MACHINE = ${UNAME_MACHINE}
-UNAME_RELEASE = ${UNAME_RELEASE}
-UNAME_SYSTEM = ${UNAME_SYSTEM}
-UNAME_VERSION = ${UNAME_VERSION}
-EOF
-
-exit 1
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/lib/diameter/autoconf/config.sub b/lib/diameter/autoconf/config.sub
deleted file mode 100755
index f43233b104..0000000000
--- a/lib/diameter/autoconf/config.sub
+++ /dev/null
@@ -1,1630 +0,0 @@
-#! /bin/sh
-# Configuration validation subroutine script.
-# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
-# 2000, 2001, 2002, 2003, 2004, 2005, 2006 Free Software Foundation,
-# Inc.
-
-timestamp='2007-04-29'
-
-# This file is (in principle) common to ALL GNU software.
-# The presence of a machine in this file suggests that SOME GNU software
-# can handle that machine. It does not imply ALL GNU software can.
-#
-# This file is free software; you can redistribute it and/or modify
-# it under the terms of the GNU General Public License as published by
-# the Free Software Foundation; either version 2 of the License, or
-# (at your option) any later version.
-#
-# This program is distributed in the hope that it will be useful,
-# but WITHOUT ANY WARRANTY; without even the implied warranty of
-# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-# GNU General Public License for more details.
-#
-# You should have received a copy of the GNU General Public License
-# along with this program; if not, write to the Free Software
-# Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA
-# 02110-1301, USA.
-#
-# As a special exception to the GNU General Public License, if you
-# distribute this file as part of a program that contains a
-# configuration script generated by Autoconf, you may include it under
-# the same distribution terms that you use for the rest of that program.
-
-
-# Please send patches to <config-patches@gnu.org>. Submit a context
-# diff and a properly formatted ChangeLog entry.
-#
-# Configuration subroutine to validate and canonicalize a configuration type.
-# Supply the specified configuration type as an argument.
-# If it is invalid, we print an error message on stderr and exit with code 1.
-# Otherwise, we print the canonical config type on stdout and succeed.
-
-# This file is supposed to be the same for all GNU packages
-# and recognize all the CPU types, system types and aliases
-# that are meaningful with *any* GNU software.
-# Each package is responsible for reporting which valid configurations
-# it does not support. The user should be able to distinguish
-# a failure to support a valid configuration from a meaningless
-# configuration.
-
-# The goal of this file is to map all the various variations of a given
-# machine specification into a single specification in the form:
-# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM
-# or in some cases, the newer four-part form:
-# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM
-# It is wrong to echo any other type of specification.
-
-me=`echo "$0" | sed -e 's,.*/,,'`
-
-usage="\
-Usage: $0 [OPTION] CPU-MFR-OPSYS
- $0 [OPTION] ALIAS
-
-Canonicalize a configuration name.
-
-Operation modes:
- -h, --help print this help, then exit
- -t, --time-stamp print date of last modification, then exit
- -v, --version print version number, then exit
-
-Report bugs and patches to <config-patches@gnu.org>."
-
-version="\
-GNU config.sub ($timestamp)
-
-Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, 2005
-Free Software Foundation, Inc.
-
-This is free software; see the source for copying conditions. There is NO
-warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE."
-
-help="
-Try \`$me --help' for more information."
-
-# Parse command line
-while test $# -gt 0 ; do
- case $1 in
- --time-stamp | --time* | -t )
- echo "$timestamp" ; exit ;;
- --version | -v )
- echo "$version" ; exit ;;
- --help | --h* | -h )
- echo "$usage"; exit ;;
- -- ) # Stop option processing
- shift; break ;;
- - ) # Use stdin as input.
- break ;;
- -* )
- echo "$me: invalid option $1$help"
- exit 1 ;;
-
- *local*)
- # First pass through any local machine types.
- echo $1
- exit ;;
-
- * )
- break ;;
- esac
-done
-
-case $# in
- 0) echo "$me: missing argument$help" >&2
- exit 1;;
- 1) ;;
- *) echo "$me: too many arguments$help" >&2
- exit 1;;
-esac
-
-# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any).
-# Here we must recognize all the valid KERNEL-OS combinations.
-maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'`
-case $maybe_os in
- nto-qnx* | linux-gnu* | linux-dietlibc | linux-newlib* | linux-uclibc* | \
- uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | knetbsd*-gnu* | netbsd*-gnu* | \
- storm-chaos* | os2-emx* | rtmk-nova*)
- os=-$maybe_os
- basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`
- ;;
- *)
- basic_machine=`echo $1 | sed 's/-[^-]*$//'`
- if [ $basic_machine != $1 ]
- then os=`echo $1 | sed 's/.*-/-/'`
- else os=; fi
- ;;
-esac
-
-### Let's recognize common machines as not being operating systems so
-### that things like config.sub decstation-3100 work. We also
-### recognize some manufacturers as not being operating systems, so we
-### can provide default operating systems below.
-case $os in
- -sun*os*)
- # Prevent following clause from handling this invalid input.
- ;;
- -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \
- -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \
- -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \
- -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\
- -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \
- -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \
- -apple | -axis | -knuth | -cray)
- os=
- basic_machine=$1
- ;;
- -sim | -cisco | -oki | -wec | -winbond)
- os=
- basic_machine=$1
- ;;
- -scout)
- ;;
- -wrs)
- os=-vxworks
- basic_machine=$1
- ;;
- -chorusos*)
- os=-chorusos
- basic_machine=$1
- ;;
- -chorusrdb)
- os=-chorusrdb
- basic_machine=$1
- ;;
- -hiux*)
- os=-hiuxwe2
- ;;
- -sco6)
- os=-sco5v6
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco5)
- os=-sco3.2v5
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco4)
- os=-sco3.2v4
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco3.2.[4-9]*)
- os=`echo $os | sed -e 's/sco3.2./sco3.2v/'`
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco3.2v[4-9]*)
- # Don't forget version if it is 3.2v4 or newer.
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco5v6*)
- # Don't forget version if it is 3.2v4 or newer.
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -sco*)
- os=-sco3.2v2
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -udk*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -isc)
- os=-isc2.2
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -clix*)
- basic_machine=clipper-intergraph
- ;;
- -isc*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'`
- ;;
- -lynx*)
- os=-lynxos
- ;;
- -ptx*)
- basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'`
- ;;
- -windowsnt*)
- os=`echo $os | sed -e 's/windowsnt/winnt/'`
- ;;
- -psos*)
- os=-psos
- ;;
- -mint | -mint[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
-esac
-
-# Decode aliases for certain CPU-COMPANY combinations.
-case $basic_machine in
- # Recognize the basic CPU types without company name.
- # Some are omitted here because they have special meanings below.
- 1750a | 580 \
- | a29k \
- | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \
- | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \
- | am33_2.0 \
- | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \
- | bfin \
- | c4x | clipper \
- | d10v | d30v | dlx | dsp16xx \
- | fido | fr30 | frv \
- | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \
- | i370 | i860 | i960 | ia64 \
- | ip2k | iq2000 \
- | m32c | m32r | m32rle | m68000 | m68k | m88k \
- | maxq | mb | microblaze | mcore | mep \
- | mips | mipsbe | mipseb | mipsel | mipsle \
- | mips16 \
- | mips64 | mips64el \
- | mips64vr | mips64vrel \
- | mips64orion | mips64orionel \
- | mips64vr4100 | mips64vr4100el \
- | mips64vr4300 | mips64vr4300el \
- | mips64vr5000 | mips64vr5000el \
- | mips64vr5900 | mips64vr5900el \
- | mipsisa32 | mipsisa32el \
- | mipsisa32r2 | mipsisa32r2el \
- | mipsisa64 | mipsisa64el \
- | mipsisa64r2 | mipsisa64r2el \
- | mipsisa64sb1 | mipsisa64sb1el \
- | mipsisa64sr71k | mipsisa64sr71kel \
- | mipstx39 | mipstx39el \
- | mn10200 | mn10300 \
- | mt \
- | msp430 \
- | nios | nios2 \
- | ns16k | ns32k \
- | or32 \
- | pdp10 | pdp11 | pj | pjl \
- | powerpc | powerpc64 | powerpc64le | powerpcle | ppcbe \
- | pyramid \
- | score \
- | sh | sh[1234] | sh[24]a | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \
- | sh64 | sh64le \
- | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \
- | sparcv8 | sparcv9 | sparcv9b | sparcv9v \
- | spu | strongarm \
- | tahoe | thumb | tic4x | tic80 | tron \
- | v850 | v850e \
- | we32k \
- | x86 | xc16x | xscale | xscalee[bl] | xstormy16 | xtensa \
- | z8k)
- basic_machine=$basic_machine-unknown
- ;;
- m6811 | m68hc11 | m6812 | m68hc12)
- # Motorola 68HC11/12.
- basic_machine=$basic_machine-unknown
- os=-none
- ;;
- m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k)
- ;;
- ms1)
- basic_machine=mt-unknown
- ;;
-
- # We use `pc' rather than `unknown'
- # because (1) that's what they normally are, and
- # (2) the word "unknown" tends to confuse beginning users.
- i*86 | x86_64)
- basic_machine=$basic_machine-pc
- ;;
- # Object if more than one company name word.
- *-*-*)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
- # Recognize the basic CPU types with company name.
- 580-* \
- | a29k-* \
- | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \
- | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \
- | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \
- | arm-* | armbe-* | armle-* | armeb-* | armv*-* \
- | avr-* | avr32-* \
- | bfin-* | bs2000-* \
- | c[123]* | c30-* | [cjt]90-* | c4x-* | c54x-* | c55x-* | c6x-* \
- | clipper-* | craynv-* | cydra-* \
- | d10v-* | d30v-* | dlx-* \
- | elxsi-* \
- | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \
- | h8300-* | h8500-* \
- | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \
- | i*86-* | i860-* | i960-* | ia64-* \
- | ip2k-* | iq2000-* \
- | m32c-* | m32r-* | m32rle-* \
- | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \
- | m88110-* | m88k-* | maxq-* | mcore-* \
- | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \
- | mips16-* \
- | mips64-* | mips64el-* \
- | mips64vr-* | mips64vrel-* \
- | mips64orion-* | mips64orionel-* \
- | mips64vr4100-* | mips64vr4100el-* \
- | mips64vr4300-* | mips64vr4300el-* \
- | mips64vr5000-* | mips64vr5000el-* \
- | mips64vr5900-* | mips64vr5900el-* \
- | mipsisa32-* | mipsisa32el-* \
- | mipsisa32r2-* | mipsisa32r2el-* \
- | mipsisa64-* | mipsisa64el-* \
- | mipsisa64r2-* | mipsisa64r2el-* \
- | mipsisa64sb1-* | mipsisa64sb1el-* \
- | mipsisa64sr71k-* | mipsisa64sr71kel-* \
- | mipstx39-* | mipstx39el-* \
- | mmix-* \
- | mt-* \
- | msp430-* \
- | nios-* | nios2-* \
- | none-* | np1-* | ns16k-* | ns32k-* \
- | orion-* \
- | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \
- | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* | ppcbe-* \
- | pyramid-* \
- | romp-* | rs6000-* \
- | sh-* | sh[1234]-* | sh[24]a-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \
- | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \
- | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \
- | sparclite-* \
- | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | strongarm-* | sv1-* | sx?-* \
- | tahoe-* | thumb-* \
- | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \
- | tron-* \
- | v850-* | v850e-* | vax-* \
- | we32k-* \
- | x86-* | x86_64-* | xc16x-* | xps100-* | xscale-* | xscalee[bl]-* \
- | xstormy16-* | xtensa-* \
- | ymp-* \
- | z8k-*)
- ;;
- # Recognize the various machine names and aliases which stand
- # for a CPU type and a company and sometimes even an OS.
- 386bsd)
- basic_machine=i386-unknown
- os=-bsd
- ;;
- 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc)
- basic_machine=m68000-att
- ;;
- 3b*)
- basic_machine=we32k-att
- ;;
- a29khif)
- basic_machine=a29k-amd
- os=-udi
- ;;
- abacus)
- basic_machine=abacus-unknown
- ;;
- adobe68k)
- basic_machine=m68010-adobe
- os=-scout
- ;;
- alliant | fx80)
- basic_machine=fx80-alliant
- ;;
- altos | altos3068)
- basic_machine=m68k-altos
- ;;
- am29k)
- basic_machine=a29k-none
- os=-bsd
- ;;
- amd64)
- basic_machine=x86_64-pc
- ;;
- amd64-*)
- basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- amdahl)
- basic_machine=580-amdahl
- os=-sysv
- ;;
- amiga | amiga-*)
- basic_machine=m68k-unknown
- ;;
- amigaos | amigados)
- basic_machine=m68k-unknown
- os=-amigaos
- ;;
- amigaunix | amix)
- basic_machine=m68k-unknown
- os=-sysv4
- ;;
- apollo68)
- basic_machine=m68k-apollo
- os=-sysv
- ;;
- apollo68bsd)
- basic_machine=m68k-apollo
- os=-bsd
- ;;
- aux)
- basic_machine=m68k-apple
- os=-aux
- ;;
- balance)
- basic_machine=ns32k-sequent
- os=-dynix
- ;;
- c90)
- basic_machine=c90-cray
- os=-unicos
- ;;
- convex-c1)
- basic_machine=c1-convex
- os=-bsd
- ;;
- convex-c2)
- basic_machine=c2-convex
- os=-bsd
- ;;
- convex-c32)
- basic_machine=c32-convex
- os=-bsd
- ;;
- convex-c34)
- basic_machine=c34-convex
- os=-bsd
- ;;
- convex-c38)
- basic_machine=c38-convex
- os=-bsd
- ;;
- cray | j90)
- basic_machine=j90-cray
- os=-unicos
- ;;
- craynv)
- basic_machine=craynv-cray
- os=-unicosmp
- ;;
- cr16c)
- basic_machine=cr16c-unknown
- os=-elf
- ;;
- crds | unos)
- basic_machine=m68k-crds
- ;;
- crisv32 | crisv32-* | etraxfs*)
- basic_machine=crisv32-axis
- ;;
- cris | cris-* | etrax*)
- basic_machine=cris-axis
- ;;
- crx)
- basic_machine=crx-unknown
- os=-elf
- ;;
- da30 | da30-*)
- basic_machine=m68k-da30
- ;;
- decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn)
- basic_machine=mips-dec
- ;;
- decsystem10* | dec10*)
- basic_machine=pdp10-dec
- os=-tops10
- ;;
- decsystem20* | dec20*)
- basic_machine=pdp10-dec
- os=-tops20
- ;;
- delta | 3300 | motorola-3300 | motorola-delta \
- | 3300-motorola | delta-motorola)
- basic_machine=m68k-motorola
- ;;
- delta88)
- basic_machine=m88k-motorola
- os=-sysv3
- ;;
- djgpp)
- basic_machine=i586-pc
- os=-msdosdjgpp
- ;;
- dpx20 | dpx20-*)
- basic_machine=rs6000-bull
- os=-bosx
- ;;
- dpx2* | dpx2*-bull)
- basic_machine=m68k-bull
- os=-sysv3
- ;;
- ebmon29k)
- basic_machine=a29k-amd
- os=-ebmon
- ;;
- elxsi)
- basic_machine=elxsi-elxsi
- os=-bsd
- ;;
- encore | umax | mmax)
- basic_machine=ns32k-encore
- ;;
- es1800 | OSE68k | ose68k | ose | OSE)
- basic_machine=m68k-ericsson
- os=-ose
- ;;
- fx2800)
- basic_machine=i860-alliant
- ;;
- genix)
- basic_machine=ns32k-ns
- ;;
- gmicro)
- basic_machine=tron-gmicro
- os=-sysv
- ;;
- go32)
- basic_machine=i386-pc
- os=-go32
- ;;
- h3050r* | hiux*)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- h8300hms)
- basic_machine=h8300-hitachi
- os=-hms
- ;;
- h8300xray)
- basic_machine=h8300-hitachi
- os=-xray
- ;;
- h8500hms)
- basic_machine=h8500-hitachi
- os=-hms
- ;;
- harris)
- basic_machine=m88k-harris
- os=-sysv3
- ;;
- hp300-*)
- basic_machine=m68k-hp
- ;;
- hp300bsd)
- basic_machine=m68k-hp
- os=-bsd
- ;;
- hp300hpux)
- basic_machine=m68k-hp
- os=-hpux
- ;;
- hp3k9[0-9][0-9] | hp9[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hp9k2[0-9][0-9] | hp9k31[0-9])
- basic_machine=m68000-hp
- ;;
- hp9k3[2-9][0-9])
- basic_machine=m68k-hp
- ;;
- hp9k6[0-9][0-9] | hp6[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hp9k7[0-79][0-9] | hp7[0-79][0-9])
- basic_machine=hppa1.1-hp
- ;;
- hp9k78[0-9] | hp78[0-9])
- # FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893)
- # FIXME: really hppa2.0-hp
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[0-9][13679] | hp8[0-9][13679])
- basic_machine=hppa1.1-hp
- ;;
- hp9k8[0-9][0-9] | hp8[0-9][0-9])
- basic_machine=hppa1.0-hp
- ;;
- hppa-next)
- os=-nextstep3
- ;;
- hppaosf)
- basic_machine=hppa1.1-hp
- os=-osf
- ;;
- hppro)
- basic_machine=hppa1.1-hp
- os=-proelf
- ;;
- i370-ibm* | ibm*)
- basic_machine=i370-ibm
- ;;
-# I'm not sure what "Sysv32" means. Should this be sysv3.2?
- i*86v32)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv32
- ;;
- i*86v4*)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv4
- ;;
- i*86v)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-sysv
- ;;
- i*86sol2)
- basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'`
- os=-solaris2
- ;;
- i386mach)
- basic_machine=i386-mach
- os=-mach
- ;;
- i386-vsta | vsta)
- basic_machine=i386-unknown
- os=-vsta
- ;;
- iris | iris4d)
- basic_machine=mips-sgi
- case $os in
- -irix*)
- ;;
- *)
- os=-irix4
- ;;
- esac
- ;;
- isi68 | isi)
- basic_machine=m68k-isi
- os=-sysv
- ;;
- m88k-omron*)
- basic_machine=m88k-omron
- ;;
- magnum | m3230)
- basic_machine=mips-mips
- os=-sysv
- ;;
- merlin)
- basic_machine=ns32k-utek
- os=-sysv
- ;;
- mingw32)
- basic_machine=i386-pc
- os=-mingw32
- ;;
- mingw32ce)
- basic_machine=arm-unknown
- os=-mingw32ce
- ;;
- miniframe)
- basic_machine=m68000-convergent
- ;;
- *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*)
- basic_machine=m68k-atari
- os=-mint
- ;;
- mips3*-*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`
- ;;
- mips3*)
- basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown
- ;;
- monitor)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- morphos)
- basic_machine=powerpc-unknown
- os=-morphos
- ;;
- msdos)
- basic_machine=i386-pc
- os=-msdos
- ;;
- ms1-*)
- basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'`
- ;;
- mvs)
- basic_machine=i370-ibm
- os=-mvs
- ;;
- ncr3000)
- basic_machine=i486-ncr
- os=-sysv4
- ;;
- netbsd386)
- basic_machine=i386-unknown
- os=-netbsd
- ;;
- netwinder)
- basic_machine=armv4l-rebel
- os=-linux
- ;;
- news | news700 | news800 | news900)
- basic_machine=m68k-sony
- os=-newsos
- ;;
- news1000)
- basic_machine=m68030-sony
- os=-newsos
- ;;
- news-3600 | risc-news)
- basic_machine=mips-sony
- os=-newsos
- ;;
- necv70)
- basic_machine=v70-nec
- os=-sysv
- ;;
- next | m*-next )
- basic_machine=m68k-next
- case $os in
- -nextstep* )
- ;;
- -ns2*)
- os=-nextstep2
- ;;
- *)
- os=-nextstep3
- ;;
- esac
- ;;
- nh3000)
- basic_machine=m68k-harris
- os=-cxux
- ;;
- nh[45]000)
- basic_machine=m88k-harris
- os=-cxux
- ;;
- nindy960)
- basic_machine=i960-intel
- os=-nindy
- ;;
- mon960)
- basic_machine=i960-intel
- os=-mon960
- ;;
- nonstopux)
- basic_machine=mips-compaq
- os=-nonstopux
- ;;
- np1)
- basic_machine=np1-gould
- ;;
- nsr-tandem)
- basic_machine=nsr-tandem
- ;;
- op50n-* | op60c-*)
- basic_machine=hppa1.1-oki
- os=-proelf
- ;;
- openrisc | openrisc-*)
- basic_machine=or32-unknown
- ;;
- os400)
- basic_machine=powerpc-ibm
- os=-os400
- ;;
- OSE68000 | ose68000)
- basic_machine=m68000-ericsson
- os=-ose
- ;;
- os68k)
- basic_machine=m68k-none
- os=-os68k
- ;;
- pa-hitachi)
- basic_machine=hppa1.1-hitachi
- os=-hiuxwe2
- ;;
- paragon)
- basic_machine=i860-intel
- os=-osf
- ;;
- pbd)
- basic_machine=sparc-tti
- ;;
- pbb)
- basic_machine=m68k-tti
- ;;
- pc532 | pc532-*)
- basic_machine=ns32k-pc532
- ;;
- pc98)
- basic_machine=i386-pc
- ;;
- pc98-*)
- basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium | p5 | k5 | k6 | nexgen | viac3)
- basic_machine=i586-pc
- ;;
- pentiumpro | p6 | 6x86 | athlon | athlon_*)
- basic_machine=i686-pc
- ;;
- pentiumii | pentium2 | pentiumiii | pentium3)
- basic_machine=i686-pc
- ;;
- pentium4)
- basic_machine=i786-pc
- ;;
- pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*)
- basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumpro-* | p6-* | 6x86-* | athlon-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*)
- basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pentium4-*)
- basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- pn)
- basic_machine=pn-gould
- ;;
- power) basic_machine=power-ibm
- ;;
- ppc) basic_machine=powerpc-unknown
- ;;
- ppc-*) basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppcle | powerpclittle | ppc-le | powerpc-little)
- basic_machine=powerpcle-unknown
- ;;
- ppcle-* | powerpclittle-*)
- basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64) basic_machine=powerpc64-unknown
- ;;
- ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ppc64le | powerpc64little | ppc64-le | powerpc64-little)
- basic_machine=powerpc64le-unknown
- ;;
- ppc64le-* | powerpc64little-*)
- basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'`
- ;;
- ps2)
- basic_machine=i386-ibm
- ;;
- pw32)
- basic_machine=i586-unknown
- os=-pw32
- ;;
- rdos)
- basic_machine=i386-pc
- os=-rdos
- ;;
- rom68k)
- basic_machine=m68k-rom68k
- os=-coff
- ;;
- rm[46]00)
- basic_machine=mips-siemens
- ;;
- rtpc | rtpc-*)
- basic_machine=romp-ibm
- ;;
- s390 | s390-*)
- basic_machine=s390-ibm
- ;;
- s390x | s390x-*)
- basic_machine=s390x-ibm
- ;;
- sa29200)
- basic_machine=a29k-amd
- os=-udi
- ;;
- sb1)
- basic_machine=mipsisa64sb1-unknown
- ;;
- sb1el)
- basic_machine=mipsisa64sb1el-unknown
- ;;
- sde)
- basic_machine=mipsisa32-sde
- os=-elf
- ;;
- sei)
- basic_machine=mips-sei
- os=-seiux
- ;;
- sequent)
- basic_machine=i386-sequent
- ;;
- sh)
- basic_machine=sh-hitachi
- os=-hms
- ;;
- sh5el)
- basic_machine=sh5le-unknown
- ;;
- sh64)
- basic_machine=sh64-unknown
- ;;
- sparclite-wrs | simso-wrs)
- basic_machine=sparclite-wrs
- os=-vxworks
- ;;
- sps7)
- basic_machine=m68k-bull
- os=-sysv2
- ;;
- spur)
- basic_machine=spur-unknown
- ;;
- st2000)
- basic_machine=m68k-tandem
- ;;
- stratus)
- basic_machine=i860-stratus
- os=-sysv4
- ;;
- sun2)
- basic_machine=m68000-sun
- ;;
- sun2os3)
- basic_machine=m68000-sun
- os=-sunos3
- ;;
- sun2os4)
- basic_machine=m68000-sun
- os=-sunos4
- ;;
- sun3os3)
- basic_machine=m68k-sun
- os=-sunos3
- ;;
- sun3os4)
- basic_machine=m68k-sun
- os=-sunos4
- ;;
- sun4os3)
- basic_machine=sparc-sun
- os=-sunos3
- ;;
- sun4os4)
- basic_machine=sparc-sun
- os=-sunos4
- ;;
- sun4sol2)
- basic_machine=sparc-sun
- os=-solaris2
- ;;
- sun3 | sun3-*)
- basic_machine=m68k-sun
- ;;
- sun4)
- basic_machine=sparc-sun
- ;;
- sun386 | sun386i | roadrunner)
- basic_machine=i386-sun
- ;;
- sv1)
- basic_machine=sv1-cray
- os=-unicos
- ;;
- symmetry)
- basic_machine=i386-sequent
- os=-dynix
- ;;
- t3e)
- basic_machine=alphaev5-cray
- os=-unicos
- ;;
- t90)
- basic_machine=t90-cray
- os=-unicos
- ;;
- tic54x | c54x*)
- basic_machine=tic54x-unknown
- os=-coff
- ;;
- tic55x | c55x*)
- basic_machine=tic55x-unknown
- os=-coff
- ;;
- tic6x | c6x*)
- basic_machine=tic6x-unknown
- os=-coff
- ;;
- tx39)
- basic_machine=mipstx39-unknown
- ;;
- tx39el)
- basic_machine=mipstx39el-unknown
- ;;
- tile*)
- basic_machine=tile-tilera
- os=-linux-gnu
- ;;
- toad1)
- basic_machine=pdp10-xkl
- os=-tops20
- ;;
- tower | tower-32)
- basic_machine=m68k-ncr
- ;;
- tpf)
- basic_machine=s390x-ibm
- os=-tpf
- ;;
- udi29k)
- basic_machine=a29k-amd
- os=-udi
- ;;
- ultra3)
- basic_machine=a29k-nyu
- os=-sym1
- ;;
- v810 | necv810)
- basic_machine=v810-nec
- os=-none
- ;;
- vaxv)
- basic_machine=vax-dec
- os=-sysv
- ;;
- vms)
- basic_machine=vax-dec
- os=-vms
- ;;
- vpp*|vx|vx-*)
- basic_machine=f301-fujitsu
- ;;
- vxworks960)
- basic_machine=i960-wrs
- os=-vxworks
- ;;
- vxworks68)
- basic_machine=m68k-wrs
- os=-vxworks
- ;;
- vxworks29k)
- basic_machine=a29k-wrs
- os=-vxworks
- ;;
- w65*)
- basic_machine=w65-wdc
- os=-none
- ;;
- w89k-*)
- basic_machine=hppa1.1-winbond
- os=-proelf
- ;;
- xbox)
- basic_machine=i686-pc
- os=-mingw32
- ;;
- xps | xps100)
- basic_machine=xps100-honeywell
- ;;
- ymp)
- basic_machine=ymp-cray
- os=-unicos
- ;;
- z8k-*-coff)
- basic_machine=z8k-unknown
- os=-sim
- ;;
- none)
- basic_machine=none-none
- os=-none
- ;;
-
-# Here we handle the default manufacturer of certain CPU types. It is in
-# some cases the only manufacturer, in others, it is the most popular.
- w89k)
- basic_machine=hppa1.1-winbond
- ;;
- op50n)
- basic_machine=hppa1.1-oki
- ;;
- op60c)
- basic_machine=hppa1.1-oki
- ;;
- romp)
- basic_machine=romp-ibm
- ;;
- mmix)
- basic_machine=mmix-knuth
- ;;
- rs6000)
- basic_machine=rs6000-ibm
- ;;
- vax)
- basic_machine=vax-dec
- ;;
- pdp10)
- # there are many clones, so DEC is not a safe bet
- basic_machine=pdp10-unknown
- ;;
- pdp11)
- basic_machine=pdp11-dec
- ;;
- we32k)
- basic_machine=we32k-att
- ;;
- sh[1234] | sh[24]a | sh[34]eb | sh[1234]le | sh[23]ele)
- basic_machine=sh-unknown
- ;;
- sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v)
- basic_machine=sparc-sun
- ;;
- cydra)
- basic_machine=cydra-cydrome
- ;;
- orion)
- basic_machine=orion-highlevel
- ;;
- orion105)
- basic_machine=clipper-highlevel
- ;;
- mac | mpw | mac-mpw)
- basic_machine=m68k-apple
- ;;
- pmac | pmac-mpw)
- basic_machine=powerpc-apple
- ;;
- *-unknown)
- # Make sure to match an already-canonicalized machine name.
- ;;
- *)
- echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2
- exit 1
- ;;
-esac
-
-# Here we canonicalize certain aliases for manufacturers.
-case $basic_machine in
- *-digital*)
- basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'`
- ;;
- *-commodore*)
- basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'`
- ;;
- *)
- ;;
-esac
-
-# Decode manufacturer-specific aliases for certain operating systems.
-
-if [ x"$os" != x"" ]
-then
-case $os in
- # First match some system type aliases
- # that might get confused with valid system types.
- # -solaris* is a basic system type, with this one exception.
- -solaris1 | -solaris1.*)
- os=`echo $os | sed -e 's|solaris1|sunos4|'`
- ;;
- -solaris)
- os=-solaris2
- ;;
- -svr4*)
- os=-sysv4
- ;;
- -unixware*)
- os=-sysv4.2uw
- ;;
- -gnu/linux*)
- os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'`
- ;;
- # First accept the basic system types.
- # The portable systems comes first.
- # Each alternative MUST END IN A *, to match a version number.
- # -sysv* is not here because it comes later, after sysvr4.
- -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \
- | -*vms* | -sco* | -esix* | -isc* | -aix* | -sunos | -sunos[34]*\
- | -hpux* | -unos* | -osf* | -luna* | -dgux* | -solaris* | -sym* \
- | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \
- | -aos* \
- | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \
- | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \
- | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \
- | -openbsd* | -solidbsd* \
- | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \
- | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \
- | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \
- | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \
- | -chorusos* | -chorusrdb* \
- | -cygwin* | -pe* | -psos* | -moss* | -proelf* | -rtems* \
- | -mingw32* | -linux-gnu* | -linux-newlib* | -linux-uclibc* \
- | -uxpv* | -beos* | -mpeix* | -udk* \
- | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \
- | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \
- | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \
- | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \
- | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \
- | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \
- | -skyos* | -haiku* | -rdos* | -toppers* | -drops*)
- # Remember, each alternative MUST END IN *, to match a version number.
- ;;
- -qnx*)
- case $basic_machine in
- x86-* | i*86-*)
- ;;
- *)
- os=-nto$os
- ;;
- esac
- ;;
- -nto-qnx*)
- ;;
- -nto*)
- os=`echo $os | sed -e 's|nto|nto-qnx|'`
- ;;
- -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \
- | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \
- | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*)
- ;;
- -mac*)
- os=`echo $os | sed -e 's|mac|macos|'`
- ;;
- -linux-dietlibc)
- os=-linux-dietlibc
- ;;
- -linux*)
- os=`echo $os | sed -e 's|linux|linux-gnu|'`
- ;;
- -sunos5*)
- os=`echo $os | sed -e 's|sunos5|solaris2|'`
- ;;
- -sunos6*)
- os=`echo $os | sed -e 's|sunos6|solaris3|'`
- ;;
- -opened*)
- os=-openedition
- ;;
- -os400*)
- os=-os400
- ;;
- -wince*)
- os=-wince
- ;;
- -osfrose*)
- os=-osfrose
- ;;
- -osf*)
- os=-osf
- ;;
- -utek*)
- os=-bsd
- ;;
- -dynix*)
- os=-bsd
- ;;
- -acis*)
- os=-aos
- ;;
- -atheos*)
- os=-atheos
- ;;
- -syllable*)
- os=-syllable
- ;;
- -386bsd)
- os=-bsd
- ;;
- -ctix* | -uts*)
- os=-sysv
- ;;
- -nova*)
- os=-rtmk-nova
- ;;
- -ns2 )
- os=-nextstep2
- ;;
- -nsk*)
- os=-nsk
- ;;
- # Preserve the version number of sinix5.
- -sinix5.*)
- os=`echo $os | sed -e 's|sinix|sysv|'`
- ;;
- -sinix*)
- os=-sysv4
- ;;
- -tpf*)
- os=-tpf
- ;;
- -triton*)
- os=-sysv3
- ;;
- -oss*)
- os=-sysv3
- ;;
- -svr4)
- os=-sysv4
- ;;
- -svr3)
- os=-sysv3
- ;;
- -sysvr4)
- os=-sysv4
- ;;
- # This must come after -sysvr4.
- -sysv*)
- ;;
- -ose*)
- os=-ose
- ;;
- -es1800*)
- os=-ose
- ;;
- -xenix)
- os=-xenix
- ;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- os=-mint
- ;;
- -aros*)
- os=-aros
- ;;
- -kaos*)
- os=-kaos
- ;;
- -zvmoe)
- os=-zvmoe
- ;;
- -none)
- ;;
- *)
- # Get rid of the `-' at the beginning of $os.
- os=`echo $os | sed 's/[^-]*-//'`
- echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2
- exit 1
- ;;
-esac
-else
-
-# Here we handle the default operating systems that come with various machines.
-# The value should be what the vendor currently ships out the door with their
-# machine or put another way, the most popular os provided with the machine.
-
-# Note that if you're going to try to match "-MANUFACTURER" here (say,
-# "-sun"), then you have to tell the case statement up towards the top
-# that MANUFACTURER isn't an operating system. Otherwise, code above
-# will signal an error saying that MANUFACTURER isn't an operating
-# system, and we'll never get to this point.
-
-case $basic_machine in
- score-*)
- os=-elf
- ;;
- spu-*)
- os=-elf
- ;;
- *-acorn)
- os=-riscix1.2
- ;;
- arm*-rebel)
- os=-linux
- ;;
- arm*-semi)
- os=-aout
- ;;
- c4x-* | tic4x-*)
- os=-coff
- ;;
- # This must come before the *-dec entry.
- pdp10-*)
- os=-tops20
- ;;
- pdp11-*)
- os=-none
- ;;
- *-dec | vax-*)
- os=-ultrix4.2
- ;;
- m68*-apollo)
- os=-domain
- ;;
- i386-sun)
- os=-sunos4.0.2
- ;;
- m68000-sun)
- os=-sunos3
- # This also exists in the configure program, but was not the
- # default.
- # os=-sunos4
- ;;
- m68*-cisco)
- os=-aout
- ;;
- mep-*)
- os=-elf
- ;;
- mips*-cisco)
- os=-elf
- ;;
- mips*-*)
- os=-elf
- ;;
- or32-*)
- os=-coff
- ;;
- *-tti) # must be before sparc entry or we get the wrong os.
- os=-sysv3
- ;;
- sparc-* | *-sun)
- os=-sunos4.1.1
- ;;
- *-be)
- os=-beos
- ;;
- *-haiku)
- os=-haiku
- ;;
- *-ibm)
- os=-aix
- ;;
- *-knuth)
- os=-mmixware
- ;;
- *-wec)
- os=-proelf
- ;;
- *-winbond)
- os=-proelf
- ;;
- *-oki)
- os=-proelf
- ;;
- *-hp)
- os=-hpux
- ;;
- *-hitachi)
- os=-hiux
- ;;
- i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent)
- os=-sysv
- ;;
- *-cbm)
- os=-amigaos
- ;;
- *-dg)
- os=-dgux
- ;;
- *-dolphin)
- os=-sysv3
- ;;
- m68k-ccur)
- os=-rtu
- ;;
- m88k-omron*)
- os=-luna
- ;;
- *-next )
- os=-nextstep
- ;;
- *-sequent)
- os=-ptx
- ;;
- *-crds)
- os=-unos
- ;;
- *-ns)
- os=-genix
- ;;
- i370-*)
- os=-mvs
- ;;
- *-next)
- os=-nextstep3
- ;;
- *-gould)
- os=-sysv
- ;;
- *-highlevel)
- os=-bsd
- ;;
- *-encore)
- os=-bsd
- ;;
- *-sgi)
- os=-irix
- ;;
- *-siemens)
- os=-sysv4
- ;;
- *-masscomp)
- os=-rtu
- ;;
- f30[01]-fujitsu | f700-fujitsu)
- os=-uxpv
- ;;
- *-rom68k)
- os=-coff
- ;;
- *-*bug)
- os=-coff
- ;;
- *-apple)
- os=-macos
- ;;
- *-atari*)
- os=-mint
- ;;
- *)
- os=-none
- ;;
-esac
-fi
-
-# Here we handle the case where we know the os, and the CPU type, but not the
-# manufacturer. We pick the logical manufacturer.
-vendor=unknown
-case $basic_machine in
- *-unknown)
- case $os in
- -riscix*)
- vendor=acorn
- ;;
- -sunos*)
- vendor=sun
- ;;
- -aix*)
- vendor=ibm
- ;;
- -beos*)
- vendor=be
- ;;
- -hpux*)
- vendor=hp
- ;;
- -mpeix*)
- vendor=hp
- ;;
- -hiux*)
- vendor=hitachi
- ;;
- -unos*)
- vendor=crds
- ;;
- -dgux*)
- vendor=dg
- ;;
- -luna*)
- vendor=omron
- ;;
- -genix*)
- vendor=ns
- ;;
- -mvs* | -opened*)
- vendor=ibm
- ;;
- -os400*)
- vendor=ibm
- ;;
- -ptx*)
- vendor=sequent
- ;;
- -tpf*)
- vendor=ibm
- ;;
- -vxsim* | -vxworks* | -windiss*)
- vendor=wrs
- ;;
- -aux*)
- vendor=apple
- ;;
- -hms*)
- vendor=hitachi
- ;;
- -mpw* | -macos*)
- vendor=apple
- ;;
- -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*)
- vendor=atari
- ;;
- -vos*)
- vendor=stratus
- ;;
- esac
- basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"`
- ;;
-esac
-
-echo $basic_machine$os
-exit
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "timestamp='"
-# time-stamp-format: "%:y-%02m-%02d"
-# time-stamp-end: "'"
-# End:
diff --git a/lib/diameter/autoconf/install-sh b/lib/diameter/autoconf/install-sh
deleted file mode 100755
index a5897de6ea..0000000000
--- a/lib/diameter/autoconf/install-sh
+++ /dev/null
@@ -1,519 +0,0 @@
-#!/bin/sh
-# install - install a program, script, or datafile
-
-scriptversion=2006-12-25.00
-
-# This originates from X11R5 (mit/util/scripts/install.sh), which was
-# later released in X11R6 (xc/config/util/install.sh) with the
-# following copyright and license.
-#
-# Copyright (C) 1994 X Consortium
-#
-# Permission is hereby granted, free of charge, to any person obtaining a copy
-# of this software and associated documentation files (the "Software"), to
-# deal in the Software without restriction, including without limitation the
-# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-# sell copies of the Software, and to permit persons to whom the Software is
-# furnished to do so, subject to the following conditions:
-#
-# The above copyright notice and this permission notice shall be included in
-# all copies or substantial portions of the Software.
-#
-# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
-# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC-
-# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-#
-# Except as contained in this notice, the name of the X Consortium shall not
-# be used in advertising or otherwise to promote the sale, use or other deal-
-# ings in this Software without prior written authorization from the X Consor-
-# tium.
-#
-#
-# FSF changes to this file are in the public domain.
-#
-# Calling this script install-sh is preferred over install.sh, to prevent
-# `make' implicit rules from creating a file called install from it
-# when there is no Makefile.
-#
-# This script is compatible with the BSD install script, but was written
-# from scratch.
-
-nl='
-'
-IFS=" "" $nl"
-
-# set DOITPROG to echo to test this script
-
-# Don't use :- since 4.3BSD and earlier shells don't like it.
-doit=${DOITPROG-}
-if test -z "$doit"; then
- doit_exec=exec
-else
- doit_exec=$doit
-fi
-
-# Put in absolute file names if you don't have them in your path;
-# or use environment vars.
-
-chgrpprog=${CHGRPPROG-chgrp}
-chmodprog=${CHMODPROG-chmod}
-chownprog=${CHOWNPROG-chown}
-cmpprog=${CMPPROG-cmp}
-cpprog=${CPPROG-cp}
-mkdirprog=${MKDIRPROG-mkdir}
-mvprog=${MVPROG-mv}
-rmprog=${RMPROG-rm}
-stripprog=${STRIPPROG-strip}
-
-posix_glob='?'
-initialize_posix_glob='
- test "$posix_glob" != "?" || {
- if (set -f) 2>/dev/null; then
- posix_glob=
- else
- posix_glob=:
- fi
- }
-'
-
-posix_mkdir=
-
-# Desired mode of installed file.
-mode=0755
-
-chgrpcmd=
-chmodcmd=$chmodprog
-chowncmd=
-mvcmd=$mvprog
-rmcmd="$rmprog -f"
-stripcmd=
-
-src=
-dst=
-dir_arg=
-dst_arg=
-
-copy_on_change=false
-no_target_directory=
-
-usage="\
-Usage: $0 [OPTION]... [-T] SRCFILE DSTFILE
- or: $0 [OPTION]... SRCFILES... DIRECTORY
- or: $0 [OPTION]... -t DIRECTORY SRCFILES...
- or: $0 [OPTION]... -d DIRECTORIES...
-
-In the 1st form, copy SRCFILE to DSTFILE.
-In the 2nd and 3rd, copy all SRCFILES to DIRECTORY.
-In the 4th, create DIRECTORIES.
-
-Options:
- --help display this help and exit.
- --version display version info and exit.
-
- -c (ignored)
- -C install only if different (preserve the last data modification time)
- -d create directories instead of installing files.
- -g GROUP $chgrpprog installed files to GROUP.
- -m MODE $chmodprog installed files to MODE.
- -o USER $chownprog installed files to USER.
- -s $stripprog installed files.
- -t DIRECTORY install into DIRECTORY.
- -T report an error if DSTFILE is a directory.
-
-Environment variables override the default commands:
- CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG
- RMPROG STRIPPROG
-"
-
-while test $# -ne 0; do
- case $1 in
- -c) ;;
-
- -C) copy_on_change=true;;
-
- -d) dir_arg=true;;
-
- -g) chgrpcmd="$chgrpprog $2"
- shift;;
-
- --help) echo "$usage"; exit $?;;
-
- -m) mode=$2
- case $mode in
- *' '* | *' '* | *'
-'* | *'*'* | *'?'* | *'['*)
- echo "$0: invalid mode: $mode" >&2
- exit 1;;
- esac
- shift;;
-
- -o) chowncmd="$chownprog $2"
- shift;;
-
- -s) stripcmd=$stripprog;;
-
- -t) dst_arg=$2
- shift;;
-
- -T) no_target_directory=true;;
-
- --version) echo "$0 $scriptversion"; exit $?;;
-
- --) shift
- break;;
-
- -*) echo "$0: invalid option: $1" >&2
- exit 1;;
-
- *) break;;
- esac
- shift
-done
-
-if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then
- # When -d is used, all remaining arguments are directories to create.
- # When -t is used, the destination is already specified.
- # Otherwise, the last argument is the destination. Remove it from $@.
- for arg
- do
- if test -n "$dst_arg"; then
- # $@ is not empty: it contains at least $arg.
- set fnord "$@" "$dst_arg"
- shift # fnord
- fi
- shift # arg
- dst_arg=$arg
- done
-fi
-
-if test $# -eq 0; then
- if test -z "$dir_arg"; then
- echo "$0: no input file specified." >&2
- exit 1
- fi
- # It's OK to call `install-sh -d' without argument.
- # This can happen when creating conditional directories.
- exit 0
-fi
-
-if test -z "$dir_arg"; then
- trap '(exit $?); exit' 1 2 13 15
-
- # Set umask so as not to create temps with too-generous modes.
- # However, 'strip' requires both read and write access to temps.
- case $mode in
- # Optimize common cases.
- *644) cp_umask=133;;
- *755) cp_umask=22;;
-
- *[0-7])
- if test -z "$stripcmd"; then
- u_plus_rw=
- else
- u_plus_rw='% 200'
- fi
- cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;;
- *)
- if test -z "$stripcmd"; then
- u_plus_rw=
- else
- u_plus_rw=,u+rw
- fi
- cp_umask=$mode$u_plus_rw;;
- esac
-fi
-
-for src
-do
- # Protect names starting with `-'.
- case $src in
- -*) src=./$src;;
- esac
-
- if test -n "$dir_arg"; then
- dst=$src
- dstdir=$dst
- test -d "$dstdir"
- dstdir_status=$?
- else
-
- # Waiting for this to be detected by the "$cpprog $src $dsttmp" command
- # might cause directories to be created, which would be especially bad
- # if $src (and thus $dsttmp) contains '*'.
- if test ! -f "$src" && test ! -d "$src"; then
- echo "$0: $src does not exist." >&2
- exit 1
- fi
-
- if test -z "$dst_arg"; then
- echo "$0: no destination specified." >&2
- exit 1
- fi
-
- dst=$dst_arg
- # Protect names starting with `-'.
- case $dst in
- -*) dst=./$dst;;
- esac
-
- # If destination is a directory, append the input filename; won't work
- # if double slashes aren't ignored.
- if test -d "$dst"; then
- if test -n "$no_target_directory"; then
- echo "$0: $dst_arg: Is a directory" >&2
- exit 1
- fi
- dstdir=$dst
- dst=$dstdir/`basename "$src"`
- dstdir_status=0
- else
- # Prefer dirname, but fall back on a substitute if dirname fails.
- dstdir=`
- (dirname "$dst") 2>/dev/null ||
- expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \
- X"$dst" : 'X\(//\)[^/]' \| \
- X"$dst" : 'X\(//\)$' \| \
- X"$dst" : 'X\(/\)' \| . 2>/dev/null ||
- echo X"$dst" |
- sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{
- s//\1/
- q
- }
- /^X\(\/\/\)[^/].*/{
- s//\1/
- q
- }
- /^X\(\/\/\)$/{
- s//\1/
- q
- }
- /^X\(\/\).*/{
- s//\1/
- q
- }
- s/.*/./; q'
- `
-
- test -d "$dstdir"
- dstdir_status=$?
- fi
- fi
-
- obsolete_mkdir_used=false
-
- if test $dstdir_status != 0; then
- case $posix_mkdir in
- '')
- # Create intermediate dirs using mode 755 as modified by the umask.
- # This is like FreeBSD 'install' as of 1997-10-28.
- umask=`umask`
- case $stripcmd.$umask in
- # Optimize common cases.
- *[2367][2367]) mkdir_umask=$umask;;
- .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;;
-
- *[0-7])
- mkdir_umask=`expr $umask + 22 \
- - $umask % 100 % 40 + $umask % 20 \
- - $umask % 10 % 4 + $umask % 2
- `;;
- *) mkdir_umask=$umask,go-w;;
- esac
-
- # With -d, create the new directory with the user-specified mode.
- # Otherwise, rely on $mkdir_umask.
- if test -n "$dir_arg"; then
- mkdir_mode=-m$mode
- else
- mkdir_mode=
- fi
-
- posix_mkdir=false
- case $umask in
- *[123567][0-7][0-7])
- # POSIX mkdir -p sets u+wx bits regardless of umask, which
- # is incompatible with FreeBSD 'install' when (umask & 300) != 0.
- ;;
- *)
- tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$
- trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0
-
- if (umask $mkdir_umask &&
- exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1
- then
- if test -z "$dir_arg" || {
- # Check for POSIX incompatibilities with -m.
- # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or
- # other-writeable bit of parent directory when it shouldn't.
- # FreeBSD 6.1 mkdir -m -p sets mode of existing directory.
- ls_ld_tmpdir=`ls -ld "$tmpdir"`
- case $ls_ld_tmpdir in
- d????-?r-*) different_mode=700;;
- d????-?--*) different_mode=755;;
- *) false;;
- esac &&
- $mkdirprog -m$different_mode -p -- "$tmpdir" && {
- ls_ld_tmpdir_1=`ls -ld "$tmpdir"`
- test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1"
- }
- }
- then posix_mkdir=:
- fi
- rmdir "$tmpdir/d" "$tmpdir"
- else
- # Remove any dirs left behind by ancient mkdir implementations.
- rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null
- fi
- trap '' 0;;
- esac;;
- esac
-
- if
- $posix_mkdir && (
- umask $mkdir_umask &&
- $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir"
- )
- then :
- else
-
- # The umask is ridiculous, or mkdir does not conform to POSIX,
- # or it failed possibly due to a race condition. Create the
- # directory the slow way, step by step, checking for races as we go.
-
- case $dstdir in
- /*) prefix='/';;
- -*) prefix='./';;
- *) prefix='';;
- esac
-
- eval "$initialize_posix_glob"
-
- oIFS=$IFS
- IFS=/
- $posix_glob set -f
- set fnord $dstdir
- shift
- $posix_glob set +f
- IFS=$oIFS
-
- prefixes=
-
- for d
- do
- test -z "$d" && continue
-
- prefix=$prefix$d
- if test -d "$prefix"; then
- prefixes=
- else
- if $posix_mkdir; then
- (umask=$mkdir_umask &&
- $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break
- # Don't fail if two instances are running concurrently.
- test -d "$prefix" || exit 1
- else
- case $prefix in
- *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;;
- *) qprefix=$prefix;;
- esac
- prefixes="$prefixes '$qprefix'"
- fi
- fi
- prefix=$prefix/
- done
-
- if test -n "$prefixes"; then
- # Don't fail if two instances are running concurrently.
- (umask $mkdir_umask &&
- eval "\$doit_exec \$mkdirprog $prefixes") ||
- test -d "$dstdir" || exit 1
- obsolete_mkdir_used=true
- fi
- fi
- fi
-
- if test -n "$dir_arg"; then
- { test -z "$chowncmd" || $doit $chowncmd "$dst"; } &&
- { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } &&
- { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false ||
- test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1
- else
-
- # Make a couple of temp file names in the proper directory.
- dsttmp=$dstdir/_inst.$$_
- rmtmp=$dstdir/_rm.$$_
-
- # Trap to clean up those temp files at exit.
- trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0
-
- # Copy the file name to the temp name.
- (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") &&
-
- # and set any options; do chmod last to preserve setuid bits.
- #
- # If any of these fail, we abort the whole thing. If we want to
- # ignore errors from any of these, just make sure not to ignore
- # errors from the above "$doit $cpprog $src $dsttmp" command.
- #
- { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } &&
- { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } &&
- { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } &&
- { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } &&
-
- # If -C, don't bother to copy if it wouldn't change the file.
- if $copy_on_change &&
- old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` &&
- new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` &&
-
- eval "$initialize_posix_glob" &&
- $posix_glob set -f &&
- set X $old && old=:$2:$4:$5:$6 &&
- set X $new && new=:$2:$4:$5:$6 &&
- $posix_glob set +f &&
-
- test "$old" = "$new" &&
- $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1
- then
- rm -f "$dsttmp"
- else
- # Rename the file to the real destination.
- $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null ||
-
- # The rename failed, perhaps because mv can't rename something else
- # to itself, or perhaps because mv is so ancient that it does not
- # support -f.
- {
- # Now remove or move aside any old file at destination location.
- # We try this two ways since rm can't unlink itself on some
- # systems and the destination file might be busy for other
- # reasons. In this case, the final cleanup might fail but the new
- # file should still install successfully.
- {
- test ! -f "$dst" ||
- $doit $rmcmd -f "$dst" 2>/dev/null ||
- { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null &&
- { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; }
- } ||
- { echo "$0: cannot unlink or rename $dst" >&2
- (exit 1); exit 1
- }
- } &&
-
- # Now rename the file to the real destination.
- $doit $mvcmd "$dsttmp" "$dst"
- }
- fi || exit 1
-
- trap '' 0
- fi
-done
-
-# Local variables:
-# eval: (add-hook 'write-file-hooks 'time-stamp)
-# time-stamp-start: "scriptversion="
-# time-stamp-format: "%:y-%02m-%02d.%02H"
-# time-stamp-end: "$"
-# End:
diff --git a/lib/diameter/configure.in b/lib/diameter/configure.in
deleted file mode 100644
index 8acfb28fed..0000000000
--- a/lib/diameter/configure.in
+++ /dev/null
@@ -1,137 +0,0 @@
-dnl Process this file with autoconf to produce a configure script.
-
-dnl %CopyrightBegin%
-dnl
-dnl Copyright Ericsson AB 1998-2011. All Rights Reserved.
-dnl
-dnl The contents of this file are subject to the Erlang Public License,
-dnl Version 1.1, (the "License"); you may not use this file except in
-dnl compliance with the License. You should have received a copy of the
-dnl Erlang Public License along with this software. If not, it can be
-dnl retrieved online at http://www.erlang.org/.
-dnl
-dnl Software distributed under the License is distributed on an "AS IS"
-dnl basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-dnl the License for the specific language governing rights and limitations
-dnl under the License.
-dnl
-dnl %CopyrightEnd%
-
-if test "x$no_recursion" != "xyes" -a "x$OVERRIDE_CONFIG_CACHE" = "x"; then
- # We do not want to use a common cache!
- cache_file=/dev/null
-fi
-
-AC_INIT(vsn.mk)
-
-dnl <STANDALONE DIAMETER>
-dnl This is needed for diameters own environment to rock,
-dnl but since we are now integrated into OTP, we skip it.
-dnl In order to build stand-alone we need atleast 2.63...
-dnl AC_PREREQ(2.63)
-dnl </STANDALONE DIAMETER>
-
-dnl LM_PRECIOUS_VARS
-
-
-dnl The OTP source tree is the default "top",
-dnl but we can also define our own top: DIAMETER_TOP
-
-if test -n "$ERL_TOP" || test -d $ERL_TOP ; then
- erl_top=${ERL_TOP}
- AC_CONFIG_AUX_DIRS($erl_top/erts/autoconf)
- DIAMETER_TOP=${ERL_TOP}/lib/diameter
-else
- AC_ARG_VAR(DIAMETER_TOP, [Diameter top source directory])
- if test -n "$DIAMETER_TOP" || test -d $DIAMETER_TOP ; then
- AC_CONFIG_AUX_DIRS(autoconf)
- fi
-
- dnl <STANDALONE DIAMETER>
- dnl AC_ERLANG_SUBST_ROOT_DIR
- dnl AC_ERLANG_SUBST_LIB_DIR
- dnl AC_ERLANG_CHECK_LIB([erl_docgen],
- dnl [echo "erl_docgen version \"$ERLANG_LIB_VER_erl_docgen\""
- dnl echo "is installed in \"$ERLANG_LIB_DIR_erl_docgen\""],
- dnl [AC_MSG_ERROR([erl_docgen was not found!])])
- dnl AC_ERLANG_CHECK_LIB([test_server],
- dnl [echo "test_server version \"$ERLANG_LIB_VER_test_server\""
- dnl echo "is installed in \"$ERLANG_LIB_DIR_test_server\""],
- dnl [AC_MSG_ERROR([test_server was not found!])])
- dnl </STANDALONE DIAMETER>
-
-fi
-
-AC_SUBST(DIAMETER_TOP)
-export DIAMETER_TOP
-
-if test "X$host" != "Xfree_source" -a "X$host" != "Xwin32"; then
- AC_CANONICAL_HOST
-fi
-
-TARGET=$host
-AC_SUBST(TARGET)
-
-if test "x$erl_top" = "x"; then
- dnl STANDALONE DIAMETER
- AC_CHECK_PROGS(XSLTPROC, xsltproc)
- if test -z "$XSLTPROC"; then
- echo "xsltproc" >> doc/CONF_INFO
- AC_MSG_WARN([No 'xsltproc' command found: the documentation can not be built])
- fi
-
- AC_CHECK_PROGS(FOP, fop)
- if test -z "$FOP"; then
- AC_MSG_ERROR([No 'fop' command found: the documentation can not be built])
- fi
-fi
-
-dnl
-dnl We can live with Solaris /usr/ucb/install
-dnl
-case $host in
- *-*-solaris*|free_source)
- if test -x /usr/ucb/install; then
- INSTALL="/usr/ucb/install -c"
- fi
- ;;
- *)
- ;;
-esac
-AC_PROG_INSTALL
-LM_PROG_INSTALL_DIR
-
-case $host_os in
- darwin*)
- dnl Need to preserve modification time on archives;
- dnl otherwise, ranlib has to be run on archives
- dnl again after installation.
- INSTALL_DATA="$INSTALL_DATA -p";;
- *)
- ;;
-esac
-
-dnl
-dnl Fix for Tilera install permissions
-dnl
-
-case $build in
- *tile*)
- INSTALL_PROGRAM="$INSTALL_PROGRAM -m755"
- INSTALL_SCRIPT="$INSTALL_SCRIPT -m755"
- ;;
- *)
- ;;
-esac
-
-
-dnl <STANDALONE DIAMETER>
-dnl AC_ERLANG_NEED_ERL([$PATH])
-dnl AC_ERLANG_NEED_ERLC([$PATH])
-dnl </STANDALONE DIAMETER>
-
-AC_OUTPUT(
- Makefile:Makefile.in
- make/$host/rules.mk:make/rules.mk.in
- )
-
diff --git a/lib/diameter/doc/.gitignore b/lib/diameter/doc/.gitignore
new file mode 100644
index 0000000000..b634bdd7ba
--- /dev/null
+++ b/lib/diameter/doc/.gitignore
@@ -0,0 +1,4 @@
+
+/html/
+/man*/
+/pdf/
diff --git a/lib/diameter/doc/src/Makefile b/lib/diameter/doc/src/Makefile
index 59ca660797..d1d5e8f869 100644
--- a/lib/diameter/doc/src/Makefile
+++ b/lib/diameter/doc/src/Makefile
@@ -16,13 +16,8 @@
#
# %CopyrightEnd%
-ifneq ($(ERL_TOP),)
include $(ERL_TOP)/make/target.mk
include $(ERL_TOP)/make/$(TARGET)/otp.mk
-else
-include $(DIAMETER_TOP)/make/target.mk
-include $(DIAMETER_TOP)/make/$(TARGET)/rules.mk
-endif
include ../../vsn.mk
@@ -155,11 +150,8 @@ info:
# ----------------------------------------------------
# Release Target
# ----------------------------------------------------
-ifneq ($(ERL_TOP),)
+
include $(ERL_TOP)/make/otp_release_targets.mk
-else
-include $(DIAMETER_TOP)/make/release_targets.mk
-endif
release_docs_spec: $(LOCAL)docs
$(INSTALL_DIR) "$(RELSYSDIR)/doc/pdf"
diff --git a/lib/diameter/doc/src/diameter.xml b/lib/diameter/doc/src/diameter.xml
index b8652a7482..80863f8eff 100644
--- a/lib/diameter/doc/src/diameter.xml
+++ b/lib/diameter/doc/src/diameter.xml
@@ -375,10 +375,15 @@ is meant in the sense of <c>eval([E|A])</c>.</p>
<warning>
<p>
-Beware of using fun expressions of the form <c>fun Name/Arity</c> (not
-fun Mod:Name/Arity) in situations in which the fun is not short-lived
+Beware of using fun expressions of the form <c>fun Name/Arity</c> in
+situations in which the fun is not short-lived
and code is to be upgraded at runtime since any processes retaining
-such a fun will have a reference to old code.</p>
+such a fun will have a reference to old code.
+In particular, such a value is typically inappropriate in
+configuration passed to <seealso
+marker="#start_service">start_service/2</seealso> or
+<seealso
+marker="#add_transport">add_transport/2</seealso>.</p>
</warning>
<marker id="peer_filter"/>
@@ -699,6 +704,64 @@ the application's <seealso marker="diameter_dict">dictionary</seealso>
file.</p>
</item>
+<tag><c>{restrict_connections, false
+ | node
+ | nodes
+ | [node()]
+ | diameter:evaluable()}</c></tag>
+<item>
+<p>
+Specifies the degree to which multiple transport connections to the
+same peer are accepted by the service.</p>
+
+<p>
+If type <c>[node()]</c> then a connection is rejected if another already
+exists on any of the specified nodes.
+Values of type <c>false</c>, <c>node</c>, <c>nodes</c> or
+<c>diameter:evaluable()</c> are equivalent to values <c>[]</c>,
+<c>[node()]</c>, <c>[node()|nodes()]</c> and the evaluated value,
+respectively, evaluation of each expression taking place whenever a
+new connection is to be established.
+Note that <c>false</c> allows an unlimited number of connections to be
+established with the same peer.</p>
+
+<p>
+Multiple connections are independent and governed
+by their own peer and watchdog state machines.</p>
+
+<p>
+Defaults to <c>nodes</c>.</p>
+</item>
+
+<tag><c>{sequence, {H,N} | <seealso
+ marker="diameter#evaluable">diameter:evaluable()</seealso>}</c></tag>
+<item>
+<p>
+Specifies a constant value <c>H</c> for the topmost <c>32-N</c> bits
+of 32-bit End-to-End and Hop-by-Hop identifiers generated
+by the service, either explicitly or as a return value of a function
+to be evaluated at <seealso
+marker="diameter#start_service">diameter:start_service/2</seealso>.
+In particular, an identifier <c>Id</c> is mapped to a new identifier
+as follows.</p>
+<code>
+(H bsl N) bor (Id band ((1 bsl N) - 1))
+</code>
+<p>
+Note that RFC 3588 requires that End-to-End identifiers remain unique
+for a period of at least 4 minutes and that this and the call rate
+place a lower bound on the appropriate values of <c>N</c>:
+at a rate of <c>R</c> requests per second an <c>N</c>-bit counter
+traverses all of its values in <c>(1 bsl N) div (R*60)</c> minutes so
+the bound is <c>4*R*60 =&lt; 1 bsl N</c>.</p>
+
+<p><c>N</c> must lie in the range <c>0..32</c> and <c>H</c> must be a
+non-negative integer less than <c>1 bsl (32-N)</c>.</p>
+
+<p>
+Defaults to <c>{0,32}</c>.</p>
+</item>
+
</taglist>
<marker id="transport_opt"/>
@@ -850,7 +913,7 @@ reconnection attempts, as required by RFC 3539.</p>
For a listening transport, the timer specifies the time after which a
previously connected peer will be forgotten: a connection after this time is
regarded as an initial connection rather than a reestablishment,
-causing the RFC 3539 state machine to pass to state OPEN rather than
+causing the RFC 3539 state machine to pass to state OKAY rather than
REOPEN.
Note that these semantics are not governed by the RFC and
that a listening transport's <c>reconnect_timer</c> should be greater
@@ -1149,6 +1212,12 @@ DPA or timeout.</p>
<desc>
<p>
Return information about a started service.
+Requesting info for an unknown service causes <c>undefined</c> to be
+returned.
+Requesting a list of items causes a tagged list to be
+returned.</p>
+
+<p>
<c>Item</c> can be one of the following.</p>
<taglist>
@@ -1419,13 +1488,27 @@ The Diameter-level statistics returned by <c>transport</c> and
<c>connections</c> info are based upon these entries.</p>
</item>
-</taglist>
-
+<tag><c><seealso marker="diameter_app#peer_ref">diameter_app:peer_ref()</seealso></c></tag>
+<item>
<p>
-Requesting info for an unknown service causes <c>undefined</c> to be
-returned.
-Requesting a list of items causes a tagged list to be
-returned.</p>
+Return transport configuration associated with a single peer, as
+passed to <seealso marker="#add_transport">add_transport/2</seealso>.
+The returned list is empty if the peer is unknown.
+Otherwise it contains the <c>ref</c>, <c>type</c> and <c>options</c>
+tuples as in <c>transport</c> and <c>connections</c> info above.
+For example:</p>
+
+<code>
+[{ref,#Ref&lt;0.0.0.61>},
+ {type,accept},
+ {options,[{transport_module,diameter_tcp},
+ {transport_config,[{reuseaddr,true},
+ {ip,{127,0,0,1}},
+ {port,3868}]}]}]
+</code>
+</item>
+
+</taglist>
<marker id="services"/>
</desc>
diff --git a/lib/diameter/doc/src/diameter_app.xml b/lib/diameter/doc/src/diameter_app.xml
index 4a4b212787..9d8a6568eb 100644
--- a/lib/diameter/doc/src/diameter_app.xml
+++ b/lib/diameter/doc/src/diameter_app.xml
@@ -112,7 +112,8 @@ and, for the call-specific callbacks, any extra arguments passed to
<item>
<p>
A record containing the identities of
-the local Diameter node and the remote Diameter peer having an established transport
+the local Diameter node and the remote Diameter peer having an
+established transport
connection, as well as the capabilities as
determined by capabilities exchange.
Each field of the record is a 2-tuple consisting of
@@ -168,13 +169,14 @@ Fields should not be set in return values except as documented.</p>
<tag><c>peer_ref() = term()</c></tag>
<item>
<p>
-A term identifying a transport connection with a Diameter peer.
-Should be treated opaquely.</p>
+A term identifying a transport connection with a Diameter peer.</p>
</item>
<marker id="peer"/>
-<tag><c>peer() = {<seealso marker="#peer_ref">peer_ref()</seealso>, <seealso marker="#capabilities">capabilities()</seealso>}</c></tag>
+<tag><c>peer() =
+ {<seealso marker="#peer_ref">peer_ref()</seealso>,
+ <seealso marker="#capabilities">capabilities()</seealso>}</c></tag>
<item>
<p>
A tuple representing a Diameter peer connection.</p>
@@ -219,10 +221,29 @@ process.</p>
</type>
<desc>
<p>
-Invoked when a transport connection has been established
-and a successful capabilities exchange has indicated that the peer
-supports the Diameter application of the application on which
-the callback module in question has been configured.</p>
+Invoked to signal the availability of a peer connection.
+In particular, capabilities exchange with the peer has indicated
+support for the application in question, the RFC 3539 watchdog state
+machine for the connection has reached state <c>OKAY</c> and Diameter
+messages can be both sent and received.</p>
+
+<note>
+<p>
+A watchdog state machine can reach state <c>OKAY</c> from state
+<c>SUSPECT</c> without a new capabilities exchange taking place.
+A new transport connection (and capabilities exchange) results in a
+new peer_ref().</p>
+</note>
+
+<note>
+<p>
+There is no requirement that a callback return before incoming
+requests are received: <seealso
+marker="#handle_request">handle_request/3</seealso> callbacks must be
+handled independently of <seealso
+marker="#peer_up">peer_up/3</seealso> and <seealso
+marker="#peer_down">peer_down/3</seealso>.</p>
+</note>
<marker id="peer_down"/>
</desc>
@@ -238,36 +259,42 @@ the callback module in question has been configured.</p>
</type>
<desc>
<p>
-Invoked when a transport connection has been lost following a previous
-call to <seealso marker="#peer_up">peer_up/3</seealso>.</p>
+Invoked to signal that a peer connection is no longer available
+following a previous call to <seealso
+marker="#peer_up">peer_up/3</seealso>.
+In particular, that the RFC 3539 watchdog state machine for the
+connection has left state <c>OKAY</c> and the peer will no longer be a
+candidate in <seealso marker="#pick_peer">pick_peer()</seealso>
+callbacks.</p>
<marker id="pick_peer"/>
</desc>
</func>
<func>
-<name>Mod:pick_peer(Candidates, Reserved, SvcName, State)
- -> {ok, Peer} | {Peer, NewState} | false</name>
+<name>Mod:pick_peer(Candidates, _Reserved, SvcName, State)
+ -> Selection | false</name>
<fsummary>Select a target peer for an outgoing request.</fsummary>
<type>
<v>Candidates = [<seealso marker="#peer">peer()</seealso>]</v>
-<v>Peer = <seealso marker="#peer">peer()</seealso> | false</v>
<v>SvcName = <seealso marker="diameter#service_name">diameter:service_name()</seealso></v>
<v>State = NewState = <seealso marker="#state">state()</seealso></v>
+<v>Selection = {ok, Peer} | {Peer, NewState}</v>
+<v>Peer = <seealso marker="#peer">peer()</seealso> | false</v>
</type>
<desc>
<p>
Invoked as a consequence of a call to <seealso
marker="diameter#call">diameter:call/4</seealso> to select a destination
-peer for an outgoing request, the return value indicating the selected
-peer.</p>
+peer for an outgoing request.
+The return value indicates the selected peer.</p>
<p>
-The candidate peers list will only include those
-which are selected by any <c>filter</c> option specified in the call to
-<seealso marker="diameter#call">diameter:call/4</seealso>, and only
-those which have indicated support for the Diameter application in
-question.
+The candidate list contains only those peers that have advertised
+support for the Diameter application in question during capabilities
+exchange, that have not been excluded by a <c>filter</c> option in
+the call to <seealso marker="diameter#call">diameter:call/4</seealso>
+and whose watchdog state machine is in the <c>OKAY</c> state.
The order of the elements is unspecified except that any
peers whose Origin-Host and Origin-Realm matches that of the
outgoing request (in the sense of a <c>{filter, {all, [host, realm]}}</c>
@@ -275,36 +302,40 @@ option to <seealso marker="diameter#call">diameter:call/4</seealso>)
will be placed at the head of the list.</p>
<p>
-The return values <c>false</c> and <c>{false, State}</c> are
-equivalent when callback state is mutable, as are
-<c>{ok, Peer}</c> and <c>{Peer, State}</c>.
-Returning a peer as <c>false</c> causes <c>{error, no_connection}</c>
-to be returned from <seealso marker="diameter#call">diameter:call/4</seealso>.
-Returning a <seealso marker="#peer">peer()</seealso> from an initial
-pick_peer/4 callback will result in a
-<seealso marker="#prepare_request">prepare_request/3</seealso> callback
-followed by either <seealso
-marker="#handle_answer">handle_answer/4</seealso>
+A callback that returns a peer() will be followed by a
+<seealso marker="#prepare_request">prepare_request/3</seealso>
+callback and, if the latter indicates that the request should be sent,
+by either <seealso marker="#handle_answer">handle_answer/4</seealso>
or <seealso marker="#handle_error">handle_error/4</seealso> depending
on whether or not an answer message is received from the peer.
-If transport with the peer is lost before this then a new <seealso
-marker="#pick_peer">pick_peer/4</seealso> callback takes place to
-select an alternate peer.</p>
-
-<p>
-Note that there is no guarantee that a <seealso
+If the transport becomes unavailable after <seealso
+marker="prepare_request">prepare_request/3</seealso> then a new <seealso
+marker="#pick_peer">pick_peer/4</seealso> callback may take place to
+failover to an alternate peer, after which <seealso
+marker="#prepare_retransmit">prepare_retransmit/3</seealso> takes the
+place of <seealso
+marker="prepare_request">prepare_request/3</seealso> in resending the
+request.
+There is no guarantee that a <seealso
marker="#pick_peer">pick_peer/4</seealso> callback to select
-an alternate peer will be followed by any additional callbacks, only
-that the initial <seealso
-marker="#pick_peer">pick_peer/4</seealso> will be, since a
+an alternate peer will be followed by any additional callbacks since a
retransmission to an alternate peer is abandoned if an answer is
received from a previously selected peer.</p>
+<p>
+Returning <c>false</c> or <c>{false, NewState}</c> causes <c>{error,
+no_connection}</c> to be returned from <seealso
+marker="diameter#call">diameter:call/4</seealso>.</p>
+
+<p>
+The return values <c>false</c> and <c>{false, State}</c> (that is,
+<c>NewState = State</c>) are equivalent, as are <c>{ok, Peer}</c> and
+<c>{Peer, State}</c>.</p>
+
<note>
<p>
-<c>{Peer, NewState}</c> and its equivalents can only be returned if
-the Diameter application in question was
-configured with the <seealso
+The return value <c>{Peer, NewState}</c> is only allowed if
+the Diameter application in question was configured with the <seealso
marker="diameter#application_opt">diameter:application_opt()</seealso>
<c>{call_mutates_state, true}</c>.
Otherwise, the <c>State</c> argument is always
@@ -325,33 +356,45 @@ or <seealso marker="#peer_down">peer_down/3</seealso> callback.</p>
<v>Packet = <seealso marker="#packet">packet()</seealso></v>
<v>SvcName = <seealso marker="diameter#service_name">diameter:service_name()</seealso></v>
<v>Peer = <seealso marker="#peer">peer()</seealso></v>
-<v>Action = {send, <seealso marker="#packet">packet()</seealso> | <seealso marker="#message">message()</seealso>} | {discard, Reason} | discard</v>
+<v>Action = Send | Discard | {eval_packet, Action, PostF}</v>
+<v>Send = {send, <seealso marker="#packet">packet()</seealso>
+ | <seealso marker="#message">message()</seealso>}</v>
+<v>Discard = {discard, Reason} | discard</v>
+<v>PostF =
+ <seealso marker="diameter#evaluable">diameter:evaluable()</seealso>}</v>
</type>
<desc>
<p>
Invoked to return a request for encoding and transport.
-Allows the sender to access the selected peer's capabilities
-in order to set (for example) <c>Destination-Host</c> and/or
-<c>Destination-Realm</c> in the outgoing request, although the
-callback need not be limited to this usage.
+Allows the sender to use the selected peer's capabilities
+to modify the outgoing request.
Many implementations may simply want to return <c>{send, Packet}</c>.</p>
<p>
-A returned <seealso marker="#packet">packet()</seealso> should set the request to be encoded in its
+A returned <seealso marker="#packet">packet()</seealso> should set the
+request to be encoded in its
<c>msg</c> field and can set the <c>transport_data</c> field in order
-to pass information to the transport module.
+to pass information to the transport process.
Extra arguments passed to <seealso
marker="diameter#call">diameter:call/4</seealso> can be used to
-communicate transport data to the callback.
-A returned <seealso marker="#packet">packet()</seealso> can also set the <c>header</c> field to a
-<c>#diameter_header{}</c> record in order to specify values that should
-be preserved in the outgoing request, although this should typically
-not be necessary and allows the callback to set header values
-inappropriately.
+communicate transport (or any other) data to the callback.</p>
+
+<p>
+A returned <seealso marker="#packet">packet()</seealso> can set
+the <c>header</c> field to a
+<c>#diameter_header{}</c> in order to specify values that should
+be preserved in the outgoing request, values otherwise being those in
+the header record contained in <c>Packet</c>.
A returned <c>length</c>, <c>cmd_code</c> or <c>application_id</c> is
ignored.</p>
<p>
+A returned <c>PostF</c> will be evaluated on any encoded
+<c>#diameter_packet{}</c> prior to transmission, the <c>bin</c> field
+containing the encoded binary.
+The return value is ignored.</p>
+
+<p>
Returning <c>{discard, Reason}</c> causes the request to be aborted
and the <seealso
marker="diameter#call">diameter:call/4</seealso> for which the
@@ -364,13 +407,18 @@ discarded}</c>.</p>
</func>
<func>
-<name>Mod:prepare_retransmit(Packet, SvcName, Peer) -> Result</name>
+<name>Mod:prepare_retransmit(Packet, SvcName, Peer) -> Action</name>
<fsummary>Return a request for encoding and retransmission.</fsummary>
<type>
<v>Packet = <seealso marker="#packet">packet()</seealso></v>
<v>SvcName = <seealso marker="diameter#service_name">diameter:service_name()</seealso></v>
-<v>Peer = <seealso marker="#peer">peer()</seealso></v>
-<v>Result = {send, <seealso marker="#packet">packet()</seealso> | <seealso marker="#message">message()</seealso>} | {discard, Reason} | discard</v>
+<v>Peer = <seealso marker="#peer">peer()</seealso></v>
+<v>Action = Send | Discard | {eval_packet, Action, PostF}</v>
+<v>Send = {send, <seealso marker="#packet">packet()</seealso>
+ | <seealso marker="#message">message()</seealso>}</v>
+<v>Discard = {discard, Reason} | discard</v>
+<v>PostF =
+ <seealso marker="diameter#evaluable">diameter:evaluable()</seealso>}</v>
</type>
<desc>
<p>
@@ -378,8 +426,9 @@ Invoked to return a request for encoding and retransmission.
Has the same role as <seealso
marker="#prepare_request">prepare_request/3</seealso> in the case that
a peer connection is lost and an alternate peer selected but the
-argument <seealso marker="#packet">packet()</seealso> is as returned by the initial
-<c>prepare_request/3</c>.</p>
+argument <seealso marker="#packet">packet()</seealso> is as returned
+by the initial <seealso
+marker="#prepare_request">prepare_request/3</seealso>.</p>
<p>
Returning <c>{discard, Reason}</c> causes the request to be aborted
@@ -406,40 +455,41 @@ discarded}</c>.</p>
<desc>
<p>
Invoked when an answer message is received from a peer.
-The return value is returned from the call to <seealso
-marker="diameter#call">diameter:call/4</seealso> for which the
-callback takes place unless the <c>detach</c> option was
-specified.</p>
+The return value is returned from <seealso
+marker="diameter#call">diameter:call/4</seealso> unless the
+<c>detach</c> option was specified.</p>
<p>
-The decoded answer record is in the <c>msg</c> field of the argument
-<seealso marker="#packet">packet()</seealso>,
-the undecoded binary in the <c>packet</c> field.
+The decoded answer record and undecoded binary are in the <c>msg</c>
+and <c>bin</c> fields of the argument
+<seealso marker="#packet">packet()</seealso> respectively.
<c>Request</c> is the outgoing request message as was returned from
<seealso marker="#prepare_request">prepare_request/3</seealso> or
-<seealso marker="#prepare_retransmit">prepare_retransmit/3</seealso>
-before the request was passed to the transport.</p>
+<seealso
+ marker="#prepare_retransmit">prepare_retransmit/3</seealso>.</p>
<p>
For any given call to <seealso
marker="diameter#call">diameter:call/4</seealso> there is at most one
-call to the handle_answer callback of the application in question: any
+<seealso marker="#handle_answer">handle_answer/4</seealso> callback: any
duplicate answer (due to retransmission or otherwise) is discarded.
-Similarly, only one of <c>handle_answer/4</c> or <c>handle_error/4</c> is
-called for any given request.</p>
+Similarly, only one of <seealso
+marker="#handle_answer">handle_answer/4</seealso> or
+<seealso marker="#handle_error">handle_error/4</seealso> is
+called.</p>
<p>
By default, an incoming answer message that cannot be successfully
-decoded causes the request process in question to fail, causing the
-relevant call to <seealso
-marker="diameter#call">diameter:call/4</seealso>
-to return <c>{error, failure} (unless the <c>detach</c> option was
-specified)</c>.
-In particular, there is no <c>handle_error/4</c> callback in this
+decoded causes the request process to fail, causing
+<seealso marker="diameter#call">diameter:call/4</seealso>
+to return <c>{error, failure}</c> unless the <c>detach</c> option was
+specified.
+In particular, there is no <seealso
+marker="#handle_error">handle_error/4</seealso> callback in this
case.
-Application configuration may change this behaviour as described for
-<seealso
-marker="diameter#start_service">diameter:start_service/2</seealso>.</p>
+The <seealso
+marker="diameter#application_opt">diameter:application_opt()</seealso>
+<c>answer_errors</c> can be set to change this behaviour.</p>
<marker id="handle_error"/>
</desc>
@@ -457,21 +507,20 @@ marker="diameter#start_service">diameter:start_service/2</seealso>.</p>
</type>
<desc>
<p>
-Invoked when an error occurs before an answer message is received from
-a peer in response to an outgoing request.
-The return value is returned from the call to <seealso
-marker="diameter#call">diameter:call/4</seealso> for which the
-callback takes place (unless the <c>detach</c> option was
-specified).</p>
+Invoked when an error occurs before an answer message is received
+in response to an outgoing request.
+The return value is returned from <seealso
+marker="diameter#call">diameter:call/4</seealso> unless the
+<c>detach</c> option was specified.</p>
<p>
Reason <c>timeout</c> indicates that an answer message has not been
-received within the required time.
+received within the time specified with the corresponding <seealso
+marker="diameter#call_opt">diameter:call_opt()</seealso>.
Reason <c>failover</c> indicates
that the transport connection to the peer to which the request has
-been sent has been lost but that not alternate node was available,
-possibly because a <seealso marker="#pick_peer">pick_peer/4</seealso>
-callback returned false.</p>
+been sent has become unavailable and that no alternate peer was
+selected.</p>
<marker id="handle_request"/>
</desc>
@@ -484,7 +533,10 @@ callback returned false.</p>
<v>Packet = <seealso marker="#packet">packet()</seealso></v>
<v>SvcName = term()</v>
<v>Peer = <seealso marker="#peer">peer()</seealso></v>
-<v>Action = Reply | {relay, [Opt]} | discard | {eval, Action, PostF}</v>
+<v>Action = Reply
+ | {relay, [Opt]}
+ | discard
+ | {eval|eval_packet, Action, PostF}</v>
<v>Reply = {reply, <seealso marker="#message">message()</seealso>}
| {protocol_error, 3000..3999}</v>
<v>Opt = <seealso marker="diameter#call_opt">diameter:call_opt()</seealso></v>
@@ -603,14 +655,25 @@ causes the request to be answered with 3002 (DIAMETER_UNABLE_TO_DELIVER).</p>
<tag><c>discard</c></tag>
<item>
<p>
-Discard the request.</p>
+Discard the request.
+No answer message is sent to the peer.</p>
</item>
<tag><c>{eval, Action, PostF}</c></tag>
<item>
<p>
Handle the request as if <c>Action</c> has been returned and then
-evaluate <c>PostF</c> in the request process.</p>
+evaluate <c>PostF</c> in the request process.
+The return value is ignored.</p>
+</item>
+
+<tag><c>{eval_packet, Action, PostF}</c></tag>
+<item>
+<p>
+Like <c>eval</c> but evaluate <c>PostF</c> on any encoded
+<c>#diameter_packet{}</c> prior to transmission, the <c>bin</c> field
+containing the encoded binary.
+The return value is ignored.</p>
</item>
</taglist>
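To make the callback flow described above concrete, here is a minimal sketch of pick_peer/4 and prepare_request/3. It assumes -include_lib("diameter/include/diameter.hrl") for the record definitions, and set_destination/3 is a hypothetical helper; none of this is part of the patch.

pick_peer([Peer | _], _Reserved, _SvcName, _State) ->
    {ok, Peer};                 %% select the first candidate
pick_peer([], _Reserved, _SvcName, _State) ->
    false.                      %% diameter:call/4 returns {error, no_connection}

prepare_request(#diameter_packet{msg = Req} = Pkt, _SvcName, {_PeerRef, Caps}) ->
    %% capabilities fields are {Local, Remote} 2-tuples; take the peer's values
    #diameter_caps{origin_host = {_, DH}, origin_realm = {_, DR}} = Caps,
    %% set_destination/3 (hypothetical) sets Destination-Host/Realm in Req
    {send, Pkt#diameter_packet{msg = set_destination(Req, DH, DR)}}.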
diff --git a/lib/diameter/doc/src/diameter_transport.xml b/lib/diameter/doc/src/diameter_transport.xml
index 9ab750e560..d9b36a1e09 100644
--- a/lib/diameter/doc/src/diameter_transport.xml
+++ b/lib/diameter/doc/src/diameter_transport.xml
@@ -54,14 +54,14 @@ parent).</p>
<funcs>
<func>
-<name>Mod:start({Type, Ref}, Svc, Opts)
+<name>Mod:start({Type, Ref}, Svc, Config)
-> {ok, Pid} | {ok, Pid, LAddrs} | {error, Reason}</name>
<fsummary>Start a transport process.</fsummary>
<type>
<v>Type = connect | accept</v>
<v>Ref = <seealso marker="diameter#transport_ref">diameter:transport_ref()</seealso></v>
<v>Svc = #diameter_service{}</v>
-<v>Opts = term()</v>
+<v>Config = term()</v>
<v>Pid = pid()</v>
<v>LAddrs = [<seealso marker="kernel:inet#type-ip_address">inet:ip_address()</seealso>]</v>
<v>Reason = term()</v>
@@ -75,38 +75,46 @@ order to establish or accept a transport connection respectively.
A transport process maintains a connection with a single remote peer.</p>
<p>
-The first argument indicates whether the transport process in question
+<c>Type</c> indicates whether the transport process in question
is being started for a connecting (<c>connect</c>) or listening
(<c>accept</c>) transport.
In the latter case, transport processes are started as required to
-accept connections from multiple peers.
-Ref is in each case the same value that was returned from the
-call to <seealso
-marker="diameter#add_transport">diameter:add_transport/2</seealso>
-that has lead to starting of a transport process.</p>
+accept connections from multiple peers.</p>
<p>
-A transport process must implement the message interface documented below.
-It should retain the pid of its parent, monitor the parent and terminate if
-it dies.
-It should not link to the parent.
-It should exit if its transport connection with its peer is lost.</p>
+<c>Ref</c> is the value that was returned from the call to <seealso
+marker="diameter#add_transport">diameter:add_transport/2</seealso>
+that has led to the starting of a transport process.</p>
<p>
-The capabilities in the <c>#diameter_service{}</c> record are as
-passed to <seealso
+<c>Svc</c> contains the capabilities passed to <seealso
marker="diameter#start_service">diameter:start_service/2</seealso> and
<seealso
marker="diameter#add_transport">diameter:add_transport/2</seealso>,
-values passed to the latter overriding those passed to the former.
+values passed to the latter overriding those passed to the former.</p>
+
+<p>
+<c>Config</c> is as passed in the <c>transport_config</c> tuple in the
+<seealso marker="diameter#transport_opt">diameter:transport_opt()</seealso>
+list passed to <seealso
+marker="diameter#add_transport">diameter:add_transport/2</seealso>.</p>
+
+<p>
The start function should use the <c>Host-IP-Address</c> list and/or
-<c>Opts</c> to select an appropriate list of local IP addresses,
+<c>Config</c> to select an appropriate list of local IP addresses,
and should return this list if different from the
<c>#diameter_service{}</c> addresses.
The returned list is used to populate <c>Host-IP-Address</c> AVPs in
outgoing capabilities exchange messages, the
<c>#diameter_service{}</c> addresses being used otherwise.</p>
+<p>
+A transport process must implement the message interface documented below.
+It should retain the pid of its parent, monitor the parent and terminate if
+it dies.
+It should not link to the parent.
+It should exit if its transport connection with its peer is lost.</p>
+
<marker id="MESSAGES"/>
</desc>
</func>
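A skeletal Mod:start/3 along the lines described above might look as follows; transport_loop/1 is a hypothetical receive loop, the connection setup is elided, and nothing here is part of the patch.

start({_Type, _Ref}, _Svc, _Config) ->
    Parent = self(),
    Pid = spawn(fun() ->
                        %% monitor (not link) the parent and terminate with it
                        erlang:monitor(process, Parent),
                        transport_loop(Parent)   %% hypothetical: connect, then relay messages
                end),
    {ok, Pid}.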
diff --git a/lib/diameter/make/release_targets.mk b/lib/diameter/make/release_targets.mk
deleted file mode 100644
index 5a3b585cbc..0000000000
--- a/lib/diameter/make/release_targets.mk
+++ /dev/null
@@ -1,92 +0,0 @@
-#
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1997-2011. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-#
-
-ifeq ($(TOPDOC),)
-$(HTMLDIR)/index.html: $(XML_FILES)
- date=`date +"%B %e %Y"`; \
- $(XSLTPROC) --noout --stringparam outdir $(HTMLDIR) --stringparam docgen "$(DOCGEN)" --stringparam topdocdir "$(TOPDOCDIR)" \
- --stringparam pdfdir "$(PDFDIR)" \
- --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude \
- -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_html_entities $(DOCGEN)/priv/xsl/db_html.xsl book.xml
-endif
-
-$(HTMLDIR)/users_guide.html: $(XML_FILES)
- date=`date +"%B %e %Y"`; \
- $(XSLTPROC) --noout --stringparam outdir $(HTMLDIR) --stringparam docgen "$(DOCGEN)" --stringparam topdocdir "$(TOPDOCDIR)" \
- --stringparam pdfdir "$(PDFDIR)" \
- --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude \
- -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_html_entities $(DOCGEN)/priv/xsl/db_html.xsl book.xml
-
-
-%.fo: $(XML_FILES)
- date=`date +"%B %e %Y"`; \
- $(XSLTPROC) --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" \
- --stringparam appver "$(VSN)" --xinclude \
- -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_html_entities $(DOCGEN)/priv/xsl/db_pdf.xsl book.xml > $@
-
-
-
-# ------------------------------------------------------------------------
-# The following targets just exist in the documentation directory
-# ------------------------------------------------------------------------
-ifneq ($(XML_FILES),)
-
-# ----------------------------------------------------
-# Generation of application index data
-# ----------------------------------------------------
-$(HTMLDIR)/$(APPLICATION).eix: $(XML_FILES)
- date=`date +"%B %e %Y"`; \
- $(XSLTPROC) --stringparam docgen "$(DOCGEN)" \
- --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude \
- -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_html_entities $(DOCGEN)/priv/xsl/db_eix.xsl book.xml > $@
-
-docs:
-#docs: $(HTMLDIR)/$(APPLICATION).eix
-
-# ----------------------------------------------------
-# Local documentation target for testing
-# ----------------------------------------------------
-local_docs: TOPDOCDIR=.
-local_docs: local_copy_of_topdefs docs
-
-local_html: TOPDOCDIR=.
-local_html: local_copy_of_topdefs html
-
-local_copy_of_topdefs:
- $(INSTALL) $(DOCGEN)/priv/css/otp_doc.css $(HTMLDIR)
- $(INSTALL) $(DOCGEN)/priv/images/erlang-logo.png $(HTMLDIR)
- $(INSTALL) $(DOCGEN)/priv/images/erlang-logo.gif $(HTMLDIR)
- $(INSTALL_DIR) $(HTMLDIR)/js/flipmenu
- $(INSTALL) $(DOCGEN)/priv/js/flipmenu/flip_closed.gif \
- $(DOCGEN)/priv/js/flipmenu/flip_open.gif \
- $(DOCGEN)/priv/js/flipmenu/flip_static.gif \
- $(DOCGEN)/priv/js/flipmenu/flipmenu.js $(HTMLDIR)/js/flipmenu
-
-endif
-
-# ----------------------------------------------------
-# Standard release target
-# ----------------------------------------------------
-
-ifneq ($(PREFIX),)
-
-release release_docs release_tests release_html:
- $(MAKE) $(MFLAGS) RELEASE_PATH=$(PREFIX) $(TARGET_MAKEFILE) $@_spec
-
-endif
diff --git a/lib/diameter/make/rules.mk.in b/lib/diameter/make/rules.mk.in
deleted file mode 100644
index cd3c297d75..0000000000
--- a/lib/diameter/make/rules.mk.in
+++ /dev/null
@@ -1,193 +0,0 @@
-#-*-makefile-*- ; force emacs to enter makefile-mode
-# ----------------------------------------------------
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2009-2011. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-
-.SUFFIXES: .erl .beam .yrl .hrl .xml .xmlsrc .html \
- .3 .1 .pdf .fo .el .elc
-
-# ----------------------------------------------------
-# Common macros
-# ----------------------------------------------------
-DEFAULT_TARGETS = opt debug release release_docs clean docs
-
-
-# Slash separated list of return values from $(origin VAR)
-# that are untrusted - set default in this file instead.
-# The list is not space separated since some return values
-# contain space, and we want to use $(findstring ...) to
-# search the list.
-DUBIOUS_ORIGINS = /undefined/environment/
-
-
-# # ----------------------------------------------------
-# # TARGET definition
-# # ----------------------------------------------------
-# # TARGET = @TARGET@
-# ifneq ($(OVERRIDE_TARGET),)
-# ifneq ($(TARGET), $(OVERRIDE_TARGET))
-# $(warning overriding $$(TARGET) = \
-# "$(TARGET)" \
-# with \
-# $$(OVERRIDE_TARGET) = \
-# "$(OVERRIDE_TARGET)")
-# override TARGET := $(OVERRIDE_TARGET)
-# endif
-# endif
-#
-
-# ----------------------------------------------------
-# Command macros
-# ----------------------------------------------------
-PREFIX = @prefix@
-INSTALL = @INSTALL@
-INSTALL_DIR = @INSTALL_DIR@
-INSTALL_PROGRAM = @INSTALL_PROGRAM@
-INSTALL_SCRIPT = @INSTALL_SCRIPT@
-INSTALL_DATA = @INSTALL_DATA@
-
-
-# ----------------------------------------------------
-# Erlang language section
-# ----------------------------------------------------
-ERL_ROOT_DIR = @ERLANG_ROOT_DIR@
-ERL_LIB_DIR = @ERLANG_LIB_DIR@
-DOCGEN_DIR = @ERLANG_LIB_DIR_erl_docgen@
-TEST_SERVER_DIR = @ERLANG_LIB_VER_test_server@
-EMULATOR = beam
-ERL_COMPILE_FLAGS += +debug_info
-ERLC_WFLAGS = -W
-ERLC = $(ERL_ROOT_DIR)/bin/erlc $(ERLC_WFLAGS) $(ERLC_FLAGS)
-ERL = $(ERL_ROOT_DIR)/bin/erl -boot start_clean
-#ERLC = @ERLC@ $(ERLC_WFLAGS) $(ERLC_FLAGS)
-#ERL = @ERL@ -boot start_clean
-
-ifneq (,$(findstring $(origin EBIN),$(DUBIOUS_ORIGINS)))
-EBIN = ../../ebin
-endif
-
-# Generated (non ebin) files...
-ifneq (,$(findstring $(origin EGEN),$(DUBIOUS_ORIGINS)))
-EGEN = .
-endif
-
-ifneq (,$(findstring $(origin ESRC),$(DUBIOUS_ORIGINS)))
-ESRC = .
-endif
-
-$(EBIN)/%.beam: $(EGEN)/%.erl
- $(ERLC) $(ERL_COMPILE_FLAGS) -o$(EBIN) $<
-
-$(EBIN)/%.beam: $(ESRC)/%.erl
- $(ERLC) $(ERL_COMPILE_FLAGS) -o$(EBIN) $<
-
-.erl.beam:
- $(ERLC) $(ERL_COMPILE_FLAGS) -o$(dir $@) $<
-
-
-#
-# When .erl files are automatically created GNU make removes them if
-# they were the result of a chain of implicit rules. To prevent this
-# we say that all .erl files are "precious".
-#
-.PRECIOUS: %.erl %.fo
-
-
-# ----------------------------------------------------
-# Documentation section
-# ----------------------------------------------------
-# export VSN
-
-# TOPDOCDIR=../../../../doc
-
-DOCDIR = ..
-
-PDFDIR=$(DOCDIR)/pdf
-
-HTMLDIR = $(DOCDIR)/html
-
-MAN1DIR = $(DOCDIR)/man1
-MAN2DIR = $(DOCDIR)/man2
-MAN3DIR = $(DOCDIR)/man3
-MAN4DIR = $(DOCDIR)/man4
-MAN6DIR = $(DOCDIR)/man6
-MAN9DIR = $(DOCDIR)/man9
-
-# HTML & GIF files that always are generated and must be delivered
-XML_COLL_FILES = $(XML_APPLICATION_FILES) $(XML_PART_FILES)
-DEFAULT_HTML_FILES = \
- $(XML_COLL_FILES:%.xml=$(HTMLDIR)/%_frame.html) \
- $(XML_COLL_FILES:%.xml=$(HTMLDIR)/%_first.html) \
- $(XML_COLL_FILES:%.xml=$(HTMLDIR)/%_term.html) \
- $(XML_COLL_FILES:%.xml=$(HTMLDIR)/%_cite.html) \
- $(XML_APPLICATION_FILES:%.xml=$(HTMLDIR)/%_index.html) \
- $(XML_APPLICATION_FILES:%.xml=$(HTMLDIR)/%.kwc) \
- $(HTMLDIR)/index.html
-
-DEFAULT_GIF_FILES = $(HTMLDIR)/min_head.gif
-
-#
-# Flags & Commands
-#
-XSLTPROC = @XSLTPROC@
-FOP = @FOP@
-
-DOCGEN=$(DOCGEN_DIR)
-
-$(MAN1DIR)/%.1:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-
-$(MAN2DIR)/%.2:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-
-$(MAN3DIR)/%.3:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-# left for compatibility
-$(MAN4DIR)/%.4:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-$(MAN4DIR)/%.5:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-# left for compatibility
-$(MAN6DIR)/%.6:: %_app.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-$(MAN6DIR)/%.7:: %_app.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-$(MAN9DIR)/%.9:: %.xml
- date=`date +"%B %e %Y"`; \
- xsltproc --output "$@" --stringparam company "Ericsson AB" --stringparam docgen "$(DOCGEN)" --stringparam gendate "$$date" --stringparam appname "$(APPLICATION)" --stringparam appver "$(VSN)" --xinclude -path $(DOCGEN)/priv/docbuilder_dtd -path $(DOCGEN)/priv/dtd_man_entities $(DOCGEN)/priv/xsl/db_man.xsl $<
-
-
-.xmlsrc.xml:
- escript $(DOCGEN)/priv/bin/codeline_preprocessing.escript $< $@
-
-.fo.pdf:
- $(FOP) -fo $< -pdf $@
-
diff --git a/lib/diameter/make/subdir.mk b/lib/diameter/make/subdir.mk
deleted file mode 100644
index 24b08080ae..0000000000
--- a/lib/diameter/make/subdir.mk
+++ /dev/null
@@ -1,53 +0,0 @@
-#
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 1997-2011. All Rights Reserved.
-#
-# The contents of this file are subject to the Erlang Public License,
-# Version 1.1, (the "License"); you may not use this file except in
-# compliance with the License. You should have received a copy of the
-# Erlang Public License along with this software. If not, it can be
-# retrieved online at http://www.erlang.org/.
-#
-# Software distributed under the License is distributed on an "AS IS"
-# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See
-# the License for the specific language governing rights and limitations
-# under the License.
-#
-# %CopyrightEnd%
-#
-# Make include file for otp
-
-.PHONY: debug opt release docs release_docs tests release_tests \
- clean depend valgrind
-
-#
-# Targets that don't affect documentation directories
-#
-opt debug release docs release_docs tests release_tests clean depend valgrind:
- @set -e ; \
- app_pwd=`pwd` ; \
- if test -f vsn.mk; then \
- echo "=== Entering application" `basename $$app_pwd` ; \
- fi ; \
- case "$(MAKE)" in *clearmake*) tflag="-T";; *) tflag="";; esac; \
- for d in $(SUB_DIRS); do \
- if test -f $$d/SKIP ; then \
- echo "=== Skipping subdir $$d, reason:" ; \
- cat $$d/SKIP ; \
- echo "===" ; \
- else \
- if test ! -d $$d ; then \
- echo "=== Skipping subdir $$d, it is missing" ; \
- else \
- xflag="" ; \
- if test -f $$d/ignore_config_record.inf; then \
- xflag=$$tflag ; \
- fi ; \
- (cd $$d && $(MAKE) $$xflag $@) || exit $$? ; \
- fi ; \
- fi ; \
- done ; \
- if test -f vsn.mk; then \
- echo "=== Leaving application" `basename $$app_pwd` ; \
- fi
diff --git a/lib/diameter/make/target.mk b/lib/diameter/make/target.mk
deleted file mode 100644
index 4ae470b9e2..0000000000
--- a/lib/diameter/make/target.mk
+++ /dev/null
@@ -1,33 +0,0 @@
-ifeq ($(OVERRIDE_TARGET),)
-
-ifeq ($(TARGET),)
-
-TARGET := $(shell $(DIAMETER_TOP)/autoconf/config.guess)
-
-else
-
-endif
-
-else
-
-ifneq ($(TARGET),)
-
-ifneq ($(TARGET), $(OVERRIDE_TARGET))
-$(warning overriding $$(TARGET) = \
- "$(TARGET)" \
- with \
- $$(OVERRIDE_TARGET) = \
- "$(OVERRIDE_TARGET)")
-else
-endif
-
-override TARGET := $(OVERRIDE_TARGET)
-
-else
-
-TARGET := $(OVERRIDE_TARGET)
-
-endif
-
-endif
-
diff --git a/lib/diameter/src/Makefile b/lib/diameter/src/Makefile
index 99c343275b..060659bce9 100644
--- a/lib/diameter/src/Makefile
+++ b/lib/diameter/src/Makefile
@@ -16,13 +16,8 @@
#
# %CopyrightEnd%
-ifeq ($(ERL_TOP),)
-include $(DIAMETER_TOP)/make/target.mk
-include $(DIAMETER_TOP)/make/$(TARGET)/rules.mk
-else
include $(ERL_TOP)/make/target.mk
include $(ERL_TOP)/make/$(TARGET)/otp.mk
-endif
# ----------------------------------------------------
# Application version
@@ -210,11 +205,7 @@ dialyze: opt $(PLT)
# Release targets
# ----------------------------------------------------
-ifeq ($(ERL_TOP),)
-include $(DIAMETER_TOP)/make/release_targets.mk
-else
include $(ERL_TOP)/make/otp_release_targets.mk
-endif
# Can't $(INSTALL_DIR) more than one directory at a time on Solaris.
diff --git a/lib/diameter/src/base/diameter.erl b/lib/diameter/src/base/diameter.erl
index 4f90b741ae..3e3a6be0ef 100644
--- a/lib/diameter/src/base/diameter.erl
+++ b/lib/diameter/src/base/diameter.erl
@@ -44,6 +44,8 @@
stop/0]).
-export_type([evaluable/0,
+ restriction/0,
+ sequence/0,
app_alias/0,
service_name/0,
capability/0,
@@ -280,11 +282,23 @@ call(SvcName, App, Message) ->
| fun()
| maybe_improper_list(evaluable(), list()).
+-type sequence()
+ :: {'Unsigned32'(), 0..32}.
+
+-type restriction()
+ :: false
+ | node
+ | nodes
+ | [node()]
+ | evaluable().
+
%% Options passed to start_service/2
-type service_opt()
:: capability()
- | {application, [application_opt()]}.
+ | {application, [application_opt()]}
+ | {restrict_connections, restriction()}
+ | {sequence, sequence() | evaluable()}.
-type application_opt()
:: {alias, app_alias()}
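For illustration only, the new service options might be passed like this; the service name, callback module and capability values are made up for the example and are not part of the patch.

ok = diameter:start_service(my_service,
                            [{'Origin-Host', "client.example.com"},
                             {'Origin-Realm', "example.com"},
                             {'Vendor-Id', 0},
                             {'Product-Name', "example client"},
                             {application, [{alias, my_app},
                                            {dictionary, diameter_gen_base_rfc3588},
                                            {module, my_callback_module}]},
                             {restrict_connections, nodes},
                             {sequence, {0, 32}}]).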
diff --git a/lib/diameter/src/base/diameter_capx.erl b/lib/diameter/src/base/diameter_capx.erl
index 6c4d60ee9b..190d37262b 100644
--- a/lib/diameter/src/base/diameter_capx.erl
+++ b/lib/diameter/src/base/diameter_capx.erl
@@ -141,7 +141,9 @@ cap('Host-IP-Address', Vs)
when is_list(Vs) ->
lists:map(fun ipaddr/1, Vs);
-cap('Firmware-Revision', V) ->
+cap(K, V)
+ when K == 'Firmware-Revision';
+ K == 'Origin-State-Id' ->
[V];
cap(_, Vs)
@@ -149,7 +151,7 @@ cap(_, Vs)
Vs;
cap(K, V) ->
- ?THROW({invalid, K, V}).
+ ?THROW({invalid, {K,V}}).
ipaddr(A) ->
try
diff --git a/lib/diameter/src/base/diameter_codec.erl b/lib/diameter/src/base/diameter_codec.erl
index 421e280422..a94d37f7a8 100644
--- a/lib/diameter/src/base/diameter_codec.erl
+++ b/lib/diameter/src/base/diameter_codec.erl
@@ -333,6 +333,9 @@ decode_header(_) ->
%% wraparound counter. The 8-bit counter is incremented each time the
%% system is restarted.
+sequence_numbers({_,_} = T) ->
+ T;
+
sequence_numbers(#diameter_packet{bin = Bin})
when is_binary(Bin) ->
sequence_numbers(Bin);
diff --git a/lib/diameter/src/base/diameter_config.erl b/lib/diameter/src/base/diameter_config.erl
index e47f63f814..63d28f25a2 100644
--- a/lib/diameter/src/base/diameter_config.erl
+++ b/lib/diameter/src/base/diameter_config.erl
@@ -97,6 +97,9 @@
-record(monitor, {mref = make_ref() :: reference(),
service}). %% name
+%% The default sequence mask.
+-define(NOMASK, {0,32}).
+
%% Time to lay low before restarting a dead service.
-define(RESTART_SLEEP, 2000).
@@ -549,9 +552,11 @@ make_config(SvcName, Opts) ->
ok = encode_CER(COpts),
- Os = split(Opts, [{[fun erlang:is_boolean/1], false, share_peers},
- {[fun erlang:is_boolean/1], false, use_shared_peers},
- {[fun erlang:is_pid/1, false], false, monitor}]),
+ Os = split(Opts, fun opt/2, [{false, share_peers},
+ {false, use_shared_peers},
+ {false, monitor},
+ {?NOMASK, sequence},
+ {nodes, restrict_connections}]),
%% share_peers and use_shared_peers are currently undocumented.
#service{name = SvcName,
@@ -559,11 +564,66 @@ make_config(SvcName, Opts) ->
capabilities = Caps},
options = Os}.
+split(Opts, F, Defs) ->
+ [{K, F(K, get_opt(K, Opts, D))} || {D,K} <- Defs].
+
+opt(K, false = B)
+ when K /= sequence ->
+ B;
+
+opt(K, true = B)
+ when K == share_peers;
+ K == use_shared_peers ->
+ B;
+
+opt(monitor, P)
+ when is_pid(P) ->
+ P;
+
+opt(restrict_connections, T)
+ when T == node;
+ T == nodes;
+ T == [];
+ is_atom(hd(T)) ->
+ T;
+
+opt(restrict_connections = K, F) ->
+ try diameter_lib:eval(F) of %% no guarantee that it won't fail later
+ Nodes when is_list(Nodes) ->
+ F;
+ V ->
+ ?THROW({value, {K,V}})
+ catch
+ E:R ->
+ ?THROW({value, {K, E, R, ?STACK}})
+ end;
+
+opt(sequence, {_,_} = T) ->
+ sequence(T);
+
+opt(sequence = K, F) ->
+ try diameter_lib:eval(F) of
+ T -> sequence(T)
+ catch
+ E:R ->
+ ?THROW({value, {K, E, R, ?STACK}})
+ end;
+
+opt(K, _) ->
+ ?THROW({value, K}).
+
+sequence({H,N} = T)
+ when 0 =< N, N =< 32, 0 =< H, 0 == H bsr N ->
+ T;
+
+sequence(_) ->
+ ?THROW({value, sequence}).
+
make_caps(Caps, Opts) ->
case diameter_capx:make_caps(Caps, Opts) of
{ok, T} ->
T;
- {error, {Reason, _}} ->
+ {error, Reason} ->
?THROW(Reason)
end.
@@ -663,21 +723,6 @@ get_opt(Key, List, Def) ->
_ -> ?THROW({arity, Key})
end.
-split(Opts, Defs) ->
- [{K, value(D, Opts)} || {_,_,K} = D <- Defs].
-
-value({Preds, Def, Key}, Opts) ->
- V = get_opt(Key, Opts, Def),
- lists:any(fun(P) -> pred(P,V) end, Preds)
- orelse ?THROW({value, Key}),
- V.
-
-pred(F, V)
- when is_function(F) ->
- F(V);
-pred(T, V) ->
- T == V.
-
cb(M,F) ->
try M:F() of
V -> V
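The sequence/1 clause above is the whole validity check on a mask: N must lie in 0..32 and H must be representable in N bits. A standalone sketch of the same test, useful for trying out candidate masks (module and function names are made up):

    -module(mask_check).
    -export([valid/1]).

    %% Mirrors the guard in diameter_config:sequence/1: {H,N} is valid
    %% when 0 =< N =< 32 and H bsr N is 0, i.e. H fits in N bits.
    valid({H, N}) when is_integer(H), is_integer(N) ->
        0 =< N andalso N =< 32 andalso 0 =< H andalso 0 == H bsr N;
    valid(_) ->
        false.

    %% mask_check:valid({0, 32})    -> true   (the ?NOMASK default)
    %% mask_check:valid({16#2A, 8}) -> true
    %% mask_check:valid({256, 8})   -> false  (256 needs 9 bits)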
diff --git a/lib/diameter/src/base/diameter_peer.erl b/lib/diameter/src/base/diameter_peer.erl
index 46b2ba9465..1b2f32ddff 100644
--- a/lib/diameter/src/base/diameter_peer.erl
+++ b/lib/diameter/src/base/diameter_peer.erl
@@ -76,7 +76,7 @@ notify(SvcName, T) ->
%%% # start/3
%%% ---------------------------------------------------------------------------
-%% From old code: make is restart.
+%% From old code: make it restart.
start(_T, _Opts, #diameter_service{}) ->
{error, restart}.
diff --git a/lib/diameter/src/base/diameter_peer_fsm.erl b/lib/diameter/src/base/diameter_peer_fsm.erl
index 302540e76b..3f4945f7a6 100644
--- a/lib/diameter/src/base/diameter_peer_fsm.erl
+++ b/lib/diameter/src/base/diameter_peer_fsm.erl
@@ -55,10 +55,15 @@
-define(TLS, 1).
%% Keys in process dictionary.
--define(CB_KEY, cb). %% capabilities callback
--define(DWA_KEY, dwa). %% outgoing DWA
--define(Q_KEY, q). %% transport start queue
--define(START_KEY, start). %% start of connected transport
+-define(CB_KEY, cb). %% capabilities callback
+-define(DWA_KEY, dwa). %% outgoing DWA
+-define(Q_KEY, q). %% transport start queue
+-define(START_KEY, start). %% start of connected transport
+-define(SEQUENCE_KEY, mask). %% mask for sequence numbers
+-define(RESTRICT_KEY, restrict). %% nodes for connection check
+
+%% The default sequence mask.
+-define(NOMASK, {0,32}).
%% A 2xxx series Result-Code. Not necessarily 2001.
-define(IS_SUCCESS(N), 2 == (N) div 1000).
@@ -121,7 +126,10 @@
%%% Output: Pid
%%% ---------------------------------------------------------------------------
--spec start(T, [Opt], #diameter_service{})
+-spec start(T, [Opt], #diameter_service{} %% from old code
+ | {diameter:sequence(),
+ diameter:restriction(),
+ #diameter_service{}})
-> pid()
when T :: {connect|accept, diameter:transport_ref()},
Opt :: diameter:transport_opt().
@@ -131,10 +139,8 @@
%% specified on the transport in question. Check here that the list is
%% still non-empty.
-start({_,_} = Type, Opts, #diameter_service{applications = Apps} = Svc) ->
- [] /= Apps orelse ?ERROR({no_apps, Type, Opts}),
- T = {self(), Type, Opts, Svc},
- {ok, Pid} = diameter_peer_fsm_sup:start_child(T),
+start({_,_} = Type, Opts, MS) ->
+ {ok, Pid} = diameter_peer_fsm_sup:start_child({self(), Type, Opts, MS}),
Pid.
start_link(T) ->
@@ -153,12 +159,20 @@ init(T) ->
proc_lib:init_ack({ok, self()}),
gen_server:enter_loop(?MODULE, [], i(T)).
-i({WPid, T, Opts, #diameter_service{capabilities = Caps} = Svc}) ->
+i({WPid, Type, Opts, #diameter_service{} = Svc}) -> %% from old code
+ i({WPid, Type, Opts, {?NOMASK, [node() | nodes()], Svc}});
+
+i({WPid, T, Opts, {Mask, Nodes, #diameter_service{applications = Apps,
+ capabilities = Caps}
+ = Svc}}) ->
+ [] /= Apps orelse ?ERROR({no_apps, T, Opts}),
putr(?DWA_KEY, dwa(Caps)),
{M, Ref} = T,
diameter_stats:reg(Ref),
{[Ts], Rest} = proplists:split(Opts, [capabilities_cb]),
putr(?CB_KEY, {Ref, [F || {_,F} <- Ts]}),
+ putr(?SEQUENCE_KEY, Mask),
+ putr(?RESTRICT_KEY, Nodes),
erlang:monitor(process, WPid),
{TPid, Addrs} = start_transport(T, Rest, Svc),
#state{parent = WPid,
@@ -464,9 +478,24 @@ build_CER(#state{service = #diameter_service{capabilities = Caps}}) ->
%% encode/1
encode(Rec) ->
- #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Rec),
+ Seq = diameter_session:sequence(sequence()),
+ Hdr = #diameter_header{version = ?DIAMETER_VERSION,
+ end_to_end_id = Seq,
+ hop_by_hop_id = Seq},
+ Pkt = #diameter_packet{header = Hdr,
+ msg = Rec},
+ #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Pkt),
Bin.
+sequence() ->
+ case getr(?SEQUENCE_KEY) of
+ {_,_} = Mask ->
+ Mask;
+ undefined -> %% started in old code
+ putr(?SEQUENCE_KEY, ?NOMASK),
+ ?NOMASK
+ end.
+
%% recv/2
%% RFC 3588 has result code 5015 for an invalid length but if a
@@ -965,14 +994,35 @@ dpa_timer() ->
%% Register a term and ensure it's not registered elsewhere. Note that
%% two processes that simultaneously register the same term may well
%% both fail to do so, so this isn't foolproof.
+%%
+%% Everywhere is no longer everywhere: it's the nodes that a
+%% restrict_connections service_opt() specifies.
register_everywhere(T) ->
- diameter_reg:add_new(T)
- andalso unregistered(T).
+ reg(getr(?RESTRICT_KEY), T).
+
+reg(Nodes, T) ->
+ add(lists:member(node(), Nodes), T) andalso unregistered(Nodes, T).
+
+add(true, T) ->
+ diameter_reg:add_new(T);
+add(false, T) ->
+ diameter_reg:add(T).
+
+%% unregistered
+%%
+%% Ensure that the term in question isn't registered on other nodes.
+
+unregistered(Nodes, T) ->
+ {ResL, _} = rpc:multicall(Nodes, ?MODULE, match, [{node(), T}]),
+ lists:all(fun nomatch/1, ResL).
+
+nomatch({badrpc, {'EXIT', {undef, _}}}) -> %% no diameter on remote node
+ true;
+nomatch(L) ->
+ [] == L.
-unregistered(T) ->
- {ResL, _} = rpc:multicall(?MODULE, match, [{node(), T}]),
- lists:all(fun(L) -> [] == L end, ResL).
+%% match/1
match({Node, _})
when Node == node() ->
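The reg/unregistered/nomatch functions added above implement a node-restricted uniqueness check: register locally, then ask the restricted set of nodes whether anyone else holds the term, treating an undef badrpc as "diameter not running there". A condensed sketch of that pattern with a hypothetical registry module Mod (not diameter's actual API):

    %% Returns true if Term matches nothing on any of Nodes. A node
    %% without Mod loaded answers {badrpc, {'EXIT', {undef, _}}} and
    %% counts as a non-match rather than as a failure.
    unregistered_on(Nodes, Mod, Term) ->
        {Results, _BadNodes} = rpc:multicall(Nodes, Mod, match, [Term]),
        lists:all(fun({badrpc, {'EXIT', {undef, _}}}) -> true;
                     (Matches) when is_list(Matches)  -> [] == Matches;
                     (_)                              -> false
                  end,
                  Results).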
diff --git a/lib/diameter/src/base/diameter_reg.erl b/lib/diameter/src/base/diameter_reg.erl
index 882b9da238..619b12ecad 100644
--- a/lib/diameter/src/base/diameter_reg.erl
+++ b/lib/diameter/src/base/diameter_reg.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -30,7 +30,8 @@
add_new/1,
del/1,
repl/2,
- match/1]).
+ match/1,
+ wait/1]).
-export([start_link/0]).
@@ -65,27 +66,22 @@
%% Table entry containing the Term -> Pid mapping.
-define(MAPPING(Term, Pid), {Term, Pid}).
--record(state, {id = now()}).
-
-%%% ----------------------------------------------------------
-%%% # add(T)
-%%%
-%%% Input: Term = term()
-%%%
-%%% Output: true
-%%%
-%%% Description: Associate the specified term with self(). The list of pids
-%%% having this or other assocations can be retrieved using
-%%% match/1.
-%%%
-%%% An association is removed when the calling process dies
-%%% or as a result of calling del/1. Adding the same term
-%%% more than once is equivalent to adding it exactly once.
-%%%
-%%% Note that since match/1 takes a pattern as argument,
-%%% specifying a term that contains match variables is
-%%% probably not a good idea
-%%% ----------------------------------------------------------
+-record(state, {id = now(),
+ q = []}). %% [{From, Pat}]
+
+%% ===========================================================================
+%% # add(T)
+%%
+%% Associate the specified term with self(). The list of pids having
+%% this or other associations can be retrieved using match/1.
+%%
+%% An association is removed when the calling process dies or as a
+%% result of calling del/1. Adding the same term more than once is
+%% equivalent to adding it exactly once.
+%%
+%% Note that since match/1 takes a pattern as argument, specifying a
+%% term that contains match variables is probably not a good idea
+%% ===========================================================================
-spec add(any())
-> true.
@@ -93,17 +89,12 @@
add(T) ->
call({add, fun ets:insert/2, T, self()}).
-%%% ----------------------------------------------------------
-%%% # add_new(T)
-%%%
-%%% Input: T = term()
-%%%
-%%% Output: true | false
-%%%
-%%% Description: Like add/1 but only one process is allowed to have the
-%%% the association, false being returned if an association
-%%% already exists.
-%%% ----------------------------------------------------------
+%% ===========================================================================
+%% # add_new(T)
+%%
+%% Like add/1 but only one process is allowed to have the
+%% association, false being returned if an association already exists.
+%% ===========================================================================
-spec add_new(any())
-> boolean().
@@ -111,16 +102,12 @@ add(T) ->
add_new(T) ->
call({add, fun insert_new/2, T, self()}).
-%%% ----------------------------------------------------------
-%%% # repl(T, NewT)
-%%%
-%%% Input: T, NewT = term()
-%%%
-%%% Output: true | false
-%%%
-%%% Description: Like add/1 but only replace an existing association on T,
-%%% false being returned if it doesn't exist.
-%%% ----------------------------------------------------------
+%% ===========================================================================
+%% # repl(T, NewT)
+%%
+%% Like add/1 but only replace an existing association on T, false
+%% being returned if it doesn't exist.
+%% ===========================================================================
-spec repl(any(), any())
-> boolean().
@@ -128,15 +115,11 @@ add_new(T) ->
repl(T, U) ->
call({repl, T, U, self()}).
-%%% ----------------------------------------------------------
-%%% # del(Term)
-%%%
-%%% Input: Term = term()
-%%%
-%%% Output: true
-%%%
-%%% Description: Remove any existing association of Term with self().
-%%% ----------------------------------------------------------
+%% ===========================================================================
+%% # del(Term)
+%%
+%% Remove any existing association of Term with self().
+%% ===========================================================================
-spec del(any())
-> true.
@@ -144,20 +127,16 @@ repl(T, U) ->
del(T) ->
call({del, T, self()}).
-%%% ----------------------------------------------------------
-%%% # match(Pat)
-%%%
-%%% Input: Pat = pattern in the sense of ets:match_object/2.
-%%%
-%%% Output: list of {Term, Pid}
-%%%
-%%% Description: Return the list of associations whose Term, as specified
-%%% to add/1 or add_new/1, matches the specified pattern.
-%%%
-%%% Note that there's no guarantee that the returned processes
-%%% are still alive. (Although one that isn't will soon have
-%%% its associations removed.)
-%%% ----------------------------------------------------------
+%% ===========================================================================
+%% # match(Pat)
+%%
+%% Return the list of associations whose Term, as specified to add/1
+%% or add_new/1, matches the specified pattern.
+%%
+%% Note that there's no guarantee that the returned processes are
+%% still alive. (Although one that isn't will soon have its
+%% associations removed.)
+%% ===========================================================================
-spec match(tuple())
-> [{term(), pid()}].
@@ -165,9 +144,17 @@ del(T) ->
match(Pat) ->
ets:match_object(?TABLE, ?MAPPING(Pat, '_')).
-%% ---------------------------------------------------------
-%% EXPORTED INTERNAL FUNCTIONS
-%% ---------------------------------------------------------
+%% ===========================================================================
+%% # wait(Pat)
+%%
+%% Like match/1 but return only when the result is non-empty or fails.
+%% It's up to the caller to ensure that the wait won't be forever.
+%% ===========================================================================
+
+wait(Pat) ->
+ call({wait, Pat}).
+
+%% ===========================================================================
start_link() ->
ServerName = {local, ?SERVER},
@@ -182,7 +169,7 @@ uptime() ->
%% pids/0
%%
-%% Output: list of {Pid, [Term, ...]}
+%% Return: list of {Pid, [Term, ...]}
pids() ->
to_list(fun swap/1).
@@ -202,89 +189,100 @@ id(T) -> T.
%% terms/0
%%
-%% Output: list of {Term, [Pid, ...]}
+%% Return: list of {Term, [Pid, ...]}
terms() ->
to_list(fun id/1).
swap({X,Y}) -> {Y,X}.
-%%% ----------------------------------------------------------
-%%% # init(Role)
-%%%
-%%% Output: {ok, State}
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # init/1
+%% ----------------------------------------------------------
init(_) ->
ets:new(?TABLE, [bag, named_table]),
{ok, #state{}}.
-%%% ----------------------------------------------------------
-%%% # handle_call(Request, From, State)
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # handle_call/3
+%% ----------------------------------------------------------
-handle_call({add, Fun, Key, Pid}, _, State) ->
+handle_call(Req, From, S)
+ when not is_record(S, state) ->
+ handle_call(Req, From, upgrade(S));
+
+handle_call({add, Fun, Key, Pid}, _, S) ->
B = Fun(?TABLE, {Key, Pid}),
monitor(B andalso no_monitor(Pid), Pid),
- {reply, B, State};
+ {reply, B, pending(B, S)};
-handle_call({del, Key, Pid}, _, State) ->
- {reply, ets:delete_object(?TABLE, ?MAPPING(Key, Pid)), State};
+handle_call({del, Key, Pid}, _, S) ->
+ {reply, ets:delete_object(?TABLE, ?MAPPING(Key, Pid)), S};
-handle_call({repl, T, U, Pid}, _, State) ->
+handle_call({repl, T, U, Pid}, _, S) ->
MatchSpec = [{?MAPPING('$1', Pid),
[{'=:=', '$1', {const, T}}],
['$_']}],
- {reply, repl(ets:select(?TABLE, MatchSpec), U, Pid), State};
+ {reply, repl(ets:select(?TABLE, MatchSpec), U, Pid), S};
+
+handle_call({wait, Pat}, From, #state{q = Q} = S) ->
+ case find(Pat) of
+ {ok, L} ->
+ {reply, L, S};
+ false ->
+ {noreply, S#state{q = [{From, Pat} | Q]}}
+ end;
-handle_call(state, _, State) ->
- {reply, State, State};
+handle_call(state, _, S) ->
+ {reply, S, S};
-handle_call(uptime, _, #state{id = Time} = State) ->
- {reply, diameter_lib:now_diff(Time), State};
+handle_call(uptime, _, #state{id = Time} = S) ->
+ {reply, diameter_lib:now_diff(Time), S};
-handle_call(Req, From, State) ->
+handle_call(Req, From, S) ->
?UNEXPECTED([Req, From]),
- {reply, nok, State}.
+ {reply, nok, S}.
-%%% ----------------------------------------------------------
-%%% # handle_cast(Request, State)
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # handle_cast/2
+%% ----------------------------------------------------------
-handle_cast(Msg, State)->
+handle_cast(Msg, S)->
?UNEXPECTED([Msg]),
- {noreply, State}.
+ {noreply, S}.
-%%% ----------------------------------------------------------
-%%% # handle_info(Request, State)
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # handle_info/2
+%% ----------------------------------------------------------
-handle_info({'DOWN', MRef, process, Pid, _}, State) ->
+handle_info({'DOWN', MRef, process, Pid, _}, S) ->
ets:delete_object(?TABLE, ?MONITOR(Pid, MRef)),
ets:match_delete(?TABLE, ?MAPPING('_', Pid)),
- {noreply, State};
+ {noreply, S};
-handle_info(Info, State) ->
+handle_info(Info, S) ->
?UNEXPECTED([Info]),
- {noreply, State}.
+ {noreply, S}.
-%%% ----------------------------------------------------------
-%%% # terminate(Reason, State)
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # terminate/2
+%% ----------------------------------------------------------
terminate(_Reason, _State)->
ok.
-%%% ----------------------------------------------------------
-%%% # code_change(OldVsn, State, Extra)
-%%% ----------------------------------------------------------
+%% ----------------------------------------------------------
+%% # code_change/3
+%% ----------------------------------------------------------
code_change(_OldVsn, State, _Extra) ->
{ok, State}.
-%% ---------------------------------------------------------
-%% INTERNAL FUNCTIONS
-%% ---------------------------------------------------------
+%% ===========================================================================
+
+upgrade(S) ->
+ #state{} = list_to_tuple(tuple_to_list(S) ++ [[]]).
monitor(true, Pid) ->
ets:insert(?TABLE, ?MONITOR(Pid, erlang:monitor(process, Pid)));
@@ -321,6 +319,37 @@ repl([?MAPPING(_, Pid) = M], Key, Pid) ->
repl([], _, _) ->
false.
+%% pending/1
+
+pending(true, #state{q = [_|_] = Q} = S) ->
+ S#state{q = q(lists:reverse(Q), [])}; %% retain reply order
+pending(_, S) ->
+ S.
+
+q([], Q) ->
+ Q;
+q([{From, Pat} = T | Rest], Q) ->
+ case find(Pat) of
+ {ok, L} ->
+ gen_server:reply(From, L),
+ q(Rest, Q);
+ false ->
+ q(Rest, [T|Q])
+ end.
+
+%% find/1
+
+find(Pat) ->
+ try match(Pat) of
+ [] ->
+ false;
+ L ->
+ {ok, L}
+ catch
+ _:_ ->
+ {ok, []}
+ end.
+
%% call/1
call(Request) ->
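The new wait/1 blocks in the server (via the queued {From, Pat} entries handled by pending/2 and q/2 above) until some association matches the pattern. A hedged usage sketch, assuming diameter (and hence diameter_reg) is already running; the {example, Ref} term is invented:

    %% One process registers a term after a delay; another blocks in
    %% wait/1 until the matching {add, ...} has been handled.
    demo() ->
        spawn(fun() ->
                      timer:sleep(100),
                      true = diameter_reg:add_new({example, make_ref()})
              end),
        %% Returns as soon as an {example, _} association exists. Note
        %% the caveat above: nothing bounds the wait if no one adds.
        [{{example, _}, _Pid} | _] = diameter_reg:wait({example, '_'}).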
diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl
index 725cccda1e..cffba4fc94 100644
--- a/lib/diameter/src/base/diameter_service.erl
+++ b/lib/diameter/src/base/diameter_service.erl
@@ -64,7 +64,7 @@
-include_lib("diameter/include/diameter.hrl").
-include("diameter_internal.hrl").
-%% The "old" states maintained in this module historically.
+%% The states mirrored by peer_up/peer_down callbacks.
-define(STATE_UP, up).
-define(STATE_DOWN, down).
@@ -107,6 +107,12 @@
%% process.
-define(STATE_TABLE, ?MODULE).
+%% The default sequence mask.
+-define(NOMASK, {0,32}).
+
+%% The default restrict_connections.
+-define(RESTRICT, nodes).
+
%% Workaround for dialyzer's lack of understanding of match specs.
-type match(T)
:: T | '_' | '$1' | '$2' | '$3' | '$4'.
@@ -114,15 +120,18 @@
%% State of service gen_server.
-record(state,
{id = now(),
- service_name, %% as passed to start_service/2, key in ?STATE_TABLE
+ service_name, %% as passed to start_service/2, key in ?STATE_TABLE
service :: #diameter_service{},
- peerT = ets_new(peers) :: ets:tid(), %% #peer{} at start_fsm
- connT = ets_new(conns) :: ets:tid(), %% #conn{} at connection_up
- share_peers = false :: boolean(), %% broadcast peers to remote nodes?
- use_shared_peers = false :: boolean(), %% use broadcasted peers?
+ peerT = ets_new(peers) :: ets:tid(),%% #peer{} at start_fsm
+ connT = ets_new(conns) :: ets:tid(),%% #conn{} at connection_up/reopen
shared_peers = ?Dict:new(), %% Alias -> [{TPid, Caps}, ...]
local_peers = ?Dict:new(), %% Alias -> [{TPid, Caps}, ...]
- monitor = false :: false | pid()}). %% process to die with
+ monitor = false :: false | pid(), %% process to die with
+ options
+ :: [{sequence, diameter:sequence()} %% sequence mask
+ | {restrict_connections, diameter:restriction()}
+ | {share_peers, boolean()} %% broadcast peers to remote nodes?
+ | {use_shared_peers, boolean()}]}).%% use broadcasted peers?
%% shared_peers reflects the peers broadcast from remote nodes. Note
%% that the state term itself doesn't change, which is relevant for
%% the stateless application callbacks since the state is retrieved
@@ -130,7 +139,12 @@
%% service record is used to determine whether or not we need to call
%% the process for a pick_peer callback.
-%% Record representing a watchdog process.
+%% Record representing a watchdog process as implemented by
+%% diameter_watchdog. The term "peer" here is historical, made
+%% especially confusing by the fact that a peer_ref() in the
+%% documentation is the key of a #conn{} record, not a #peer{} record.
+%% The name is also unfortunate given the meaning of peer in the
+%% Diameter sense.
-record(peer,
{pid :: match(pid()),
type :: match(connect | accept),
@@ -140,9 +154,15 @@
:: match(op_state() | {op_state(), wd_state()}),
started = now(), %% at process start
conn = false :: match(boolean() | pid())}).
- %% true at accept, pid() at connection_up (connT key)
-
-%% Record representing a peer_fsm process.
+ %% true at accepted, pid() at connection_up or reopen
+
+%% Record representing a peer process as implemented by
+%% diameter_peer_fsm. The term "conn" is historical. Despite the name
+%% here, comments refer to watchdog and peer processes, which are keys
+%% in #peer{} and #conn{} records respectively. To add to the
+%% confusion, a #request.transport is a peer process = key in a
+%% #conn{} record. The actual transport process (that the peer process
+%% knows about and that has a transport connection) isn't seen here.
-record(conn,
{pid :: pid(),
apps :: [{0..16#FFFFFFFF, diameter:app_alias()}], %% {Id, Alias}
@@ -156,10 +176,9 @@
handler :: match(pid()), %% request process
transport :: match(pid()), %% peer process
caps :: match(#diameter_caps{}),
- app :: match(diameter:app_alias()), %% #diameter_app.alias
- dictionary :: match(module()), %% #diameter_app.dictionary
- module :: match([module() | list()]),
- %% #diameter_app.module
+ app :: match(diameter:app_alias()),%% #diameter_app.alias
+ dictionary :: match(module()), %% #diameter_app.dictionary
+ module :: match([module() | list()]), %% #diameter_app.module
filter :: match(diameter:peer_filter()),
packet :: match(#diameter_packet{})}).
@@ -170,20 +189,6 @@
timeout = ?DEFAULT_TIMEOUT :: 0..16#FFFFFFFF,
detach = false :: boolean()}).
-%% Since RFC 3588 requires that a Diameter agent not modify End-to-End
-%% Identifiers, the possibility of explicitly setting an End-to-End
-%% Identifier would be needed to be able to implement an agent in
-%% which one side of the communication is not implemented on top of
-%% diameter. For example, Diameter being sent or received encapsulated
-%% in some other protocol, or even another Diameter stack in a
-%% non-Erlang environment. (Not that this is likely to be a normal
-%% case.)
-%%
-%% The implemented solution is not an option but to respect any header
-%% values set in a diameter_header record returned from a
-%% prepare_request callback. A call to diameter:call/4 can communicate
-%% values to the callback using the 'extra' option if so desired.
-
%%% ---------------------------------------------------------------------------
%%% # start(SvcName)
%%% ---------------------------------------------------------------------------
@@ -236,20 +241,20 @@ stop_transport(SvcName, [_|_] = Refs) ->
%%% ---------------------------------------------------------------------------
info(SvcName, Item) ->
- info_rc(call_service_by_name(SvcName, {info, Item})).
-
-info_rc({error, _}) ->
- undefined;
-info_rc(Info) ->
- Info.
+ case find_state(SvcName) of
+ #state{} = S ->
+ service_info(Item, S);
+ false ->
+ undefined
+ end.
%%% ---------------------------------------------------------------------------
%%% # receive_message(TPid, Pkt, MessageData)
%%% ---------------------------------------------------------------------------
-%% Handle an incoming message in the watchdog process. This used to
-%% come through the service process but this avoids that becoming a
-%% bottleneck.
+%% Handle an incoming Diameter message in the watchdog process. Such
+%% messages used to come through the service process, but handling
+%% them here avoids that process becoming a bottleneck.
receive_message(TPid, Pkt, T)
when is_pid(TPid) ->
@@ -329,21 +334,39 @@ call_rc(_, _, Sent) ->
%% In the process spawned for the outgoing request.
call(SvcName, App, Msg, Opts, Caller) ->
- c(ets:lookup(?STATE_TABLE, SvcName), App, Msg, Opts, Caller).
+ c(find_state(SvcName), App, Msg, Opts, Caller).
-c([#state{service_name = SvcName} = S], App, Msg, Opts, Caller) ->
+c(#state{service_name = Svc, options = [{_, Mask} | _]} = S,
+ App,
+ Msg,
+ Opts,
+ Caller) ->
case find_transport(App, Msg, Opts, S) of
{_,_,_} = T ->
- send_request(T, Msg, Opts, Caller, SvcName);
+ send_request(T, Mask, Msg, Opts, Caller, Svc);
false ->
{error, no_connection};
{error, _} = No ->
No
end;
-c([], _, _, _, _) ->
+c(false, _, _, _, _) ->
{error, no_service}.
+%% find_state/1
+
+find_state(SvcName) ->
+ fs(ets:lookup(?STATE_TABLE, SvcName)).
+
+fs([#state{} = S]) ->
+ S;
+
+fs([S]) -> %% inserted from old code
+ upgrade(S);
+
+fs([]) ->
+ false.
+
%% make_options/1
make_options(Options) ->
@@ -439,6 +462,10 @@ i(_, false) ->
%%% # handle_call(Req, From, State)
%%% ---------------------------------------------------------------------------
+handle_call(T, From, S)
+ when not is_record(S, state) ->
+ handle_call(T, From, upgrade(S));
+
handle_call(state, _, S) ->
{reply, S, S};
@@ -462,6 +489,7 @@ handle_call({pick_peer, Local, Remote, App}, _From, S) ->
handle_call({call_module, AppMod, Req}, From, S) ->
call_module(AppMod, Req, From, S);
+%% Call from old code.
handle_call({info, Item}, _From, S) ->
{reply, service_info(Item, S), S};
@@ -472,6 +500,14 @@ handle_call(stop, _From, S) ->
%% gets the reply. We deal with this in the call to the server,
%% stating a monitor that waits for DOWN before returning.
+%% Watchdog is asking for the sequence mask.
+handle_call(sequence, _From, #state{options = [{_, Mask} | _]} = S) ->
+ {reply, Mask, S};
+
+%% Watchdog is asking for the nodes restriction.
+handle_call(restriction, _From, #state{options = [_,_,_,{_,R} | _]} = S) ->
+ {reply, R, S};
+
handle_call(Req, From, S) ->
unexpected(handle_call, [Req, From], S),
{reply, nok, S}.
@@ -488,15 +524,16 @@ handle_cast(Req, S) ->
%%% # handle_info(Req, State)
%%% ---------------------------------------------------------------------------
-handle_info(T,S) ->
+handle_info(T, #state{} = S) ->
case transition(T,S) of
ok ->
{noreply, S};
- #state{} = NS ->
- {noreply, NS};
{stop, Reason} ->
{stop, {shutdown, Reason}, S}
- end.
+ end;
+
+handle_info(T, S) ->
+ handle_info(T, upgrade(S)).
%% transition/2
@@ -507,15 +544,26 @@ transition({accepted, Pid, TPid}, S) ->
%% Peer process has a new open connection.
transition({connection_up, Pid, T}, S) ->
- connection_up(Pid, T, S);
+ connection_up(Pid, T, S),
+ ok;
+
+%% Watchdog has a new connection that will be opened after DW[RA]
+%% exchange. This message was added long after connection_up, to
+%% communicate the information as soon as it's available. Leave
+%% connection_up as it is for now, duplicated information and all.
+transition({reopen, Pid, T}, S) ->
+ reopen(Pid, T, S),
+ ok;
-%% Peer process has left state open.
+%% Watchdog has left state OKAY.
transition({connection_down, Pid}, S) ->
- connection_down(Pid, S);
+ connection_down(Pid, S),
+ ok;
-%% Peer process has returned to state open.
+%% Watchdog has returned to state OKAY.
transition({connection_up, Pid}, S) ->
- connection_up(Pid, S);
+ connection_up(Pid, S),
+ ok;
%% Accepting transport has lost connectivity.
transition({close, Pid}, S) ->
@@ -529,7 +577,7 @@ transition({reconnect, Pid}, S) ->
%% Watchdog is sending notification of a state transition. Note that
%% the connection_up/down messages pre-date this message and are
-%% still used. A 'watchdog' message will follow these and communicate
+%% still used. A watchdog message will follow these and communicate
%% the same state as was set in handling connection_up/down.
transition({watchdog, Pid, {TPid, From, To}}, #state{service_name = SvcName,
peerT = PeerT}) ->
@@ -539,21 +587,22 @@ transition({watchdog, Pid, {TPid, From, To}}, #state{service_name = SvcName,
insert(PeerT, P#peer{op_state = {OS, To}}),
send_event(SvcName, {watchdog, Ref, TPid, {From, To}, {T, Opts}}),
ok;
-%% Death of a peer process results in the removal of it's peer and any
-%% associated conn record when 'DOWN' is received (after this) but the
-%% states will be {?STATE_UP, ?WD_DOWN} for a short time. (No real
-%% problem since ?WD_* is only used in service_info.) We set ?WD_OKAY
-%% as a consequence of connection_up since we know a watchdog is
-%% coming. We can't set anything at connection_down since we don't
-%% know if the subsequent watchdog message will be ?WD_DOWN or
-%% ?WD_SUSPECT. We don't (yet) set ?STATE_* as a consequence of a
-%% watchdog message since this requires changing some of the matching
-%% on ?STATE_*.
-%%
-%% Death of a conn process results in connection_down followed by
-%% watchdog ?WD_DOWN. The latter doesn't result in the conn record
-%% being deleted since 'DOWN' from death of its peer doesn't (yet)
-%% deal with the record having been removed.
+%% Death of a watchdog process (#peer.pid) results in the removal of
+%% its peer and any associated conn record when 'DOWN' is received
+%% (after this) but the states will be {?STATE_UP, ?WD_DOWN} for a
+%% short time. (No real problem since ?WD_* is only used in
+%% service_info.) We set ?WD_OKAY as a consequence of connection_up
+%% since we know a watchdog is coming. We can't set anything at
+%% connection_down since we don't know if the subsequent watchdog
+%% message will be ?WD_DOWN or ?WD_SUSPECT. We don't (yet) set
+%% ?STATE_* as a consequence of a watchdog message since this requires
+%% changing some of the matching on ?STATE_*.
+%%
+%% Death of a peer process (#conn.pid, #peer.conn) results in
+%% connection_down followed by watchdog ?WD_DOWN. The latter doesn't
+%% result in the conn record being deleted since 'DOWN' from death of
+%% its watchdog doesn't (yet) deal with the record having been
+%% removed.
%% Monitor process has died. Just die with a reason that tells
%% diameter_config about the happening. If a cleaner shutdown is
@@ -561,23 +610,26 @@ transition({watchdog, Pid, {TPid, From, To}}, #state{service_name = SvcName,
transition({'DOWN', MRef, process, _, Reason}, #state{monitor = MRef}) ->
{stop, {monitor, Reason}};
-%% Local peer process has died.
+%% Local watchdog process has died.
transition({'DOWN', _, process, Pid, Reason}, S)
when node(Pid) == node() ->
- peer_down(Pid, Reason, S);
+ peer_down(Pid, Reason, S),
+ ok;
-%% Remote service wants to know about shared transports.
+%% Remote service wants to know about shared peers.
transition({service, Pid}, S) ->
share_peers(Pid, S),
ok;
%% Remote service is communicating a shared peer.
transition({peer, TPid, Aliases, Caps}, S) ->
- remote_peer_up(TPid, Aliases, Caps, S);
+ remote_peer_up(TPid, Aliases, Caps, S),
+ ok;
%% Remote peer process has died.
transition({'DOWN', _, process, TPid, _}, S) ->
- remote_peer_down(TPid, S);
+ remote_peer_down(TPid, S),
+ ok;
%% Restart after tc expiry.
transition({tc_timeout, T}, S) ->
@@ -591,10 +643,39 @@ transition({failover, TRef, Seqs}, S) ->
failover(TRef, Seqs, S),
ok;
+%% Ensure upgraded state is stored in state table.
+transition(upgrade, _) ->
+ ok;
+
transition(Req, S) ->
unexpected(handle_info, [Req], S),
ok.
+%% upgrade/1
+
+upgrade({state, Id, Name, Svc, PT, CT, SB, UB, SD, LD, MPid}) ->
+ S = #state{id = Id,
+ service_name = Name,
+ service = Svc,
+ peerT = PT,
+ connT = CT,
+ shared_peers = SD,
+ local_peers = LD,
+ monitor = MPid,
+ options = [{sequence, ?NOMASK},
+ {share_peers, SB},
+ {use_shared_peers, UB},
+ {restrict_connections, ?RESTRICT}]},
+ upgrade_insert(S),
+ S.
+
+upgrade_insert(#state{service = #diameter_service{pid = Pid}} = S) ->
+ if Pid == self() ->
+ ets:insert(?STATE_TABLE, S);
+ true ->
+ Pid ! upgrade
+ end.
+
%%% ---------------------------------------------------------------------------
%%% # terminate(Reason, State)
%%% ---------------------------------------------------------------------------
@@ -710,16 +791,20 @@ shutdown(#state{peerT = PeerT}) ->
wait(Fun, T) ->
diameter_lib:wait(ets:foldl(Fun, [], T)).
-st(#peer{conn = B}, Acc)
- when is_boolean(B) ->
- Acc;
-st(#peer{conn = Pid}, Acc) ->
+st(#peer{op_state = {OS,_}} = P, Acc) ->
+ st(P#peer{op_state = OS}, Acc);
+st(#peer{op_state = ?STATE_UP, conn = Pid}, Acc) ->
Pid ! shutdown,
- [Pid | Acc].
+ [Pid | Acc];
+st(#peer{}, Acc) ->
+ Acc.
-sw(#peer{pid = Pid}, Acc) ->
+sw(#peer{pid = Pid}, Acc)
+ when is_pid(Pid) ->
exit(Pid, shutdown),
- [Pid | Acc].
+ [Pid | Acc];
+sw(#peer{}, Acc) ->
+ Acc.
%%% ---------------------------------------------------------------------------
%%% # call_service/2
@@ -779,9 +864,8 @@ cfg_acc({SvcName, #diameter_service{applications = Apps} = Rec, Opts},
lists:foreach(fun init_mod/1, Apps),
S = #state{service_name = SvcName,
service = Rec#diameter_service{pid = self()},
- share_peers = get_value(share_peers, Opts),
- use_shared_peers = get_value(use_shared_peers, Opts),
- monitor = mref(get_value(monitor, Opts))},
+ monitor = mref(get_value(monitor, Opts)),
+ options = service_options(Opts)},
{S, Acc};
cfg_acc({_Ref, Type, _Opts} = T, {S, Acc})
@@ -789,15 +873,24 @@ cfg_acc({_Ref, Type, _Opts} = T, {S, Acc})
Type == listen ->
{S, [T | Acc]}.
+service_options(Opts) ->
+ [{sequence, proplists:get_value(sequence, Opts, ?NOMASK)},
+ {share_peers, get_value(share_peers, Opts)},
+ {use_shared_peers, get_value(use_shared_peers, Opts)},
+ {restrict_connections, proplists:get_value(restrict_connections,
+ Opts,
+ ?RESTRICT)}].
+%% The order of options is significant since we match against the list.
+
mref(false = No) ->
No;
mref(P) ->
erlang:monitor(process, P).
-init_shared(#state{use_shared_peers = true,
+init_shared(#state{options = [_, _, {_, true} | _],
service_name = Svc}) ->
diameter_peer:notify(Svc, {service, self()});
-init_shared(#state{use_shared_peers = false}) ->
+init_shared(#state{options = [_, _, {_, false} | _]}) ->
ok.
init_mod(#diameter_app{alias = Alias,
@@ -860,9 +953,8 @@ start(Ref, Type, Opts, #state{peerT = PeerT,
Pid.
%% Note that the service record passed into the watchdog is the merged
-%% record so that each watchdog (and peer_fsm) may get a different
-%% record. This record is what is passed back into application
-%% callbacks.
+%% record so that each watchdog may get a different record. This
+%% record is what is passed back into application callbacks.
s(Type, Ref, T) ->
case diameter_watchdog:start({Type, Ref}, T) of
@@ -913,8 +1005,8 @@ accepted(Pid, _TPid, #state{peerT = PeerT} = S) ->
#peer{ref = Ref, type = accept = T, conn = false, options = Opts}
= P
= fetch(PeerT, Pid),
- insert(PeerT, P#peer{conn = true}), %% mark replacement transport started
- start(Ref, T, Opts, S). %% start new peer
+ insert(PeerT, P#peer{conn = true}), %% mark replacement as started
+ start(Ref, T, Opts, S). %% start new watchdog
fetch(Tid, Key) ->
[T] = ets:lookup(Tid, Key),
@@ -929,11 +1021,9 @@ fetch(Tid, Key) ->
%%% ---------------------------------------------------------------------------
%%% # connection_up/3
-%%%
-%%% Output: #state{}
%%% ---------------------------------------------------------------------------
-%% Peer process has reached the open state.
+%% Watchdog process has reached state OKAY.
connection_up(Pid, {TPid, {Caps, SApps, Pkt}}, #state{peerT = PeerT,
connT = ConnT}
@@ -948,9 +1038,29 @@ connection_up(Pid, {TPid, {Caps, SApps, Pkt}}, #state{peerT = PeerT,
connection_up([Pkt], P#peer{conn = TPid}, C, S).
%%% ---------------------------------------------------------------------------
+%%% # reopen/3
+%%% ---------------------------------------------------------------------------
+
+%% Note that this connection_up/3 rewrites the same #conn{} now
+%% written here. Both do so in case reopen has not happened in old
+%% code.
+
+reopen(Pid, {TPid, {Caps, SApps, _Pkt}}, #state{peerT = PeerT,
+ connT = ConnT}) ->
+ P = fetch(PeerT, Pid),
+ C = #conn{pid = TPid,
+ apps = SApps,
+ caps = Caps,
+ peer = Pid},
+
+ insert(ConnT, C),
+ #peer{op_state = {?STATE_DOWN, _}}
+ = P,
+ insert(PeerT, P#peer{op_state = {?STATE_DOWN, ?WD_REOPEN},
+ conn = TPid}).
+
+%%% ---------------------------------------------------------------------------
%%% # connection_up/2
-%%%
-%%% Output: #state{}
%%% ---------------------------------------------------------------------------
%% Peer process has transitioned back into the open state. Note that there
@@ -979,10 +1089,8 @@ connection_up(T, P, C, #state{peerT = PeerT,
insert(PeerT, P#peer{op_state = {?STATE_UP, ?WD_OKAY}}),
request_peer_up(TPid),
- report_status(up, P, C, S, T),
- S#state{local_peers = insert_local_peer(SApps,
- {{TPid, Caps}, {SvcName, Apps}},
- LDict)}.
+ insert_local_peer(SApps, {{TPid, Caps}, {SvcName, Apps}}, LDict),
+ report_status(up, P, C, S, T).
insert_local_peer(SApps, T, LDict) ->
lists:foldl(fun(A,D) -> ilp(A, T, D) end, LDict, SApps).
@@ -1024,11 +1132,9 @@ peer_cb(MFA, Alias) ->
%%% ---------------------------------------------------------------------------
%%% # connection_down/2
-%%%
-%%% Output: #state{}
%%% ---------------------------------------------------------------------------
-%% Peer process has transitioned out of the open state.
+%% Watchdog has transitioned out of state OKAY.
connection_down(Pid, #state{peerT = PeerT,
connT = ConnT}
@@ -1044,8 +1150,8 @@ connection_down(Pid, #state{peerT = PeerT,
%% connection_down/3
-connection_down(#peer{op_state = {?STATE_DOWN, _}}, _, S) ->
- S;
+connection_down(#peer{op_state = {?STATE_DOWN, _}}, _, _) ->
+ ok;
connection_down(#peer{conn = TPid,
op_state = {?STATE_UP, _}}
@@ -1058,12 +1164,8 @@ connection_down(#peer{conn = TPid,
local_peers = LDict}
= S) ->
report_status(down, P, C, S, []),
- NewS = S#state{local_peers
- = remove_local_peer(SApps,
- {{TPid, Caps}, {SvcName, Apps}},
- LDict)},
- request_peer_down(TPid, NewS),
- NewS.
+ remove_local_peer(SApps, {{TPid, Caps}, {SvcName, Apps}}, LDict),
+ request_peer_down(TPid, S).
remove_local_peer(SApps, T, LDict) ->
lists:foldl(fun(A,D) -> rlp(A, T, D) end, LDict, SApps).
@@ -1082,11 +1184,9 @@ down_conn(Id, Alias, TC, {SvcName, Apps}) ->
%%% ---------------------------------------------------------------------------
%%% # peer_down/3
-%%%
-%%% Output: #state{}
%%% ---------------------------------------------------------------------------
-%% Peer process has died.
+%% Watchdog process has died.
peer_down(Pid, Reason, #state{peerT = PeerT} = S) ->
P = fetch(PeerT, Pid),
@@ -1106,12 +1206,12 @@ closed({shutdown, {close, _TPid, Reason}},
closed(_, _, _) ->
ok.
-%% The peer has never come up ...
-peer_down(#peer{conn = B}, S)
+%% The watchdog has never reached OKAY ...
+peer_down(#peer{conn = B}, _)
when is_boolean(B) ->
- S;
+ ok;
-%% ... or it has.
+%% ... or maybe it has.
peer_down(#peer{conn = TPid} = P, #state{connT = ConnT} = S) ->
#conn{} = C = fetch(ConnT, TPid),
ets:delete_object(ConnT, C),
@@ -1139,7 +1239,7 @@ restart(#peer{ref = Ref,
started = Time}) ->
{Time, {Ref, T, Opts}};
-%% ... or it has: a replacement transport has already been spawned.
+%% ... or it has: a replacement has already been spawned.
restart(#peer{type = accept}) ->
false.
@@ -1165,8 +1265,8 @@ default_tc(connect, Opts) ->
default_tc(accept, _) ->
0.
-%% Bound tc below if the peer was restarted recently to avoid
-%% continuous in case of faulty config or other problems.
+%% Bound tc below if the watchdog was restarted recently to avoid
+%% continuous restarts in case of faulty config or other problems.
tc(Time, Tc) ->
choose(Tc > ?RESTART_TC
orelse timer:now_diff(now(), Time) > 1000*?RESTART_TC,
@@ -1288,7 +1388,7 @@ cm([_,_|_], _, _, _) ->
multiple.
%%% ---------------------------------------------------------------------------
-%%% # send_request/5
+%%% # send_request/6
%%% ---------------------------------------------------------------------------
%% Send an outgoing request in its dedicated process.
@@ -1301,71 +1401,89 @@ cm([_,_|_], _, _, _) ->
%% The mod field of the #diameter_app{} here includes any extra
%% arguments passed to diameter:call/2.
-send_request({TPid, Caps, App}, Msg, Opts, Caller, SvcName) ->
+send_request({TPid, Caps, App} = T, Mask, Msg, Opts, Caller, SvcName) ->
#diameter_app{module = ModX}
= App,
- Pkt = make_request_packet(Msg),
-
- case cb(ModX, prepare_request, [Pkt, SvcName, {TPid, Caps}]) of
- {send, P} ->
- send_request(make_request_packet(P, Pkt),
- TPid,
- Caps,
- App,
- Opts,
- Caller,
- SvcName);
- {discard, Reason} ->
- {error, Reason};
- discard ->
- {error, discarded};
- T ->
- ?ERROR({invalid_return, prepare_request, App, T})
- end.
+ Pkt = make_prepare_packet(Mask, Msg),
+
+ send_req(cb(ModX, prepare_request, [Pkt, SvcName, {TPid, Caps}]),
+ Pkt,
+ T,
+ Opts,
+ Caller,
+ SvcName,
+ []).
+
+send_req({send, P}, Pkt, T, Opts, Caller, SvcName, Fs) ->
+ send_req(make_request_packet(P, Pkt), T, Opts, Caller, SvcName, Fs);
-%% make_request_packet/1
+send_req({discard, Reason} , _, _, _, _, _, _) ->
+ {error, Reason};
+
+send_req(discard, _, _, _, _, _, _) ->
+ {error, discarded};
+
+send_req({eval_packet, RC, F}, Pkt, T, Opts, Caller, SvcName, Fs) ->
+ send_req(RC, Pkt, T, Opts, Caller, SvcName, [F|Fs]);
+
+send_req(E, _, {_, _, App}, _, _, _, _) ->
+ ?ERROR({invalid_return, prepare_request, App, E}).
+
+%% make_prepare_packet/2
%%
%% Turn an outgoing request as passed to call/4 into a diameter_packet
%% record in preparation for a prepare_request callback.
-make_request_packet(Bin)
+make_prepare_packet(_, Bin)
when is_binary(Bin) ->
#diameter_packet{header = diameter_codec:decode_header(Bin),
bin = Bin};
-make_request_packet(#diameter_packet{msg = [#diameter_header{} = Hdr | Avps]}
- = Pkt) ->
- Pkt#diameter_packet{msg = [make_request_header(Hdr) | Avps]};
+make_prepare_packet(Mask, #diameter_packet{msg = [#diameter_header{} = Hdr
+ | Avps]}
+ = Pkt) ->
+ Pkt#diameter_packet{msg = [make_prepare_header(Mask, Hdr) | Avps]};
-make_request_packet(#diameter_packet{header = Hdr} = Pkt) ->
- Pkt#diameter_packet{header = make_request_header(Hdr)};
+make_prepare_packet(Mask, #diameter_packet{header = Hdr} = Pkt) ->
+ Pkt#diameter_packet{header = make_prepare_header(Mask, Hdr)};
-make_request_packet(Msg) ->
- make_request_packet(#diameter_packet{msg = Msg}).
+make_prepare_packet(Mask, Msg) ->
+ make_prepare_packet(Mask, #diameter_packet{msg = Msg}).
-%% make_request_header/1
+%% make_prepare_header/2
-make_request_header(undefined) ->
- Seq = diameter_session:sequence(),
- make_request_header(#diameter_header{end_to_end_id = Seq,
+make_prepare_header(Mask, undefined) ->
+ Seq = diameter_session:sequence(Mask),
+ make_prepare_header(#diameter_header{end_to_end_id = Seq,
hop_by_hop_id = Seq});
-make_request_header(#diameter_header{version = undefined} = Hdr) ->
- make_request_header(Hdr#diameter_header{version = ?DIAMETER_VERSION});
+make_prepare_header(Mask, #diameter_header{end_to_end_id = undefined,
+ hop_by_hop_id = undefined}
+ = H) ->
+ Seq = diameter_session:sequence(Mask),
+ make_prepare_header(H#diameter_header{end_to_end_id = Seq,
+ hop_by_hop_id = Seq});
+
+make_prepare_header(Mask, #diameter_header{end_to_end_id = undefined} = H) ->
+ Seq = diameter_session:sequence(Mask),
+ make_prepare_header(H#diameter_header{end_to_end_id = Seq});
+
+make_prepare_header(Mask, #diameter_header{hop_by_hop_id = undefined} = H) ->
+ Seq = diameter_session:sequence(Mask),
+ make_prepare_header(H#diameter_header{hop_by_hop_id = Seq});
+
+make_prepare_header(_, Hdr) ->
+ make_prepare_header(Hdr).
-make_request_header(#diameter_header{end_to_end_id = undefined} = H) ->
- Seq = diameter_session:sequence(),
- make_request_header(H#diameter_header{end_to_end_id = Seq});
+%% make_prepare_header/1
-make_request_header(#diameter_header{hop_by_hop_id = undefined} = H) ->
- Seq = diameter_session:sequence(),
- make_request_header(H#diameter_header{hop_by_hop_id = Seq});
+make_prepare_header(#diameter_header{version = undefined} = Hdr) ->
+ make_prepare_header(Hdr#diameter_header{version = ?DIAMETER_VERSION});
-make_request_header(#diameter_header{} = Hdr) ->
+make_prepare_header(#diameter_header{} = Hdr) ->
Hdr;
-make_request_header(T) ->
+make_prepare_header(T) ->
?ERROR({invalid_header, T}).
%% make_request_packet/2
@@ -1375,7 +1493,7 @@ make_request_header(T) ->
make_request_packet(Bin, _)
when is_binary(Bin) ->
- make_request_packet(Bin);
+ make_prepare_packet(false, Bin);
make_request_packet(#diameter_packet{msg = [#diameter_header{} | _]}
= Pkt,
@@ -1387,7 +1505,7 @@ make_request_packet(#diameter_packet{msg = [#diameter_header{} | _]}
%% This is primarily so that the end to end and hop by hop identifiers
%% are retained.
make_request_packet(#diameter_packet{header = Hdr} = Pkt,
- #diameter_packet{header = Hdr0}) ->
+ #diameter_packet{header = Hdr0}) ->
Pkt#diameter_packet{header = fold_record(Hdr0, Hdr)};
make_request_packet(Msg, Pkt) ->
@@ -1400,16 +1518,16 @@ fold_record(undefined, R) ->
fold_record(Rec, R) ->
diameter_lib:fold_tuple(2, Rec, R).
-%% send_request/7
+%% send_req/6
-send_request(Pkt, TPid, Caps, App, Opts, Caller, SvcName) ->
+send_req(Pkt, {TPid, Caps, App}, Opts, Caller, SvcName, Fs) ->
#diameter_app{alias = Alias,
dictionary = Dict,
module = ModX,
options = [{answer_errors, AE} | _]}
= App,
- EPkt = encode(Dict, Pkt),
+ EPkt = encode(Dict, Pkt, Fs),
#options{filter = Filter,
timeout = Timeout}
@@ -1490,6 +1608,13 @@ msg(#diameter_packet{msg = undefined, bin = Bin}) ->
msg(#diameter_packet{msg = Msg}) ->
Msg.
+%% encode/3
+
+encode(Dict, Pkt, Fs) ->
+ P = encode(Dict, Pkt),
+ eval_packet(P, Fs),
+ P.
+
%% encode/2
%% Note that prepare_request can return a diameter_packet containing
@@ -1571,38 +1696,47 @@ send(Pid, Pkt) ->
%% retransmit/4
-retransmit({TPid, Caps, #diameter_app{alias = Alias} = App},
- #request{app = Alias,
- packet = Pkt}
+retransmit({TPid, Caps, #diameter_app{alias = Alias} = App} = T,
+ #request{app = Alias, packet = Pkt}
= Req,
SvcName,
Timeout) ->
have_request(Pkt, TPid) %% Don't failover to a peer we've
andalso ?THROW(timeout), %% already sent to.
- case cb(App, prepare_retransmit, [Pkt, SvcName, {TPid, Caps}]) of
- {send, P} ->
- retransmit(make_request_packet(P, Pkt), TPid, Caps, Req, Timeout);
- {discard, Reason} ->
- ?THROW(Reason);
- discard ->
- ?THROW(discarded);
- T ->
- ?ERROR({invalid_return, prepare_retransmit, App, T})
- end.
+ resend_req(cb(App, prepare_retransmit, [Pkt, SvcName, {TPid, Caps}]),
+ T,
+ Req,
+ Timeout,
+ []).
+
+resend_req({send, P}, T, #request{packet = Pkt} = Req, Timeout, Fs) ->
+ retransmit(make_request_packet(P, Pkt), T, Req, Timeout, Fs);
+
+resend_req({discard, Reason}, _, _, _, _) ->
+ ?THROW(Reason);
+
+resend_req(discard, _, _, _, _) ->
+ ?THROW(discarded);
-%% retransmit/5
+resend_req({eval_packet, RC, F}, T, Req, Timeout, Fs) ->
+ resend_req(RC, T, Req, Timeout, [F|Fs]);
+
+resend_req(T, {_, _, App}, _, _, _) ->
+ ?ERROR({invalid_return, prepare_retransmit, App, T}).
+
+%% retransmit/5
-retransmit(Pkt, TPid, Caps, #request{dictionary = Dict} = Req, Timeout) ->
- EPkt = encode(Dict, Pkt),
+retransmit(Pkt, {TPid, Caps, _}, #request{dictionary = D} = Req0, Tmo, Fs) ->
+ EPkt = encode(D, Pkt, Fs),
- NewReq = Req#request{transport = TPid,
- packet = Pkt,
- caps = Caps},
+ Req = Req0#request{transport = TPid,
+ packet = Pkt,
+ caps = Caps},
- ?LOG(retransmission, NewReq),
- TRef = send_request(TPid, EPkt, NewReq, Timeout),
- {TRef, NewReq}.
+ ?LOG(retransmission, Req),
+ TRef = send_request(TPid, EPkt, Req, Tmo),
+ {TRef, Req}.
%% store_request/4
@@ -1674,10 +1808,13 @@ request_peer_down(TPid, S) ->
%%% recv_request/3
%%% ---------------------------------------------------------------------------
-recv_request(TPid, Pkt, {ConnT, SvcName, Apps}) ->
+recv_request(TPid, Pkt, {ConnT, SvcName, Apps}) -> %% upgrade
+ recv_request(TPid, Pkt, {ConnT, SvcName, Apps, ?NOMASK});
+
+recv_request(TPid, Pkt, {ConnT, SvcName, Apps, Mask}) ->
try ets:lookup(ConnT, TPid) of
[C] ->
- recv_request(C, TPid, Pkt, SvcName, Apps);
+ recv_request(C, TPid, Pkt, SvcName, Apps, Mask);
[] -> %% transport has gone down
ok
catch
@@ -1687,7 +1824,12 @@ recv_request(TPid, Pkt, {ConnT, SvcName, Apps}) ->
%% recv_request/5
-recv_request(#conn{apps = SApps, caps = Caps}, TPid, Pkt, SvcName, Apps) ->
+recv_request(#conn{apps = SApps, caps = Caps},
+ TPid,
+ Pkt,
+ SvcName,
+ Apps,
+ Mask) ->
#diameter_caps{origin_host = {OH,_},
origin_realm = {OR,_}}
= Caps,
@@ -1699,6 +1841,7 @@ recv_request(#conn{apps = SApps, caps = Caps}, TPid, Pkt, SvcName, Apps) ->
{SvcName, OH, OR},
TPid,
Apps,
+ Mask,
Caps,
Pkt).
@@ -1724,20 +1867,24 @@ keyfind([Key | Rest], Pos, L) ->
T
end.
-%% recv_request/6
+%% recv_request/7
-recv_request({Id, Alias}, T, TPid, Apps, Caps, Pkt) ->
+recv_request({Id, Alias}, T, TPid, Apps, Mask, Caps, Pkt) ->
#diameter_app{dictionary = Dict}
= A
= find_app(Alias, Apps),
- recv_request(T, {TPid, Caps}, A, diameter_codec:decode(Id, Dict, Pkt));
+ recv_request(T,
+ {TPid, Caps},
+ A,
+ Mask,
+ diameter_codec:decode(Id, Dict, Pkt));
%% Note that the decode is different depending on whether or not Id is
%% ?APP_ID_RELAY.
%% DIAMETER_APPLICATION_UNSUPPORTED 3007
%% A request was sent for an application that is not supported.
-recv_request(false, T, TPid, _, _, Pkt) ->
+recv_request(false, T, TPid, _, _, _, Pkt) ->
As = collect_avps(Pkt),
protocol_error(3007, T, TPid, Pkt#diameter_packet{avps = As}).
@@ -1749,7 +1896,7 @@ collect_avps(Pkt) ->
As
end.
-%% recv_request/4
+%% recv_request/5
%% Wrong number of bits somewhere in the message: reply.
%%
@@ -1758,7 +1905,7 @@ collect_avps(Pkt) ->
%% set to an unrecognized value, or that is inconsistent with the
%% AVP's definition.
%%
-recv_request(T, {TPid, _}, _, #diameter_packet{errors = [Bs | _]} = Pkt)
+recv_request(T, {TPid, _}, _, _, #diameter_packet{errors = [Bs | _]} = Pkt)
when is_bitstring(Bs) ->
protocol_error(3009, T, TPid, Pkt);
@@ -1773,6 +1920,7 @@ recv_request(T, {TPid, _}, _, #diameter_packet{errors = [Bs | _]} = Pkt)
recv_request(T,
{TPid, _},
#diameter_app{id = Id},
+ _,
#diameter_packet{header = #diameter_header{is_proxiable = P},
msg = M}
= Pkt)
@@ -1790,6 +1938,7 @@ recv_request(T,
recv_request(T,
{TPid, _},
_,
+ _,
#diameter_packet{header = #diameter_header{is_error = true}}
= Pkt) ->
protocol_error(3008, T, TPid, Pkt);
@@ -1798,14 +1947,20 @@ recv_request(T,
%% in the relay application. Don't distinguish between the two since
%% each application has its own callback config. That is, the user can
%% easily distinguish between the two cases.
-recv_request(T, TC, App, Pkt) ->
- request_cb(T, TC, App, examine(Pkt)).
+recv_request(T, TC, App, Mask, Pkt) ->
+ request_cb(T, TC, App, Mask, examine(Pkt)).
%% Note that there may still be errors but these aren't protocol
%% (3xxx) errors that lead to an answer-message.
-request_cb({SvcName, _OH, _OR} = T, TC, App, Pkt) ->
- request_cb(cb(App, handle_request, [Pkt, SvcName, TC]), App, T, TC, Pkt).
+request_cb({SvcName, _OH, _OR} = T, TC, App, Mask, Pkt) ->
+ request_cb(cb(App, handle_request, [Pkt, SvcName, TC]),
+ App,
+ Mask,
+ T,
+ TC,
+ [],
+ Pkt).
%% examine/1
%%
@@ -1825,7 +1980,7 @@ examine(#diameter_packet{errors = Es} = Pkt) ->
Pkt#diameter_packet{errors = [5011 | Es]}.
%% It's odd/unfortunate that this isn't a protocol error.
-%% request_cb/5
+%% request_cb/7
%% A reply may be an answer-message, constructed either here or by
%% the handle_request callback. The header from the incoming request
@@ -1835,21 +1990,23 @@ examine(#diameter_packet{errors = Es} = Pkt) ->
request_cb({reply, Ans},
#diameter_app{dictionary = Dict},
_,
+ _,
{TPid, _},
+ Fs,
Pkt) ->
- reply(Ans, Dict, TPid, Pkt);
+ reply(Ans, Dict, TPid, Fs, Pkt);
%% An 3xxx result code, for which the E-bit is set in the header.
-request_cb({protocol_error, RC}, _, T, {TPid, _}, Pkt)
+request_cb({protocol_error, RC}, _, _, T, {TPid, _}, Fs, Pkt)
when 3000 =< RC, RC < 4000 ->
- protocol_error(RC, T, TPid, Pkt);
+ protocol_error(RC, T, TPid, Fs, Pkt);
%% RFC 3588 says we must reply 3001 to anything unrecognized or
%% unsupported. 'noreply' is undocumented (and inappropriately named)
%% backwards compatibility for this, protocol_error the documented
%% alternative.
-request_cb(noreply, _, T, {TPid, _}, Pkt) ->
- protocol_error(3001, T, TPid, Pkt);
+request_cb(noreply, _, _, T, {TPid, _}, Fs, Pkt) ->
+ protocol_error(3001, T, TPid, Fs, Pkt);
%% Relay a request to another peer. This is equivalent to doing an
%% explicit call/4 with the message in question except that (1) a loop
@@ -1869,38 +2026,51 @@ request_cb(noreply, _, T, {TPid, _}, Pkt) ->
request_cb({A, Opts},
#diameter_app{id = Id}
= App,
+ Mask,
T,
TC,
+ Fs,
Pkt)
when A == relay, Id == ?APP_ID_RELAY;
A == proxy, Id /= ?APP_ID_RELAY;
A == resend ->
- resend(Opts, App, T, TC, Pkt);
+ resend(Opts, App, Mask, T, TC, Fs, Pkt);
-request_cb(discard, _, _, _, _) ->
+request_cb(discard, _, _, _, _, _, _) ->
ok;
-request_cb({eval, RC, F}, App, T, TC, Pkt) ->
- request_cb(RC, App, T, TC, Pkt),
+request_cb({eval_packet, RC, F}, App, Mask, T, TC, Fs, Pkt) ->
+ request_cb(RC, App, Mask, T, TC, [F|Fs], Pkt);
+
+request_cb({eval, RC, F}, App, Mask, T, TC, Fs, Pkt) ->
+ request_cb(RC, App, Mask, T, TC, Fs, Pkt),
diameter_lib:eval(F).
-%% protocol_error/4
+%% protocol_error/5
-protocol_error(RC, {_, OH, OR}, TPid, #diameter_packet{avps = Avps} = Pkt) ->
+protocol_error(RC, {_, OH, OR}, TPid, Fs, Pkt) ->
+ #diameter_packet{avps = Avps} = Pkt,
?LOG({error, RC}, Pkt),
- reply(answer_message({OH, OR, RC}, Avps), ?BASE, TPid, Pkt).
+ reply(answer_message({OH, OR, RC}, Avps), ?BASE, TPid, Fs, Pkt).
+
+%% protocol_error/4
-%% resend/5
+protocol_error(RC, T, TPid, Pkt) ->
+ protocol_error(RC, T, TPid, [], Pkt).
+
+%% resend/7
%%
%% Resend a message as a relay or proxy agent.
resend(Opts,
#diameter_app{} = App,
+ Mask,
{_SvcName, OH, _OR} = T,
{_TPid, _Caps} = TC,
+ Fs,
#diameter_packet{avps = Avps} = Pkt) ->
{Code, _Flags, Vid} = ?BASE:avp_header('Route-Record'),
- resend(is_loop(Code, Vid, OH, Avps), Opts, App, T, TC, Pkt).
+ resend(is_loop(Code, Vid, OH, Avps), Opts, App, Mask, T, TC, Fs, Pkt).
%% DIAMETER_LOOP_DETECTED 3005
%% An agent detected a loop while trying to get the message to the
@@ -1908,8 +2078,8 @@ resend(Opts,
%% if one is available, but the peer reporting the error has
%% identified a configuration problem.
-resend(true, _, _, T, {TPid, _}, Pkt) -> %% Route-Record loop
- protocol_error(3005, T, TPid, Pkt);
+resend(true, _, _, _, T, {TPid, _}, Fs, Pkt) -> %% Route-Record loop
+ protocol_error(3005, T, TPid, Fs, Pkt);
%% 6.1.8. Relaying and Proxying Requests
%%
@@ -1920,16 +2090,18 @@ resend(true, _, _, T, {TPid, _}, Pkt) -> %% Route-Record loop
resend(false,
Opts,
App,
+ Mask,
{SvcName, _, _} = T,
{TPid, #diameter_caps{origin_host = {_, OH}}},
+ Fs,
#diameter_packet{header = Hdr0,
avps = Avps}
= Pkt) ->
Route = #diameter_avp{data = {?BASE, 'Route-Record', OH}},
- Seq = diameter_session:sequence(),
+ Seq = diameter_session:sequence(Mask),
Hdr = Hdr0#diameter_header{hop_by_hop_id = Seq},
Msg = [Hdr, Route | Avps],
- resend(call(SvcName, App, Msg, Opts), T, TPid, Pkt).
+ resend(call(SvcName, App, Msg, Opts), T, TPid, Fs, Pkt).
%% The incoming request is relayed with the addition of a
%% Route-Record. Note the requirement on the return from call/4 below,
%% which places a requirement on the value returned by the
@@ -1955,15 +2127,18 @@ resend(#diameter_packet{bin = B}
= Pkt,
_,
TPid,
+ Fs,
#diameter_packet{header = #diameter_header{hop_by_hop_id = Id},
transport_data = TD}) ->
- send(TPid, Pkt#diameter_packet{bin = diameter_codec:hop_by_hop_id(Id, B),
- transport_data = TD});
+ P = Pkt#diameter_packet{bin = diameter_codec:hop_by_hop_id(Id, B),
+ transport_data = TD},
+ eval_packet(P, Fs),
+ send(TPid, P);
%% TODO: counters
%% Or not: DIAMETER_UNABLE_TO_DELIVER.
-resend(_, T, TPid, Pkt) ->
- protocol_error(3002, T, TPid, Pkt).
+resend(_, T, TPid, Fs, Pkt) ->
+ protocol_error(3002, T, TPid, Fs, Pkt).
%% is_loop/4
%%
@@ -1985,33 +2160,38 @@ is_loop(Code, Vid, OH, [_ | Avps])
is_loop(Code, Vid, OH, Avps) ->
is_loop(Code, Vid, ?BASE:avp(encode, OH, 'Route-Record'), Avps).
-%% reply/4
+%% reply/5
%%
%% Send a locally originating reply.
%% Skip the setting of Result-Code and Failed-AVP's below.
-reply([Msg], Dict, TPid, Pkt)
+reply([Msg], Dict, TPid, Fs, Pkt)
when is_list(Msg);
is_tuple(Msg) ->
- reply(Msg, Dict, TPid, Pkt#diameter_packet{errors = []});
+ reply(Msg, Dict, TPid, Fs, Pkt#diameter_packet{errors = []});
%% No errors or a diameter_header/avp list.
-reply(Msg, Dict, TPid, #diameter_packet{errors = Es,
- transport_data = TD}
- = ReqPkt)
+reply(Msg, Dict, TPid, Fs, #diameter_packet{errors = Es,
+ transport_data = TD}
+ = ReqPkt)
when [] == Es;
is_record(hd(Msg), diameter_header) ->
Pkt = diameter_codec:encode(Dict, make_answer_packet(Msg, ReqPkt)),
+ eval_packet(Pkt, Fs),
incr(send, Pkt, Dict, TPid), %% count result codes in sent answers
send(TPid, Pkt#diameter_packet{transport_data = TD});
%% Or not: set Result-Code and Failed-AVP AVP's.
-reply(Msg, Dict, TPid, #diameter_packet{errors = [H|_] = Es} = Pkt) ->
+reply(Msg, Dict, TPid, Fs, #diameter_packet{errors = [H|_] = Es} = Pkt) ->
reply(rc(Msg, rc(H), [A || {_,A} <- Es], Dict),
Dict,
TPid,
+ Fs,
Pkt#diameter_packet{errors = []}).
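+
+%% eval_packet/2
+%%
+%% Apply any eval_packet callbacks (accumulated by request_cb/7 from
+%% eval_packet results, e.g. for logging in the test suites) to an
+%% outgoing packet before it's sent.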
+eval_packet(Pkt, Fs) ->
+ lists:foreach(fun(F) -> diameter_lib:eval([F,Pkt]) end, Fs).
+
%% make_answer_packet/2
%% Binaries and header/avp lists are sent as-is.
@@ -2071,7 +2251,7 @@ rc([MsgName | _], {'Result-Code' = K, RC} = T, Dict) ->
rc(Rec, T, Dict) ->
rc([Dict:rec2msg(element(1, Rec))], T, Dict).
-
+
%% failed_avp/3
failed_avp(_, [] = No, _) ->
@@ -2307,7 +2487,7 @@ a(#diameter_packet{errors = Es} = Pkt, SvcName, AE, #request{transport = TPid,
when [] == Es;
callback == AE ->
cb(Req, handle_answer, [Pkt, msg(P), SvcName, {TPid, Caps}]);
-
+
a(Pkt, SvcName, report, Req) ->
x(errors, handle_answer, [SvcName, Req, Pkt]);
@@ -2469,7 +2649,7 @@ send_event(#diameter_event{service = SvcName} = E) ->
%%% # share_peer/5
%%% ---------------------------------------------------------------------------
-share_peer(up, Caps, Aliases, TPid, #state{share_peers = true,
+share_peer(up, Caps, Aliases, TPid, #state{options = [_, {_, true} | _],
service_name = Svc}) ->
diameter_peer:notify(Svc, {peer, TPid, Aliases, Caps});
@@ -2480,11 +2660,11 @@ share_peer(_, _, _, _, _) ->
%%% # share_peers/2
%%% ---------------------------------------------------------------------------
-share_peers(Pid, #state{share_peers = true,
+share_peers(Pid, #state{options = [_, {_, true} | _],
local_peers = PDict}) ->
?Dict:fold(fun(A,Ps,ok) -> sp(Pid, A, Ps), ok end, ok, PDict);
-share_peers(_, #state{share_peers = false}) ->
+share_peers(_, _) ->
ok.
sp(Pid, Alias, Peers) ->
@@ -2494,39 +2674,31 @@ sp(Pid, Alias, Peers) ->
%%% # remote_peer_up/4
%%% ---------------------------------------------------------------------------
-remote_peer_up(Pid, Aliases, Caps, #state{use_shared_peers = true,
+remote_peer_up(Pid, Aliases, Caps, #state{options = [_, _, {_, true} | _],
service = Svc,
- shared_peers = PDict}
- = S) ->
+ shared_peers = PDict}) ->
#diameter_service{applications = Apps} = Svc,
- Update = lists:filter(fun(A) ->
- lists:keymember(A, #diameter_app.alias, Apps)
- end,
- Aliases),
- S#state{shared_peers = rpu(Pid, Caps, PDict, Update)};
+ Key = #diameter_app.alias,
+ As = lists:filter(fun(A) -> lists:keymember(A, Key, Apps) end, Aliases),
+ rpu(Pid, Caps, PDict, As);
-remote_peer_up(_, _, _, #state{use_shared_peers = false} = S) ->
- S.
+remote_peer_up(_, _, _, #state{options = [_, _, {_, false} | _]}) ->
+ ok.
rpu(_, _, PDict, []) ->
PDict;
rpu(Pid, Caps, PDict, Aliases) ->
erlang:monitor(process, Pid),
T = {Pid, Caps},
- lists:foldl(fun(A,D) -> ?Dict:append(A, T, D) end,
- PDict,
- Aliases).
+ lists:foreach(fun(A) -> ?Dict:append(A, T, PDict) end, Aliases).
%%% ---------------------------------------------------------------------------
%%% # remote_peer_down/2
%%% ---------------------------------------------------------------------------
-remote_peer_down(Pid, #state{use_shared_peers = true,
- shared_peers = PDict}
- = S) ->
- S#state{shared_peers = lists:foldl(fun(A,D) -> rpd(Pid, A, D) end,
- PDict,
- ?Dict:fetch_keys(PDict))}.
+remote_peer_down(Pid, #state{options = [_, _, {_, true} | _],
+ shared_peers = PDict}) ->
+ lists:foreach(fun(A) -> rpd(Pid, A, PDict) end, ?Dict:fetch_keys(PDict)).
rpd(Pid, Alias, PDict) ->
?Dict:update(Alias, fun(Ps) -> lists:keydelete(Pid, 1, Ps) end, PDict).
@@ -2851,7 +3023,8 @@ transports(#state{peerT = PeerT}) ->
-define(ALL_INFO, [capabilities,
applications,
transport,
- pending]).
+ pending,
+ options]).
%% The rest.
-define(OTHER_INFO, [connections,
@@ -2878,6 +3051,19 @@ tagged_info(Item, S)
undefined
end;
+tagged_info(TPid, #state{peerT = PT, connT = CT})
+ when is_pid(TPid) ->
+ try
+ [#conn{peer = Pid}] = ets:lookup(CT, TPid),
+ [#peer{ref = Ref, type = Type, options = Opts}] = ets:lookup(PT, Pid),
+ [{ref, Ref},
+ {type, Type},
+ {options, Opts}]
+ catch
+ error:_ ->
+ []
+ end;
+
tagged_info(Items, S)
when is_list(Items) ->
[T || I <- Items, T <- [tagged_info(I,S)], T /= undefined, T /= []];
@@ -2928,6 +3114,7 @@ complete_info(Item, #state{service = Svc} = S) ->
capabilities -> service_info(?CAP_INFO, S);
applications -> info_apps(S);
transport -> info_transport(S);
+ options -> info_options(S);
pending -> info_pending(S);
keys -> ?ALL_INFO ++ ?CAP_INFO ++ ?OTHER_INFO;
all -> service_info(?ALL_INFO, S);
@@ -2955,7 +3142,12 @@ info_stats(#state{peerT = PeerT}) ->
MatchSpec = [{#peer{ref = '$1', conn = '$2', _ = '_'},
[{'is_pid', '$2'}],
[['$1', '$2']]}],
- diameter_stats:read(lists:append(ets:select(PeerT, MatchSpec))).
+ try ets:select(PeerT, MatchSpec) of
+ L ->
+ diameter_stats:read(lists:append(L))
+ catch
+ error: badarg -> [] %% service has gone down
+ end.
%% info_transport/1
%%
@@ -3000,7 +3192,12 @@ transport([[{type, accept}, {options, Opts} | _] | _] = Ls) ->
{accept, [lists:nthtail(2,L) || L <- Ls]}].
peer_dict(#state{peerT = PeerT, connT = ConnT}, Dict0) ->
- ets:foldl(fun(T,A) -> peer_acc(ConnT, A, T) end, Dict0, PeerT).
+ try ets:tab2list(PeerT) of
+ L ->
+ lists:foldl(fun(T,A) -> peer_acc(ConnT, A, T) end, Dict0, L)
+ catch
+ error: badarg -> Dict0 %% service has gone down
+ end.
peer_acc(ConnT, Acc, #peer{pid = Pid,
type = Type,
@@ -3017,9 +3214,16 @@ peer_acc(ConnT, Acc, #peer{pid = Pid,
| info_conn(ConnT, TPid, WS /= ?WD_DOWN)],
Acc).
+info_conn(ConnT, [TPid], B) ->
+ info_conn(ConnT, TPid, B);
+
info_conn(ConnT, TPid, true)
when is_pid(TPid) ->
- info_conn(ets:lookup(ConnT, TPid));
+ try ets:lookup(ConnT, TPid) of
+ T -> info_conn(T)
+ catch
+ error: badarg -> [] %% service has gone down
+ end;
info_conn(_, _, _) ->
[].
@@ -3096,7 +3300,11 @@ info_pending(#state{} = S) ->
{{transport, '$2'}},
{{from, '$3'}}]}}]}],
- ets:select(?REQUEST_TABLE, MatchSpec).
+ try
+ ets:select(?REQUEST_TABLE, MatchSpec)
+ catch
+ error: badarg -> [] %% service has gone down
+ end.
%% info_connections/1
%%
@@ -3151,3 +3359,8 @@ peer_acc(Peer, {PeerD, RefD}) ->
[{TPid, _}, [{origin_host, {_, OH}} | _]]
= [proplists:get_value(K, Peer) || K <- [peer, caps]],
{dict:append(OH, Peer, PeerD), dict:append(OH, TPid, RefD)}.
+
+%% info_options/1
+
+info_options(S) ->
+ S#state.options.
diff --git a/lib/diameter/src/base/diameter_session.erl b/lib/diameter/src/base/diameter_session.erl
index 4c468f207c..3b236f109a 100644
--- a/lib/diameter/src/base/diameter_session.erl
+++ b/lib/diameter/src/base/diameter_session.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -20,6 +20,7 @@
-module(diameter_session).
-export([sequence/0,
+ sequence/1,
session_id/1,
origin_state_id/0]).
@@ -30,7 +31,7 @@
-define(INT32, 16#FFFFFFFF).
%% ---------------------------------------------------------------------------
-%% # sequence/0
+%% # sequence/0-1
%%
%% Output: 32-bit
%% ---------------------------------------------------------------------------
@@ -77,6 +78,15 @@ sequence() ->
Instr = {_Pos = 2, _Incr = 1, _Threshold = ?INT32, _SetVal = 0},
ets:update_counter(diameter_sequence, sequence, Instr).
+-spec sequence(diameter:sequence())
+ -> diameter:'Unsigned32'().
+
+sequence({_,32}) ->
+ sequence();
+
+sequence({H,N}) ->
+ (H bsl N) bor (sequence() band (1 bsl N - 1)).
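+
+%% For example, with the mask {1,26} the low 26 bits come from the
+%% counter and the top 6 bits carry the value 1, so results lie in the
+%% range 16#04000000..16#07FFFFFF.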
+
%% ---------------------------------------------------------------------------
%% # origin_state_id/0
%% ---------------------------------------------------------------------------
diff --git a/lib/diameter/src/base/diameter_watchdog.erl b/lib/diameter/src/base/diameter_watchdog.erl
index d7474e5c56..d814f1afe2 100644
--- a/lib/diameter/src/base/diameter_watchdog.erl
+++ b/lib/diameter/src/base/diameter_watchdog.erl
@@ -43,6 +43,7 @@
-include("diameter_internal.hrl").
-define(DEFAULT_TW_INIT, 30000). %% RFC 3539 ch 3.4.1
+-define(NOMASK, {0,32}). %% default sequence mask
-record(watchdog,
{%% PCB - Peer Control Block; see RFC 3539, Appendix A
@@ -56,7 +57,9 @@
parent = self() :: pid(),
transport :: pid() | undefined,
tref :: reference(), %% reference for current watchdog timer
- message_data}). %% term passed into diameter_service with message
+ message_data, %% term passed into diameter_service with message
+ sequence :: diameter:sequence(), %% mask
+ restrict :: {diameter:restriction(), boolean()}}).
%% start/2
%%
@@ -118,12 +121,23 @@ make_state({T, Pid, {RecvData,
random:seed(now()),
putr(restart, {T, Opts, Svc}), %% save seeing it in trace
putr(dwr, dwr(Caps)), %%
+ {_,_} = Mask = call(Pid, sequence),
+ Restrict = call(Pid, restriction),
+ Nodes = restrict_nodes(Restrict),
#watchdog{parent = Pid,
- transport = monitor(diameter_peer_fsm:start(T, Opts, Svc)),
+ transport = monitor(diameter_peer_fsm:start(T,
+ Opts,
+ {Mask, Nodes, Svc})),
tw = proplists:get_value(watchdog_timer,
Opts,
?DEFAULT_TW_INIT),
- message_data = {RecvData, SvcName, Apps}}.
+ message_data = {RecvData, SvcName, Apps, Mask},
+ sequence = Mask,
+ restrict = {Restrict, lists:member(node(), Nodes)}}.
+
+%% Retrieve the sequence mask from the parent rather than having it
+%% passed into init/1, for upgrade reasons: the call to
+%% diameter_service:receive_message/3 passes back the mask.
%% handle_call/3
@@ -137,7 +151,7 @@ handle_cast(_, State) ->
%% handle_info/2
-handle_info(T, State) ->
+handle_info(T, #watchdog{} = State) ->
case transition(T, State) of
ok ->
{noreply, State};
@@ -148,7 +162,13 @@ handle_info(T, State) ->
?LOG(stop, T),
event(State, State#watchdog{status = down}),
{stop, {shutdown, T}, State}
- end.
+ end;
+
+handle_info(T, S) ->
+ handle_info(T, upgrade(S)).
+
+upgrade(S) ->
+ #watchdog{} = list_to_tuple(tuple_to_list(S) ++ [?NOMASK, {nodes, true}]).
event(#watchdog{status = T}, #watchdog{status = T}) ->
ok;
@@ -241,9 +261,10 @@ transition({close, TPid, _Reason}, #watchdog{transport = TPid}) ->
transition({open, TPid, Hosts, T} = Open,
#watchdog{transport = TPid,
status = initial,
- parent = Pid}
+ parent = Pid,
+ restrict = {_, R}}
= S) ->
- case okay(getr(restart), Hosts) of
+ case okay(getr(restart), Hosts, R) of
okay ->
open(Pid, {TPid, T}),
set_watchdog(S#watchdog{status = okay});
@@ -258,12 +279,15 @@ transition({open, TPid, Hosts, T} = Open,
transition({open = P, TPid, _Hosts, T},
#watchdog{transport = TPid,
+ parent = Pid,
status = down}
= S) ->
%% Store the info we need to notify the parent to reopen the
%% connection after the requisite DWA's are received, at which
- %% time we eraser(open).
+ %% time we eraser(open). The reopen message is a later addition,
+ %% to communicate the new capabilities as soon as they're known.
putr(P, {TPid, T}),
+ Pid ! {reopen, self(), {TPid, T}},
set_watchdog(send_watchdog(S#watchdog{status = reopen,
num_dwa = 0}));
@@ -312,6 +336,15 @@ transition({state, Pid}, #watchdog{status = S}) ->
%% ===========================================================================
+%% Only call "upwards", to the parent service.
+call(Pid, Req) ->
+ try
+ gen_server:call(Pid, Req, infinity)
+ catch
+ exit: Reason ->
+ exit({shutdown, {Req, Reason}})
+ end.
+
monitor(Pid) ->
erlang:monitor(process, Pid),
Pid.
@@ -325,26 +358,36 @@ getr(Key) ->
eraser(Key) ->
erase({?MODULE, Key}).
-%% encode/1
+%% encode/2
-encode(Msg) ->
- #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Msg),
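+%%
+%% Encode a watchdog message, assigning End-to-End and Hop-by-Hop
+%% identifiers from the service's sequence mask.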
+encode(Msg, Mask) ->
+ Seq = diameter_session:sequence(Mask),
+ Hdr = #diameter_header{version = ?DIAMETER_VERSION,
+ end_to_end_id = Seq,
+ hop_by_hop_id = Seq},
+ Pkt = #diameter_packet{header = Hdr,
+ msg = Msg},
+ #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Pkt),
Bin.
-%% okay/2
+%% okay/3
-okay({{accept, Ref}, _, _}, Hosts) ->
+okay({{accept, Ref}, _, _}, Hosts, Restrict) ->
T = {?MODULE, connection, Ref, Hosts},
diameter_reg:add(T),
- okay(diameter_reg:match(T));
+ if Restrict ->
+ okay(diameter_reg:match(T));
+ true ->
+ okay
+ end;
%% Register before matching so that at least one of two registering
-%% processes will match the other. (Which can't happen as long as
-%% diameter_peer_fsm guarantees at most one open connection to the same
-%% peer.)
+%% processes will match the other.
-okay({{connect, _}, _, _}, _) ->
+okay({{connect, _}, _, _}, _, _) ->
okay.
+%% okay/2
+
%% The peer hasn't been connected recently ...
okay([{_,P}]) ->
P = self(), %% assert
@@ -400,9 +443,10 @@ close(#watchdog{parent = Pid}) ->
%% send_watchdog/1
send_watchdog(#watchdog{pending = false,
- transport = TPid}
+ transport = TPid,
+ sequence = Mask}
= S) ->
- TPid ! {send, encode(getr(dwr))},
+ TPid ! {send, encode(getr(dwr), Mask)},
?LOG(send, 'DWR'),
S#watchdog{pending = true}.
@@ -555,7 +599,7 @@ timeout(#watchdog{status = T,
= S)
when T == suspect;
T == reopen, P, N < 0 ->
- exit(TPid, shutdown),
+ exit(TPid, {shutdown, watchdog_timeout}),
close(S),
S#watchdog{status = down};
@@ -600,19 +644,40 @@ restart(#watchdog{transport = undefined} = S) ->
restart(S) ->
S.
+%% restart/2
+%%
%% Only restart the transport in the connecting case. For an accepting
-%% transport, we've registered the peer connection when leaving state
-%% initial and this is used by a new accepting process to realize that
-%% it's actually in state down rather then initial when receiving
-%% notification of an open connection.
-
-restart({{connect, _} = T, Opts, Svc}, #watchdog{parent = Pid} = S) ->
+%% transport, there's no guarantee that an accepted connection in a
+%% restarted transport is from the peer we've lost contact with, so we
+%% have to be prepared for another watchdog to handle it. This is what
+%% the diameter_reg registration in this module is for: the peer
+%% connection is registered when leaving state initial and this is
+%% used by a new accepting watchdog to realize that it's actually in
+%% state down rather than initial when receiving notification of an
+%% open connection.
+
+restart({{connect, _} = T, Opts, Svc}, #watchdog{parent = Pid,
+ sequence = Mask,
+ restrict = {R,_}}
+ = S) ->
Pid ! {reconnect, self()},
- S#watchdog{transport = monitor(diameter_peer_fsm:start(T, Opts, Svc))};
+ Nodes = restrict_nodes(R),
+ S#watchdog{transport = monitor(diameter_peer_fsm:start(T,
+ Opts,
+ {Mask, Nodes, Svc})),
+ restrict = {R, lists:member(node(), Nodes)}};
+
+%% No restriction on the number of connections to the same peer: just
+%% die. Note that a state machine never enters state REOPEN in this
+%% case.
+restart({{accept, _}, _, _}, #watchdog{restrict = {_, false}}) ->
+ stop;
+
+%% Otherwise hang around until told to die.
restart({{accept, _}, _, _}, S) ->
S.
-%% Don't currently use Opts/Svc in the accept case but having them in
-%% the process dictionary is helpful if the process dies unexpectedly.
+
+%% Don't currently use Opts/Svc in the accept case.
%% dwr/1
@@ -622,3 +687,22 @@ dwr(#diameter_caps{origin_host = OH,
['DWR', {'Origin-Host', OH},
{'Origin-Realm', OR},
{'Origin-State-Id', OSI}].
+
+%% restrict_nodes/1
+
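+%% Map the restrict_connections service option onto the list of nodes
+%% on which connections to the same peer are restricted: false means no
+%% restriction, node/nodes mean the local/connected nodes, an explicit
+%% node list is used as is, and a fun is evaluated to produce the list.
+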
+restrict_nodes(false) ->
+ [];
+
+restrict_nodes(nodes) ->
+ [node() | nodes()];
+
+restrict_nodes(node) ->
+ [node()];
+
+restrict_nodes(Nodes)
+ when [] == Nodes;
+ is_atom(hd(Nodes)) ->
+ Nodes;
+
+restrict_nodes(F) ->
+ diameter_lib:eval(F).
diff --git a/lib/diameter/test/Makefile b/lib/diameter/test/Makefile
index e3a57553c1..866d135bd9 100644
--- a/lib/diameter/test/Makefile
+++ b/lib/diameter/test/Makefile
@@ -16,13 +16,8 @@
#
# %CopyrightEnd%
-ifeq ($(ERL_TOP),)
-include $(DIAMETER_TOP)/make/target.mk
-include $(DIAMETER_TOP)/make/$(TARGET)/rules.mk
-else
include $(ERL_TOP)/make/target.mk
include $(ERL_TOP)/make/$(TARGET)/otp.mk
-endif
# ----------------------------------------------------
# Application version
@@ -148,11 +143,7 @@ log:
/%: % force
sed -f release.sed $< > "$(RELSYSDIR)$@"
-ifeq ($(ERL_TOP),)
-include $(DIAMETER_TOP)/make/release_targets.mk
-else
include $(ERL_TOP)/make/otp_release_targets.mk
-endif
release_spec release_docs_spec:
diff --git a/lib/diameter/test/diameter_traffic_SUITE.erl b/lib/diameter/test/diameter_traffic_SUITE.erl
index 669918f757..5744ff0307 100644
--- a/lib/diameter/test/diameter_traffic_SUITE.erl
+++ b/lib/diameter/test/diameter_traffic_SUITE.erl
@@ -81,11 +81,11 @@
%% diameter callbacks
-export([peer_up/3,
peer_down/3,
- pick_peer/5, pick_peer/6,
- prepare_request/4, prepare_request/5,
- prepare_retransmit/4,
- handle_answer/5, handle_answer/6,
- handle_error/5,
+ pick_peer/6, pick_peer/7,
+ prepare_request/5, prepare_request/6,
+ prepare_retransmit/5,
+ handle_answer/6, handle_answer/7,
+ handle_error/6,
handle_request/3]).
-include("diameter.hrl").
@@ -108,10 +108,16 @@
-define(BASE, ?DIAMETER_DICT_COMMON).
-define(ACCT, ?DIAMETER_DICT_ACCOUNTING).
+%% Sequence mask for End-to-End and Hop-by-Hop identifiers.
+-define(CLIENT_MASK, {1,26}). %% 1 in top 6 bits
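+%% That is, End-to-End and Hop-by-Hop identifiers sent by the client
+%% are 1 bsl 26 plus 26 bits of counter, which handle_request/3 below
+%% asserts on the server side.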
+
%% Run test cases in different encoding variants. Send outgoing
%% messages as lists or records.
-define(ENCODINGS, [list, record]).
+%% Identifiers for client connections.
+-define(CONNECTIONS, [c1,c2,c3]).
+
%% Not really what we should be setting unless the message is sent in
%% the common application but diameter doesn't care.
-define(APP_ID, ?DIAMETER_APP_ID_COMMON).
@@ -124,7 +130,8 @@
{'Vendor-Id', 12345},
{'Product-Name', "OTP/diameter"},
{'Auth-Application-Id', [?DIAMETER_APP_ID_COMMON]},
- {'Acct-Application-Id', [?DIAMETER_APP_ID_ACCOUNTING]}
+ {'Acct-Application-Id', [?DIAMETER_APP_ID_ACCOUNTING]},
+ {restrict_connections, false}
| [{application, [{dictionary, D},
{module, ?MODULE},
{answer_errors, callback}]}
@@ -175,12 +182,14 @@ suite() ->
all() ->
[start, start_services, add_transports, result_codes]
- ++ [{group, name([E]), P} || E <- ?ENCODINGS, P <- [[], [parallel]]]
+ ++ [{group, name([E,C]), P} || E <- ?ENCODINGS,
+ C <- ?CONNECTIONS,
+ P <- [[], [parallel]]]
++ [remove_transports, stop_services, stop].
groups() ->
Ts = tc(),
- [{name([E]), [], Ts} || E <- ?ENCODINGS].
+ [{name([E,C]), [], Ts} || E <- ?ENCODINGS, C <- ?CONNECTIONS].
init_per_group(Name, Config) ->
[{group, Name} | Config].
@@ -244,16 +253,17 @@ start(_Config) ->
start_services(_Config) ->
ok = diameter:start_service(?SERVER, ?SERVICE(?SERVER)),
- ok = diameter:start_service(?CLIENT, ?SERVICE(?CLIENT)).
+ ok = diameter:start_service(?CLIENT, [{sequence, ?CLIENT_MASK}
+ | ?SERVICE(?CLIENT)]).
add_transports(Config) ->
LRef = ?util:listen(?SERVER, tcp, [{capabilities_cb, fun capx/2}]),
- CRef = ?util:connect(?CLIENT, tcp, LRef),
- ?util:write_priv(Config, "transport", {LRef, CRef}).
+ Cs = [?util:connect(?CLIENT, tcp, LRef, [{id, C}]) || C <- ?CONNECTIONS],
+ ?util:write_priv(Config, "transport", [LRef | Cs]).
remove_transports(Config) ->
- {LRef, CRef} = ?util:read_priv(Config, "transport"),
- ?util:disconnect(?CLIENT, CRef, ?SERVER, LRef).
+ [LRef | Cs] = ?util:read_priv(Config, "transport"),
+ [?util:disconnect(?CLIENT, C, ?SERVER, LRef) || C <- Cs].
stop_services(_Config) ->
ok = diameter:stop_service(?CLIENT),
@@ -539,11 +549,11 @@ call(Config, Req) ->
call(Config, Req, Opts) ->
Name = proplists:get_value(testcase, Config),
- [Enc] = name(proplists:get_value(group, Config)),
+ [Encoding, Client] = name(proplists:get_value(group, Config)),
diameter:call(?CLIENT,
dict(Req),
- req(Req, Enc),
- [{extra, [Name]} | Opts]).
+ req(Req, Encoding),
+ [{extra, [Name, Client]} | Opts]).
req(['ACR' = H | T], record) ->
?ACCT:'#new-'(?ACCT:msg2rec(H), T);
@@ -580,7 +590,7 @@ set(_, _, _, Rec) ->
Rec.
%% Construct and deconstruct names to work around group names being
-%% restricted to atoms. (Not really used yet.)
+%% restricted to atoms.
name(Names)
when is_list(Names) ->
@@ -603,28 +613,40 @@ peer_up(_SvcName, _Peer, State) ->
peer_down(_SvcName, _Peer, State) ->
State.
-%% pick_peer/5/6
+%% pick_peer/6-7
-pick_peer([Peer], _, ?CLIENT, _State, Name)
+pick_peer(Peers, _, ?CLIENT, _State, Name, Id)
when Name /= send_detach ->
- {ok, Peer}.
+ find(Id, Peers).
-pick_peer([_Peer], _, ?CLIENT, _State, send_nopeer, ?EXTRA) ->
+pick_peer(_Peers, _, ?CLIENT, _State, send_nopeer, _, ?EXTRA) ->
false;
-pick_peer([Peer], _, ?CLIENT, _State, send_detach, {_,_}) ->
- {ok, Peer}.
+pick_peer(Peers, _, ?CLIENT, _State, send_detach, Id, {_,_}) ->
+ find(Id, Peers).
+
+find(Id, Peers) ->
+ [P] = [P || P <- Peers, id(Id, P)],
+ {ok, P}.
-%% prepare_request/4/5
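+
+%% Match a peer against the {id, Id} option its transport was
+%% configured with in add_transports/1, retrieved with
+%% diameter:service_info/2 keyed on the transport pid.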
+id(Id, {Pid, _Caps}) ->
+ [{ref, _}, {type, _}, {options, Opts} | _]
+ = diameter:service_info(?CLIENT, Pid),
+ lists:member({id, Id}, Opts).
-prepare_request(_Pkt, ?CLIENT, {_Ref, _Caps}, send_discard) ->
+%% prepare_request/5-6
+
+prepare_request(_Pkt, ?CLIENT, {_Ref, _Caps}, send_discard, _) ->
{discard, unprepared};
-prepare_request(Pkt, ?CLIENT, {_Ref, Caps}, Name) ->
+prepare_request(Pkt, ?CLIENT, {_Ref, Caps}, Name, _) ->
{send, prepare(Pkt, Caps, Name)}.
-prepare_request(Pkt, ?CLIENT, {_Ref, Caps}, send_detach, _) ->
- {send, prepare(Pkt, Caps)}.
+prepare_request(Pkt, ?CLIENT, {_Ref, Caps}, send_detach, _, _) ->
+ {eval_packet, {send, prepare(Pkt, Caps)}, [fun log/2, detach]}.
+
+log(#diameter_packet{} = P, T) ->
+ io:format("~p: ~p~n", [T,P]).
prepare(Pkt, Caps, send_unsupported) ->
Req = prepare(Pkt, Caps),
@@ -707,25 +729,25 @@ prepare(#diameter_packet{msg = Req}, Caps)
{'Destination-Realm', DR},
{'Auth-Application-Id', ?APP_ID}]).
-%% prepare_retransmit/4
+%% prepare_retransmit/5
-prepare_retransmit(_Pkt, false, _Peer, _Name) ->
+prepare_retransmit(_Pkt, false, _Peer, _Name, _Id) ->
discard.
-%% handle_answer/5/6
+%% handle_answer/6-7
-handle_answer(Pkt, Req, ?CLIENT, Peer, Name) ->
+handle_answer(Pkt, Req, ?CLIENT, Peer, Name, _Id) ->
answer(Pkt, Req, Peer, Name).
-handle_answer(Pkt, _Req, ?CLIENT, _Peer, send_detach, {Pid, Ref}) ->
+handle_answer(Pkt, _Req, ?CLIENT, _Peer, send_detach, _Id, {Pid, Ref}) ->
Pid ! {Ref, Pkt}.
answer(#diameter_packet{msg = Rec, errors = []}, _Req, _Peer, _) ->
Rec.
-%% handle_error/5
+%% handle_error/6
-handle_error(Reason, _Req, ?CLIENT, _Peer, _Name) ->
+handle_error(Reason, _Req, ?CLIENT, _Peer, _Name, _Id) ->
{error, Reason}.
%% handle_request/3
@@ -733,12 +755,18 @@ handle_error(Reason, _Req, ?CLIENT, _Peer, _Name) ->
%% Note that diameter will set Result-Code and Failed-AVPs if
%% #diameter_packet.errors is non-null.
-handle_request(#diameter_packet{msg = M}, ?SERVER, {_Ref, Caps}) ->
+handle_request(#diameter_packet{header = H, msg = M}, ?SERVER, {_Ref, Caps}) ->
+ #diameter_header{end_to_end_id = EI,
+ hop_by_hop_id = HI}
+ = H,
+ {V,B} = ?CLIENT_MASK,
+ V = EI bsr B, %% assert
+ V = HI bsr B, %%
request(M, Caps).
request(#diameter_base_accounting_ACR{'Accounting-Record-Number' = 0},
_) ->
- {protocol_error, ?INVALID_AVP_BITS};
+ {eval_packet, {protocol_error, ?INVALID_AVP_BITS}, [fun log/2, invalid]};
request(#diameter_base_accounting_ACR{'Session-Id' = SId,
'Accounting-Record-Type' = RT,
diff --git a/lib/diameter/test/diameter_watchdog_SUITE.erl b/lib/diameter/test/diameter_watchdog_SUITE.erl
index ff40326947..7ce09e93ca 100644
--- a/lib/diameter/test/diameter_watchdog_SUITE.erl
+++ b/lib/diameter/test/diameter_watchdog_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -30,11 +30,21 @@
end_per_suite/1]).
%% testcases
--export([reopen/1, reopen/4]).
+-export([reopen/1, reopen/4, reopen/7]).
--export([start/3, %% diameter_transport callback
- id/1, %% jitter callback
- run/1]).
+-export([id/1, %% jitter callback
+ run1/1]).
+
+%% diameter_app callbacks
+-export([peer_up/3,
+ peer_down/3]).
+
+%% gen_tcp-ish interface
+-export([listen/2,
+ accept/1,
+ connect/3,
+ send/2,
+ setopts/2]).
-include("diameter.hrl").
-include("diameter_ct.hrl").
@@ -43,33 +53,21 @@
-define(util, diameter_util).
--define(BASE, diameter_gen_base_rfc3588).
--define(APPL_ID, diameter_gen_base_rfc3588:id()).
--define(SUCCESS, 2001). %% DIAMETER_SUCCESS
-
-%% Addresses for the local and remote diameter nodes. The values don't
-%% matter since we're faking transport.
--define(LOCALHOST, {127,0,0,1}).
--define(REMOTEHOST, {10,0,0,1}).
-
--define(CAPS, #diameter_caps{origin_host = "node.innan.com",
- origin_realm = "innan.com",
- host_ip_address = [?LOCALHOST],
- vendor_id = 1022,
- product_name = "remote",
- auth_application_id = [?APPL_ID]}).
-
--define(APPL, #diameter_app{alias = ?MODULE,
- dictionary = ?BASE,
- module = [?MODULE],
- init_state = now(),
- id = ?APPL_ID,
- mutable = false}).
-
-%% Service record maintained by our faked service process.
--define(SERVICE, #diameter_service{pid = self(),
- capabilities = ?CAPS,
- applications = [?APPL]}).
+-define(BASE, ?DIAMETER_DICT_COMMON).
+-define(REALM, "erlang.org").
+-define(ADDR, {127,0,0,1}).
+
+%% Config for diameter:start_service/2.
+-define(SERVICE(Name),
+ [{'Origin-Host', Name ++ "." ++ ?REALM},
+ {'Origin-Realm', ?REALM},
+ {'Host-IP-Address', [?ADDR]},
+ {'Vendor-Id', 42},
+ {'Product-Name', "OTP/diameter"},
+ {'Auth-Application-Id', [?DIAMETER_APP_ID_COMMON]},
+ {application, [{alias, Name},
+ {dictionary, ?BASE},
+ {module, ?MODULE}]}]).
%% Watchdog timer as a callback.
-define(WD(T), {?MODULE, id, [T]}).
@@ -82,28 +80,28 @@
F_ <- [fun(T__) -> T__ end,
fun(T__) -> ?WD(T__) end]]]).
-%% Transport types.
--define(TRANSPORTS, [connect, accept]).
+%% Watchdog timer of the misbehaving peer.
+-define(PEER_WD, 10000).
-%% Message over the transport interface.
--define(TMSG(T), {diameter, T}).
-
-%% Receive a message within a specified time.
--define(RECV(T, Timeout),
- receive T -> now()
- after Timeout -> ?ERROR({timeout, Timeout})
+%% Receive a watchdog event within a specified time.
+-define(EVENT(T, Tmo),
+ receive #diameter_event{info = T} -> now()
+ after Tmo -> ?ERROR({timeout, Tmo})
end).
-%% Receive a message in a given number of watchdogs, plus or minus
+%% Receive an event in a given number of watchdogs, plus or minus
%% half. Note that the call to now_diff assumes left to right
%% evaluation order.
--define(RECV(T, N, WdL, WdH),
+-define(EVENT(T, N, WdL, WdH),
[?ERROR({received, _Elapsed_, _LowerBound_, N, WdL})
|| _UpperBound_ <- [(N)*(WdH) + (WdH) div 2],
- _Elapsed_ <- [now_diff(now(), ?RECV(T, _UpperBound_))],
+ _Elapsed_ <- [now_diff(now(), ?EVENT(T, _UpperBound_))],
_LowerBound_ <- [(N)*(WdL) - (WdL) div 2],
_Elapsed_ =< _LowerBound_*1000]).
+-define(EVENT(T, N, Wd),
+ ?EVENT(T, N, Wd, Wd)).
+
%% A timeout that ensures one watchdog. Ensuring only one watchdog
%% requires (Wd + 2000) + 1000 < 2*(Wd - 2000) ==> 7000 < Wd for the
%% case with random jitter.
@@ -112,7 +110,7 @@
%% ===========================================================================
suite() ->
- [{timetrap, {minutes, 6}}].%% enough for 11 watchdogs @ 30 sec plus jitter
+ [{timetrap, {minutes, 10}}].%% enough for 17 watchdogs @ 30 sec plus jitter
all() ->
[reopen].
@@ -134,52 +132,97 @@ end_per_suite(_Config) ->
%% implement a transport process that plays the role of the peer
%% Diameter node.
+%reopen(_) ->
+% reopen(connect, ?WD(10000), 1, 'DWR');
+
reopen(_) ->
- [] = ?util:run([{?MODULE, [run, [reopen, Wd, T, N, M]]}
- || Wd <- ?WD_TIMERS,
- T <- ?TRANSPORTS,
- N <- [0,1,2],
- M <- ['DWR', 'DWA', other]]).
+ [] = run([[reopen, T, Wd, N, M]
+ || Wd <- ?WD_TIMERS, %% watchdog_timer value
+ T <- [listen, connect], %% watchdog to test
+ N <- [0,1,2], %% DWR's to answer before ignoring
+ M <- ['DWR', 'DWA', 'RAA']]). %% how to induce failback
-reopen(Wd, Type, N, What) ->
- Ref = make_ref(),
+reopen(Type, Wd, N, M) ->
+ Server = start_service(),
+ Client = start_service(),
- %% The maker of transport processes.
- TPid = start({N, Wd, What, Ref}),
+ %% The peer to the transport whose watchdog is tested is given a
+ %% long watchdog timeout so that it doesn't send DWR of its own.
+ {Node, Peer} = {{[], Wd}, {[{module, ?MODULE}], ?WD(?PEER_WD)}},
- %% Act like diameter_service and start the watchdog process, which
- %% in turn starts a peer_fsm process, which in turn starts a
- %% transport process by way of start/3. Messages received by the
- %% testcase are those sent by diameter_watchdog to the service
- %% process (= process starting the watchdog).
- WPid1 = watchdog(Type, Ref, TPid, Wd),
+ {{LH,LW},{CH,CW}} = case Type of
+ listen -> {Node, Peer};
+ connect -> {Peer, Node}
+ end,
- %% Low/high watchdog timeouts.
- WdL = jitter(Wd, -2000),
- WdH = jitter(Wd, 2000),
+ LO = [{transport_module, diameter_tcp},
+ {transport_config, LH ++ [{ip, ?ADDR}, {port, 0}]},
+ {watchdog_timer, LW}],
+
+ {ok, LRef} = diameter:add_transport(Server, {listen, LO}),
+
+ [LP] = ?util:lport(tcp, LRef, 20),
+
+ CO = [{transport_module, diameter_tcp},
+ {transport_config, CH ++ [{ip, ?ADDR}, {port, 0},
+ {raddr, ?ADDR}, {rport, LP}]},
+ {watchdog_timer, CW}],
+
+ %% Use a temporary process to ensure the connecting transport is
+ %% added only once events from the listening transport are
+ %% subscribed to.
+ Pid = spawn(fun() -> receive _ -> ok end end),
+
+ [] = run([[reopen, Type, T, LRef, Pid, Wd, N, M]
+ || T <- [{listen, Server}, {connect, Client, CO}]]).
+
+%% start_service/1
+
+start_service() ->
+ Name = hostname(),
+ ok = diameter:start_service(Name, [{monitor, self()} | ?SERVICE(Name)]),
+ Name.
+
+%% reopen/7
+reopen(Type, {listen = T, SvcName}, Ref, Pid, Wd, N, M) ->
+ diameter:subscribe(SvcName),
+ Pid ! ok,
+ recv(Type, T, SvcName, Ref, Wd, N, M);
+
+reopen(Type, {connect = T, SvcName, Opts}, _, Pid, Wd, N, M) ->
+ diameter:subscribe(SvcName),
+ MRef = erlang:monitor(process, Pid),
+ receive {'DOWN', MRef, process, _, _} -> ok end,
+ {ok, Ref} = diameter:add_transport(SvcName, {T, Opts}),
+ recv(Type, T, SvcName, Ref, Wd, N, M).
+
+%% recv/7
+
+%% The watchdog to be tested.
+recv(Type, Type, _SvcName, Ref, Wd, N, M) ->
%% Connection should come up immediately as a consequence of
%% starting the watchdog process. In the accepting case this
%% results in a new watchdog on a transport waiting for a new
%% connection.
- ?RECV({connection_up, WPid1, _}, 1000),
- WPid2 = case Type of
- connect ->
- WPid1;
- accept ->
- watchdog(Type, Ref, TPid, Wd)
- end,
+ ?EVENT({watchdog, Ref, _, {initial, okay}, _}, 2000),
+ ?EVENT({up, Ref, _, _, #diameter_packet{}}, 0),
+
+ %% Low/high watchdog timeouts.
+ WdL = jitter(Wd, -2000),
+ WdH = jitter(Wd, 2000),
%% OKAY Timer expires & Failover()
%% Pending SetWatchdog() SUSPECT
%%
- %% Since our transport is replying to N DWR's before becoming
- %% silent, we should go down after N+2 watchdog_timer expirations:
- %% that is, after the first unanswered DWR. Knowing the min/max
- %% watchdog timeout values gives the time interval in which the
- %% down message is expected.
- ?RECV({connection_down, WPid1}, N+2, WdL, WdH),
+    %% The peer replies to N DWR's before becoming silent, so we should
+ %% go down after N+2 watchdog_timer expirations: that is, after
+ %% the first unanswered DWR. Knowing the min/max watchdog timeout
+ %% values gives the time interval in which the event is expected.
+
+ ?EVENT({watchdog, Ref, _, {okay, suspect}, _}, N+2, WdL, WdH),
+ ?EVENT({down, Ref, _, _}, 0),
%% SUSPECT Receive DWA Pending = FALSE
%% Failback()
@@ -188,9 +231,11 @@ reopen(Wd, Type, N, What) ->
%% SUSPECT Receive non-DWA Failback()
%% SetWatchdog() OKAY
%%
- %% The transport receives a message before the expiry of another
- %% watchdog to induce failback.
- ?RECV({connection_up, WPid1}, WdH),
+ %% The peer sends a message before the expiry of another watchdog
+ %% to induce failback.
+
+ ?EVENT({watchdog, Ref, _, {suspect, okay}, _}, WdH + 2000),
+ ?EVENT({up, Ref, _, _}, 0),
%% OKAY Timer expires & SendWatchdog()
%% !Pending SetWatchdog()
@@ -199,30 +244,35 @@ reopen(Wd, Type, N, What) ->
%% OKAY Timer expires & Failover()
%% Pending SetWatchdog() SUSPECT
%%
- %% The transport is still not responding to watchdogs so the
- %% connection should go back down after either one or two watchdog
- %% expiries, depending on whether or not DWA restored the connection.
- F = choose(What == 'DWA', 2, 1),
- ?RECV({connection_down, WPid1}, F, WdL, WdH),
+ %% The peer is now ignoring all watchdogs so the connection goes
+ %% back down after either one or two watchdog expiries, depending
+ %% on whether or not DWA restored the connection.
+
+ F = choose(M == 'DWA', 2, 1),
+ ?EVENT({watchdog, Ref, _, {okay, suspect}, _}, F, WdL, WdH),
+ ?EVENT({down, Ref, _, _}, 0),
%% SUSPECT Timer expires CloseConnection()
%% SetWatchdog() DOWN
%%
+ %% Non-response brings the connection down after another timeout.
+
+ ?EVENT({watchdog, Ref, _, {suspect, down}, _}, 1, WdL, WdH),
+
%% DOWN Timer expires AttemptOpen()
%% SetWatchdog() DOWN
%%
- %% Our transport tells us when the fake connection is
- %% reestablished, which should happen after another couple of
- %% watchdog expiries, the first bringing the watchdog to state
- %% DOWN, the second triggering an attempt to reopen the
- %% connection.
- ?RECV({reopen, Ref}, 2, WdL, WdH),
-
%% DOWN Connection up NumDWA = 0
%% SendWatchdog()
%% SetWatchdog()
%% Pending = TRUE REOPEN
%%
+ %% The connection is reestablished after another timeout.
+
+ recv_reopen(Type, Ref, WdL, WdH),
+
+ %% REOPEN Receive non-DWA Throwaway() REOPEN
+ %%
%% REOPEN Receive DWA & Pending = FALSE
%% NumDWA < 2 NumDWA++ REOPEN
%%
@@ -230,312 +280,259 @@ reopen(Wd, Type, N, What) ->
%% NumDWA == 2 NumDWA++
%% Failback() OKAY
%%
- %% Now the watchdog should require three received DWA's before
- %% taking the connection back up. The first DWR is sent directly
- %% after capabilities exchange so it should take no more than two
- %% watchdog expiries.
- ?RECV({connection_up, WPid2, _}, 2, WdL, WdH).
+ %% REOPEN Timer expires & SendWatchdog()
+ %% !Pending SetWatchdog()
+ %% Pending = TRUE REOPEN
+ %%
+ %% An exchange of 3 watchdogs (the first directly after
+ %% capabilities exchange) brings the connection back up.
-%% ===========================================================================
+ ?EVENT({watchdog, Ref, _, {reopen, okay}, _}, 2, WdL, WdH),
+ ?EVENT({up, Ref, _, _, #diameter_packet{}}, 0),
-%% Start the fake transport process. From diameter's point of view
-%% it's started when diameter calls start/3. We start it before this
-%% happens since we use the same fake transport each time diameter
-%% calls start/3. The process lives and dies with the test case.
-start(Config) ->
- Pid = self(),
- spawn(fun() -> loop(init(Pid, Config)) end).
-
-%% Transport start from diameter. This may be called multiple times
-%% depending on the testcase.
-start({Type, _Ref}, #diameter_service{}, Pid) ->
- Ref = make_ref(),
- MRef = erlang:monitor(process, Pid),
- Pid ! {start, self(), Type, Ref},
- {Ref, TPid} = receive
- {Ref, _} = T ->
- T;
- {'DOWN', MRef, process, _, _} = T ->
- T
- end,
- erlang:demonitor(MRef, [flush]),
- {ok, TPid}.
+ %% Non-response brings it down again.
-%% id/1
+ ?EVENT({watchdog, Ref, _, {okay, suspect}, _}, 2, WdL, WdH),
+ ?EVENT({down, Ref, _, _}, 0),
+ ?EVENT({watchdog, Ref, _, {suspect, down}, _}, 1, WdL, WdH),
-id(T) ->
- T.
+ %% Reestablish after another watchdog.
-%% ===========================================================================
+ recv_reopen(Type, Ref, WdL, WdH),
-choose(true, X, _) -> X;
-choose(false, _, X) -> X.
+ %% REOPEN Timer expires & NumDWA = -1
+ %% Pending & SetWatchdog()
+ %% NumDWA >= 0 REOPEN
+ %%
+ %% REOPEN Timer expires & CloseConnection()
+ %% Pending & SetWatchdog()
+ %% NumDWA < 0 DOWN
+ %%
+    %% The peer is now ignoring all watchdogs, so we go down again
+    %% after two timeouts.
-%% run/1
-%%
-%% A more useful badmatch in case of failure.
+ ?EVENT({watchdog, Ref, _, {reopen, down}, _}, 2, WdL, WdH);
-run([F|A]) ->
- ok = try
- apply(?MODULE, F, A),
- ok
- catch
- E:R ->
- {A, E, R, erlang:get_stacktrace()}
- end.
+%% The misbehaving peer.
+recv(_, Type, SvcName, Ref, Wd, N, M) ->
+ %% First transport process.
+ ?EVENT({watchdog, Ref, _, {initial, okay}, _}, 1000),
+ ?EVENT({up, Ref, _, _, #diameter_packet{}}, 0),
+ reg(Type, Ref, SvcName, {SvcName, {Wd,N,M}}),
+ ?EVENT({watchdog, Ref, _, {okay, down}, _}, infinity),
-%% now_diff/2
+ %% Second transport process.
+ ?EVENT({watchdog, Ref, _, {_, reopen}, _}, infinity),
+ reg(Type, Ref, SvcName, 3),
+ ?EVENT({watchdog, Ref, _, {_, down}, _}, infinity),
-now_diff(T1, T2) ->
- timer:now_diff(T2, T1).
+ %% Third transport process.
+ ?EVENT({watchdog, Ref, _, {_, reopen}, _}, infinity),
+ reg(Type, Ref, SvcName, 0),
+ ?EVENT({watchdog, Ref, _, {_, down}, _}, infinity),
-%% jitter/2
+ ok.
-jitter(?WD(T), _) ->
- T;
-jitter(T,D) ->
- T+D.
+%% recv_reopen/4
-%% watchdog/4
-%%
-%% Fake the call from diameter_service. The watchdog process will send
-%% messages to the calling "service" process so our tests are that the
-%% watchdog responds as expected.
-
-watchdog(Type, Ref, TPid, Wd) ->
- Opts = [{transport_module, ?MODULE},
- {transport_config, TPid},
- {watchdog_timer, Wd}],
- {_MRef, Pid} = diameter_watchdog:start({Type, Ref},
- {false, Opts, false, ?SERVICE}),
- Pid.
+recv_reopen(connect, Ref, WdL, WdH) ->
+ ?EVENT({watchdog, Ref, _, {_, reopen}, _}, 1, WdL, WdH),
+ ?EVENT({reconnect, Ref, _}, 0);
-%% ===========================================================================
+recv_reopen(listen, Ref, _, _) ->
+ ?EVENT({watchdog, Ref, _, {_, reopen}, _}, 1, ?PEER_WD).
-%% Transport process implmentation. Fakes reception of messages by
-%% sending fakes to the parent (peer fsm) process that called start/3.
-
--record(transport,
- {type, %% connect | accept | manager
- parent, %% pid() of peer_fsm/ervice process
- open = false, %% done with capabilities exchange?
- config}).%% testcase-specific config
-
-%% init/2
-
-%% Testcase starting the manager.
-init(SvcPid, {_,_,_,_} = Config) ->
- putr(peer, [{'Origin-Host', hostname() ++ ".utan.com"},
- {'Origin-Realm', "utan.com"}]),
- #transport{type = manager,
- parent = monitor(SvcPid),
- config = Config};
-
-%% Manager starting a transport.
-init(_, {Type, ParentPid, SvcPid, TwinPid, Peer, {N,_,_,_} = Config}) ->
- putr(peer, Peer),
- putr(service, SvcPid),
- putr(count, init(Type, ParentPid, TwinPid, N)),%% number of DWR's to answer
- #transport{type = Type,
- parent = monitor(ParentPid),
- config = Config}.
-
-init(Type, ParentPid, undefined, N) ->
- connected(ParentPid, Type),
- N;
-init(_, _, TPid, _) ->
- monitor(TPid),
- 3.
-
-monitor(Pid) ->
- erlang:monitor(process, Pid),
- Pid.
+%% reg/4
+%%
+%% Look up the pid of the transport process and publish a term for
+%% send/2 to lookup.
+reg(Type, Ref, SvcName, T) ->
+ TPid = tpid(Type, Ref, diameter:service_info(SvcName, transport)),
+ true = diameter_reg:add_new({?MODULE, TPid, T}).
+
+%% tpid/3
+
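+%% Extract the pid of the transport process (the port owner) from
+%% diameter:service_info/2 output; in the listen case, pick the
+%% accepted connection whose watchdog is in state okay or reopen.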
+tpid(connect, Ref, [[{ref, Ref},
+ {type, connect},
+ {options, _},
+ {watchdog, _},
+ {peer, _},
+ {apps, _},
+ {caps, _},
+ {port, [{owner, TPid} | _]}
+ | _]]) ->
+ TPid;
+
+tpid(listen, Ref, [[{ref, Ref},
+ {type, listen},
+ {options, _},
+ {accept, As}
+ | _]]) ->
+ [[{watchdog, _},
+ {peer, _},
+ {apps, _},
+ {caps, _},
+ {port, [{owner, TPid} | _]}
+ | _]]
+ = lists:filter(fun([{watchdog, {_,_,S}} | _]) ->
+ S == okay orelse S == reopen
+ end,
+ As),
+ TPid.
-%% Generate a unique hostname for the faked peer.
-hostname() ->
- lists:flatten(io_lib:format("~p-~p-~p", tuple_to_list(now()))).
+%% ===========================================================================
-%% loop/1
-
-loop(S) ->
- loop(msg(receive T -> T end, S)).
-
-msg(T,S) ->
- case transition(T,S) of
- ok ->
- S;
- #transport{} = NS ->
- NS;
- {stop, Reason} ->
- x(Reason)
- end.
-
-x(Reason) ->
- exit(Reason).
-
-%% transition/2
-
-%% Manager is being asked for a new transport process.
-transition({start, Pid, Type, Ref}, #transport{type = manager,
- parent = SvcPid,
- config = Config}) ->
- TPid = start({Type, Pid, SvcPid, getr(transport), getr(peer), Config}),
- Pid ! {Ref, TPid},
- putr(transport, TPid),
+listen(PortNr, Opts) ->
+ gen_tcp:listen(PortNr, Opts).
+
+accept(LSock) ->
+ gen_tcp:accept(LSock).
+
+connect(Addr, Port, Opts) ->
+ gen_tcp:connect(Addr, Port, Opts).
+
+setopts(Sock, Opts) ->
+ inet:setopts(Sock, Opts).
+
+send(Sock, Bin) ->
+ send(getr(config), Sock, Bin).
+
+%% send/3
+
+%% First outgoing message from a new transport process is CER/CEA.
+%% Remaining outgoing messages are either DWR or DWA.
+send(undefined, Sock, Bin) ->
+ putr(config, init),
+ gen_tcp:send(Sock, Bin);
+
+%% Outgoing DWR: fake reception of DWA. Use the fact that AVP values
+%% are ignored. This is to ensure that the peer's watchdog state
+%% transitions are only induced by responses to messages it sends.
+send(_, Sock, <<_:32, 1:1, _:7, 280:24, _:32, EId:32, HId:32, _/binary>>) ->
+ Pkt = #diameter_packet{header = #diameter_header{version = 1,
+ end_to_end_id = EId,
+ hop_by_hop_id = HId},
+ msg = ['DWA', {'Result-Code', 2001},
+ {'Origin-Host', "XXX"},
+ {'Origin-Realm', ?REALM}]},
+ #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Pkt),
+ self() ! {tcp, Sock, Bin},
ok;
-%% Peer fsm or testcase process has died.
-transition({'DOWN', _, process, Pid, _} = T, #transport{parent = Pid}) ->
- {stop, T};
-
-%% Twin transport process has gone down. In the connect case, the
-%% transport isn't started until this happens in the first place so
-%% connect immediately. In the accept case, fake the peer reconnecting
-%% only after another watchdog expiry.
-transition({'DOWN', _, process, _, _}, #transport{type = Type,
- config = {_, Wd, _, _}}) ->
- Tmo = case Type of
- connect ->
- 0;
- accept ->
- ?ONE_WD(Wd)
- end,
- erlang:send_after(Tmo, self(), reconnect),
+%% First outgoing DWA.
+send(init, Sock, Bin) ->
+ [{{?MODULE, _, T}, _}] = diameter_reg:wait({?MODULE, self(), '_'}),
+ putr(config, T),
+ send(Sock, Bin);
+
+%% First transport process.
+send({SvcName, {_,_,_} = T}, Sock, Bin) ->
+ [{'Origin-Host', _} = OH, {'Origin-Realm', _} = OR | _]
+ = ?SERVICE(SvcName),
+ putr(origin, [OH, OR]),
+ putr(config, T),
+ send(Sock, Bin);
+
+%% Discard DWA, failback after another timeout in the peer.
+send({Wd, 0 = No, Msg}, Sock, Bin) ->
+ Origin = getr(origin),
+ spawn(fun() -> failback(?ONE_WD(Wd), Msg, Sock, Bin, Origin) end),
+ putr(config, No),
ok;
-transition(reconnect, #transport{type = Type,
- parent = Pid,
- config = {_,_,_,Ref}}) ->
- getr(service) ! {reopen, Ref},
- connected(Pid, Type),
- ok;
+%% Send DWA while we're in the mood (aka 0 < N).
+send({Wd, N, Msg}, Sock, Bin) ->
+ putr(config, {Wd, N-1, Msg}),
+ gen_tcp:send(Sock, Bin);
-%% Peer fsm process is sending CER: fake the peer's CEA.
-transition(?TMSG({send, Bin}), #transport{type = connect,
- open = false,
- parent = Pid}
- = S) ->
- {Code, Flags, _} = ?BASE:msg_header('CER'),
- <<_:32, Flags:8, Code:24, _:96, _/binary>> = Bin,
- Hdr = make_header(Bin),
- recv(Pid, {Hdr, make_cea()}),
- S#transport{open = true};
-
-%% Peer fsm process is sending CEA.
-transition(?TMSG({send, Bin}), #transport{type = accept,
- open = false}
- = S) ->
- {Code, Flags, _} = ?BASE:msg_header('CEA'),
- <<_:32, Flags:8, Code:24, _:96, _/binary>> = Bin,
- S#transport{open = true};
-
-%% Watchdog is sending DWR or DWA.
-transition(?TMSG({send, Bin}), #transport{open = true} = S) ->
- {Code, _, _} = ?BASE:msg_header('DWR'),
- {Code, _, _} = ?BASE:msg_header('DWA'),
- <<_:32, R:1, 0:7, Code:24, _:96, _/binary>> = Bin,
- Hdr = make_header(Bin),
- dwa(1 == R, S, Hdr),
+%% Discard DWA.
+send(0, _Sock, _Bin) ->
ok;
-%% We're telling ourselves to fake a received message.
-transition({recv, Msg}, #transport{parent = Pid}) ->
- recv(Pid, Msg),
- ok;
+%% Send DWA.
+send(N, Sock, <<_:32, 0:1, _:7, 280:24, _/binary>> = Bin) ->
+ putr(config, N-1),
+ gen_tcp:send(Sock, Bin).
-%% We're telling ourselves to receive a message to induce failback.
-transition(failback = T, #transport{parent = Pid}) ->
- recv(Pid, eraser(T)),
- ok.
+failback(Tmo, Msg, Sock, Bin, Origin) ->
+ timer:sleep(Tmo),
+ ok = gen_tcp:send(Sock, msg(Msg, Bin, Origin)).
-make_header(Bin) ->
- #diameter_header{end_to_end_id = E,
- hop_by_hop_id = H}
- = diameter_codec:decode_header(Bin),
- #diameter_header{end_to_end_id = E,
- hop_by_hop_id = H}.
-
-recv(Pid, Msg) ->
- Pid ! ?TMSG({recv, encode(Msg)}).
-
-%% Replace the end-to-end/hop-by-hop identifiers with those from an
-%% incoming request to which we're constructing a reply.
-encode({Hdr, [_|_] = Msg}) ->
- #diameter_header{hop_by_hop_id = HBH,
- end_to_end_id = E2E}
- = Hdr,
- #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Msg),
- <<H:12/binary, _:64, T/binary>> = Bin,
- <<H/binary, HBH:32, E2E:32, T/binary>>;
-
-encode([_|_] = Msg) ->
- #diameter_packet{bin = Bin} = diameter_codec:encode(?BASE, Msg),
+%% msg/3
+
+msg('DWA', Bin, _Origin) ->
+ Bin;
+msg(Msg, _Bin, Origin) ->
+ #diameter_packet{bin = Bin}
+ = diameter_codec:encode(?BASE, msg(Msg, Origin)),
Bin.
-connected(Pid, connect) ->
- Pid ! ?TMSG({self(), connected, make_ref()});
-connected(Pid, accept) ->
- Pid ! ?TMSG({self(), connected}),
- recv(Pid, make_cer()).
+msg('DWR' = M, T) ->
+ [M | T];
-make_cer() ->
- ['CER' | getr(peer)] ++ [{'Host-IP-Address', [?REMOTEHOST]},
- {'Vendor-Id', 1028},
- {'Product-Name', "Utan"},
- {'Auth-Application-Id', [?APPL_ID]}].
+msg('RAA', T) ->
+ ['RAA', {'Session-Id', diameter:session_id("abc")},
+ {'Result-Code', 2001}
+ | T].
+%% An unexpected answer is discarded after passing through the
+%% watchdog state machine.
-make_cea() ->
- ['CER' | Rest] = make_cer(),
- ['CEA', {'Result-Code', ?SUCCESS} | Rest].
+%% ===========================================================================
-make_dwr() ->
- ['DWR' | getr(peer)].
+peer_up(_SvcName, _Peer, S) ->
+ S.
-make_dwa() ->
- ['DWR' | Rest] = make_dwr(),
- ['DWA', {'Result-Code', ?SUCCESS} | Rest].
+peer_down(_SvcName, _Peer, S) ->
+ S.
-dwa(false, _, _) -> %% outgoing was DWA ...
- ok;
-dwa(true, S, Hdr) -> %% ... or DWR
- dwa(getr(count), Hdr, S);
-
-%% React to the DWR only after another watchdog expiry. We shouldn't
-%% get another DWR while the answer is pending.
-dwa(0, Hdr, #transport{config = {_, Wd, What, _}}) ->
- erlang:send_after(?ONE_WD(Wd), self(), failback),
- putr(failback, make_msg(What, Hdr)),
- eraser(count);
-
-dwa(undefined, _, _) ->
- undefined = getr(failback), %% ensure this is after failback
- ok;
+%% ===========================================================================
-%% Reply with DWA.
-dwa(N, Hdr, #transport{parent = Pid}) ->
- putr(count, N-1),
- recv(Pid, {Hdr, make_dwa()}).
+choose(true, X, _) -> X;
+choose(false, _, X) -> X.
-%% Answer to received DWR.
-make_msg('DWA', Hdr) ->
- {Hdr, make_dwa()};
+%% id/1
+%%
+%% Jitter callback.
-%% DWR from peer.
-make_msg('DWR', _) ->
- make_dwr();
+id(T) ->
+ T.
-%% An unexpected answer is discarded after passing through the
-%% watchdog state machine.
-make_msg(other, _) ->
- ['RAA', {'Session-Id', diameter:session_id("abc")},
- {'Result-Code', 2001}
- | getr(peer)].
+%% run/1
+%%
+%% A more useful badmatch in case of failure.
+
+run(Fs) ->
+ ?util:run([{?MODULE, [run1, F]} || F <- Fs]).
+
+run1([F|A]) ->
+ ok = try
+ apply(?MODULE, F, A),
+ ok
+ catch
+ E:R ->
+ S = erlang:get_stacktrace(),
+ io:format("~p~n", [{A, E, R, S}]),
+ S
+ end.
+
+%% now_diff/2
+
+now_diff(T1, T2) ->
+ timer:now_diff(T2, T1).
+
+%% jitter/2
+
+jitter(?WD(T), _) ->
+ T;
+jitter(T,D) ->
+ T+D.
+
+%% Generate a unique hostname for the faked peer.
+hostname() ->
+ lists:flatten(io_lib:format("~p-~p-~p", tuple_to_list(now()))).
putr(Key, Val) ->
put({?MODULE, Key}, Val).
getr(Key) ->
get({?MODULE, Key}).
-
-eraser(Key) ->
- erase({?MODULE, Key}).
diff --git a/lib/inets/doc/src/httpd.xml b/lib/inets/doc/src/httpd.xml
index 7e21229fcf..8497d91549 100644
--- a/lib/inets/doc/src/httpd.xml
+++ b/lib/inets/doc/src/httpd.xml
@@ -178,7 +178,13 @@
<p>Note that this option is only used when the option
<c>socket_type</c> has the value <c>ip_comm</c>. </p>
</item>
-
+ <marker id="prop_minimum_bytes_per_second"></marker>
+ <tag>{minimum_bytes_per_second, integer()}</tag>
+ <item>
+        <p>If given, sets a minimum bytes-per-second value for connections.</p>
+        <p>If this value is not reached, the connection's socket is closed.</p>
+        <p>The option is useful for reducing the risk of "slow DoS" attacks.</p>
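+        <p>For example, assuming <c>Conf</c> holds the rest of the server
+        configuration, a server that closes connections delivering fewer
+        than 200 bytes per second can be started as
+        <c>inets:start(httpd, [{minimum_bytes_per_second, 200} | Conf])</c>.</p>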
+ </item>
</taglist>
<marker id="props_api_modules"></marker>
diff --git a/lib/inets/src/http_server/httpd_conf.erl b/lib/inets/src/http_server/httpd_conf.erl
index 747118431e..a97bbd9b25 100644
--- a/lib/inets/src/http_server/httpd_conf.erl
+++ b/lib/inets/src/http_server/httpd_conf.erl
@@ -483,7 +483,7 @@ validate_properties(Properties) ->
case mandatory_properties(Properties) of
ok ->
%% Second, check that property dependency are ok
- {ok, validate_properties2(Properties)};
+ {ok, check_minimum_bytes_per_second(validate_properties2(Properties))};
Error ->
throw(Error)
end.
@@ -522,7 +522,18 @@ validate_properties2(Properties) ->
throw(Error)
end
end.
-
+check_minimum_bytes_per_second(Properties) ->
+ case proplists:get_value(minimum_bytes_per_second, Properties, false) of
+ false ->
+ Properties;
+ Nr ->
+ case is_integer(Nr) of
+ false ->
+ throw({error, {minimum_bytes_per_second, is_not_integer}});
+ _ ->
+ Properties
+ end
+ end.
mandatory_properties(ConfigList) ->
a_must(ConfigList, [server_name, port, server_root, document_root]).
diff --git a/lib/inets/src/http_server/httpd_request_handler.erl b/lib/inets/src/http_server/httpd_request_handler.erl
index b62c10bbc7..5e0bd39cb3 100644
--- a/lib/inets/src/http_server/httpd_request_handler.erl
+++ b/lib/inets/src/http_server/httpd_request_handler.erl
@@ -44,7 +44,9 @@
timeout, %% infinity | integer() > 0
timer, %% ref() - Request timer
headers, %% #http_request_h{}
- body %% binary()
+ body, %% binary()
+          data,          %% Total data received, in bytes; checked periodically
+          byte_limit     %% Minimum bytes per second before the connection is closed
}).
%%====================================================================
@@ -98,7 +100,6 @@ init([Manager, ConfigDB, AcceptTimeout]) ->
[{socket_type, SocketType}, {socket, Socket}]),
TimeOut = httpd_util:lookup(ConfigDB, keep_alive_timeout, 150000),
-
Then = erlang:now(),
?hdrd("negotiate", []),
@@ -139,12 +140,11 @@ continue_init(Manager, ConfigDB, SocketType, Socket, TimeOut) ->
mfa = MFA},
?hdrt("activate request timeout", []),
- NewState = activate_request_timeout(State),
?hdrt("set socket options (binary, packet & active)", []),
http_transport:setopts(SocketType, Socket,
[binary, {packet, 0}, {active, once}]),
-
+    NewState = data_receive_counter(activate_request_timeout(State),
+                                    httpd_util:lookup(ConfigDB,
+                                                      minimum_bytes_per_second,
+                                                      false)),
?hdrt("init done", []),
gen_server:enter_loop(?MODULE, [], NewState).
@@ -205,16 +205,25 @@ handle_info({Proto, Socket, Data},
?hdrd("received data",
[{data, Data}, {proto, Proto},
{socket, Socket}, {socket_type, SockType}, {mfa, MFA}]),
-
+
%% case (catch Module:Function([Data | Args])) of
PROCESSED = (catch Module:Function([Data | Args])),
-
+ NewDataSize = case State#state.byte_limit of
+ undefined ->
+ undefined;
+ _ ->
+ State#state.data + byte_size(Data)
+ end,
?hdrt("data processed", [{processing_result, PROCESSED}]),
-
case PROCESSED of
{ok, Result} ->
?hdrd("data processed", [{result, Result}]),
- NewState = cancel_request_timeout(State),
+ NewState = case NewDataSize of
+ undefined ->
+ cancel_request_timeout(State);
+ _ ->
+ set_new_data_size(cancel_request_timeout(State), NewDataSize)
+ end,
handle_http_msg(Result, NewState);
{error, {uri_too_long, MaxSize}, Version} ->
@@ -239,7 +248,12 @@ handle_info({Proto, Socket, Data},
NewMFA ->
?hdrd("data processed - reactivate socket", [{new_mfa, NewMFA}]),
http_transport:setopts(SockType, Socket, [{active, once}]),
- {noreply, State#state{mfa = NewMFA}}
+ case NewDataSize of
+ undefined ->
+ {noreply, State#state{mfa = NewMFA}};
+ _ ->
+ {noreply, State#state{mfa = NewMFA, data = NewDataSize}}
+ end
end;
%% Error cases
@@ -263,7 +277,22 @@ handle_info(timeout, #state{mod = ModData} = State) ->
error_log("The client did not send the whole request before the "
"server side timeout", ModData),
{stop, normal, State#state{response_sent = true}};
-
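+%% Enforce the minimum_bytes_per_second limit: the first check fires 3
+%% seconds after socket setup (see data_receive_counter/2) and so
+%% requires three times the per-second limit; thereafter the counter is
+%% reset and rechecked every second against the limit itself.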
+handle_info(check_data_first, #state{data = Data, byte_limit = Byte_Limit} = State) ->
+ case Data >= (Byte_Limit*3) of
+ true ->
+ erlang:send_after(1000, self(), check_data),
+ {noreply, State#state{data = 0}};
+ _ ->
+ {stop, normal, State#state{response_sent = true}}
+ end;
+handle_info(check_data, #state{data = Data, byte_limit = Byte_Limit} = State) ->
+ case Data >= Byte_Limit of
+ true ->
+ erlang:send_after(1000, self(), check_data),
+ {noreply, State#state{data = 0}};
+ _ ->
+ {stop, normal, State#state{response_sent = true}}
+ end;
%% Default case
handle_info(Info, #state{mod = ModData} = State) ->
Error = lists:flatten(
@@ -311,6 +340,8 @@ code_change(_OldVsn, State, _Extra) ->
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
+set_new_data_size(State, NewData) ->
+ State#state{data = NewData}.
await_socket_ownership_transfer(AcceptTimeout) ->
receive
{socket_ownership_transfered, SocketType, Socket} ->
@@ -603,7 +634,14 @@ activate_request_timeout(#state{timeout = Time} = State) ->
?hdrt("activate request timeout", [{time, Time}]),
Ref = erlang:send_after(Time, self(), timeout),
State#state{timer = Ref}.
-
+data_receive_counter(State, Byte_limit) ->
+ case Byte_limit of
+ false ->
+ State#state{data = 0};
+ Nr ->
+ erlang:send_after(3000, self(), check_data_first),
+ State#state{data = 0, byte_limit = Nr}
+ end.
cancel_request_timeout(#state{timer = undefined} = State) ->
State;
cancel_request_timeout(#state{timer = Timer} = State) ->
diff --git a/lib/inets/src/inets_app/inets.appup.src b/lib/inets/src/inets_app/inets.appup.src
index 2adb2a0fc8..ffd0ed622f 100644
--- a/lib/inets/src/inets_app/inets.appup.src
+++ b/lib/inets/src/inets_app/inets.appup.src
@@ -18,8 +18,14 @@
{"%VSN%",
[
+ {"5.9.1",
+ [
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []}
+ ]
+ },
{"5.9",
[
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []},
{load_module, tftp, soft_purge, soft_purge, [inets_service]},
{load_module, inets_service, soft_purge, soft_purge, []},
{load_module, httpc, soft_purge, soft_purge, [httpc_manager]},
@@ -29,6 +35,7 @@
},
{"5.8.1",
[
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []},
{load_module, tftp, soft_purge, soft_purge, [inets_service]},
{load_module, inets_service, soft_purge, soft_purge, []},
@@ -64,8 +71,14 @@
}
],
[
+ {"5.9.1",
+ [
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []}
+ ]
+ },
{"5.9",
[
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []},
{load_module, tftp, soft_purge, soft_purge, [inets_service]},
{load_module, inets_service, soft_purge, soft_purge, []},
{load_module, httpc, soft_purge, soft_purge, [httpc_manager]},
@@ -75,6 +88,7 @@
},
{"5.8.1",
[
+ {load_module, httpd_request_handler, soft_purge, soft_purge, []},
{load_module, tftp, soft_purge, soft_purge, [inets_service]},
{load_module, inets_service, soft_purge, soft_purge, []},
diff --git a/lib/inets/test/httpd_basic_SUITE.erl b/lib/inets/test/httpd_basic_SUITE.erl
index 7a476ea14a..523cf9d38c 100644
--- a/lib/inets/test/httpd_basic_SUITE.erl
+++ b/lib/inets/test/httpd_basic_SUITE.erl
@@ -34,7 +34,8 @@ all() ->
[
uri_too_long_414,
header_too_long_413,
- escaped_url_in_error_body
+ escaped_url_in_error_body,
+ slowdose
].
groups() ->
@@ -278,7 +279,18 @@ escaped_url_in_error_body(Config) when is_list(Config) ->
inets:stop(httpd, Pid),
tsp("escaped_url_in_error_body -> done"),
ok.
-
+slowdose(doc) ->
+ ["Testing minimum bytes per second option"];
+slowdose(Config) when is_list(Config) ->
+ HttpdConf = ?config(httpd_conf, Config),
+ {ok, Pid} = inets:start(httpd, [{port, 0}, {minimum_bytes_per_second, 200}|HttpdConf]),
+ Info = httpd:info(Pid),
+ Port = proplists:get_value(port, Info),
+ {ok, Socket} = gen_tcp:connect("localhost", Port, []),
+ receive
+ after 6000 ->
+ {error, closed} = gen_tcp:send(Socket, "Hey")
+ end.
find_URL_path([]) ->
"";
find_URL_path(["URL", URL | _]) ->
diff --git a/lib/inets/test/inets_app_test.erl b/lib/inets/test/inets_app_test.erl
index db2218f3b6..d32f7e290b 100644
--- a/lib/inets/test/inets_app_test.erl
+++ b/lib/inets/test/inets_app_test.erl
@@ -35,6 +35,15 @@
init_per_testcase(undef_funcs, Config) ->
NewConfig = lists:keydelete(watchdog, 1, Config),
Dog = test_server:timetrap(inets_test_lib:minutes(10)),
+
+ %% We need to check if there is a point to run this test.
+ %% On some platforms, crypto will not build, which in turn
+ %% causes ssl to not build (at this time, this will
+ %% change in the future).
+ %% So, we first check if we can start crypto, and if not,
+ %% we skip this test case!
+ ?ENSURE_STARTED(crypto),
+
[{watchdog, Dog}| NewConfig];
init_per_testcase(_, Config) ->
Config.
@@ -240,13 +249,6 @@ undef_funcs(suite) ->
undef_funcs(doc) ->
[];
undef_funcs(Config) when is_list(Config) ->
- %% We need to check if there is a point to run this test.
- %% On some platforms, crypto will not build, which in turn
- %% causes ssl to not build (at this time, this will
- %% change in the future).
- %% So, we first check if we can start crypto, and if not,
- %% we skip this test case!
- ?ENSURE_STARTED(crypto),
App = inets,
AppFile = key1search(app_file, Config),
Mods = key1search(modules, AppFile),
diff --git a/lib/inets/vsn.mk b/lib/inets/vsn.mk
index 949eceea7f..0c7cb5e7c2 100644
--- a/lib/inets/vsn.mk
+++ b/lib/inets/vsn.mk
@@ -18,7 +18,7 @@
# %CopyrightEnd%
APPLICATION = inets
-INETS_VSN = 5.9.1
+INETS_VSN = 5.9.2
PRE_VSN =
APP_VSN = "$(APPLICATION)-$(INETS_VSN)$(PRE_VSN)"
diff --git a/lib/jinterface/java_src/com/ericsson/otp/erlang/OtpEpmd.java b/lib/jinterface/java_src/com/ericsson/otp/erlang/OtpEpmd.java
index deac528133..b985f8aa50 100644
--- a/lib/jinterface/java_src/com/ericsson/otp/erlang/OtpEpmd.java
+++ b/lib/jinterface/java_src/com/ericsson/otp/erlang/OtpEpmd.java
@@ -358,7 +358,7 @@ public class OtpEpmd {
}
public static String[] lookupNames() throws IOException {
- return lookupNames(InetAddress.getLocalHost());
+ return lookupNames(InetAddress.getByName(null));
}
public static String[] lookupNames(final InetAddress address)
diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml
index d0ed26a18d..f498a88c5e 100644
--- a/lib/kernel/doc/src/inet.xml
+++ b/lib/kernel/doc/src/inet.xml
@@ -524,7 +524,7 @@ fe80::204:acff:fe17:bf38
<item>
<p>
Restricts the socket to only use IPv6, prohibiting any
- IPv4 connections. This only applicable for
+ IPv4 connections. This is only applicable for
IPv6 sockets (option <c>inet6</c>).
</p>
<p>
@@ -561,9 +561,10 @@ fe80::204:acff:fe17:bf38
Setting this option on platforms where it does not exist
is ignored and getting this option with
<seealso marker="#getopts/2">getopts/2</seealso>
- returns no value. On Windows the option acually does
- not exist but it is emulated as being a read-only option
- with the value <c>true</c>.
+ returns no value, i.e. the returned list will not contain an
+ <c>{ipv6_v6only,_}</c> tuple. On Windows the option actually
+ does not exist, but it is emulated as being a
+ read-only option with the value <c>true</c>.
</p>
<p>
So it boils down to that setting this option to <c>true</c>
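A minimal sketch, not part of the patch and assuming an IPv6-capable host, of what the reworded paragraph means in practice: when the platform lacks the option, the list returned by inet:getopts/2 simply has no {ipv6_v6only,_} tuple in it.

    %% Minimal sketch, not part of the patch.
    {ok, L} = gen_tcp:listen(0, [inet6]),
    case inet:getopts(L, [ipv6_v6only]) of
        {ok, [{ipv6_v6only, true}]}  -> ipv6_only;            %% e.g. Windows (emulated, read-only)
        {ok, [{ipv6_v6only, false}]} -> maybe_dual_stack;
        {ok, []}                     -> option_not_supported   %% no {ipv6_v6only,_} tuple returned
    end.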
diff --git a/lib/kernel/test/code_SUITE.erl b/lib/kernel/test/code_SUITE.erl
index 5e0300639e..d7424c0c9a 100644
--- a/lib/kernel/test/code_SUITE.erl
+++ b/lib/kernel/test/code_SUITE.erl
@@ -684,8 +684,8 @@ ext_mod_dep(Config) when is_list(Config) ->
xref:set_default(s, [{verbose,false},{warnings,false},
{builtins,true},{recurse,true}]),
xref:set_library_path(s, code:get_path()),
- xref:add_directory(s, filename:dirname(code:which(kernel))),
- xref:add_directory(s, filename:dirname(code:which(lists))),
+ xref:add_directory(s, filename:join(code:lib_dir(kernel),"ebin")),
+ xref:add_directory(s, filename:join(code:lib_dir(stdlib),"ebin")),
case catch ext_mod_dep2() of
{'EXIT', Reason} ->
xref:stop(s),
diff --git a/lib/kernel/test/global_SUITE.erl b/lib/kernel/test/global_SUITE.erl
index 60035b50a0..6eb2134644 100644
--- a/lib/kernel/test/global_SUITE.erl
+++ b/lib/kernel/test/global_SUITE.erl
@@ -168,7 +168,7 @@ end_per_testcase(_Case, Config) ->
register_1(suite) -> [];
register_1(Config) when is_list(Config) ->
Timeout = 15,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
P = spawn_link(?MODULE, lock_global, [self(), Config]),
@@ -195,7 +195,6 @@ register_1(Config) when is_list(Config) ->
?line _ = global:unregister_name(foo),
write_high_level_trace(Config),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
lock_global(Parent, Config) ->
@@ -238,7 +237,7 @@ lock_global(Parent, Config) ->
both_known_1(suite) -> [];
both_known_1(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
@@ -316,7 +315,6 @@ both_known_1(Config) when is_list(Config) ->
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
lost_unregister(suite) -> [];
@@ -324,7 +322,7 @@ lost_unregister(doc) ->
["OTP-6428. An unregistered name reappears."];
lost_unregister(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
@@ -361,7 +359,6 @@ lost_unregister(Config) when is_list(Config) ->
stop_node(B),
stop_node(C),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
-define(UNTIL_LOOP, 300).
@@ -448,7 +445,7 @@ lock_global2(Id, Parent) ->
names(suite) -> [];
names(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -532,7 +529,6 @@ names(Config) when is_list(Config) ->
?line ?UNTIL(undefined =:= global:whereis_name(test)),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
names_hidden(suite) -> [];
@@ -541,7 +537,7 @@ names_hidden(doc) ->
"visible nodes."];
names_hidden(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -639,13 +635,12 @@ names_hidden(Config) when is_list(Config) ->
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
locks(suite) -> [];
locks(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line {ok, Cp1} = start_node(cp1, Config),
@@ -750,7 +745,6 @@ locks(Config) when is_list(Config) ->
?line test_server:sleep(10),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
@@ -760,7 +754,7 @@ locks_hidden(doc) ->
"visible nodes."];
locks_hidden(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNodes = nodes(),
@@ -833,14 +827,13 @@ locks_hidden(Config) when is_list(Config) ->
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
bad_input(suite) -> [];
bad_input(Config) when is_list(Config) ->
Timeout = 15,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
Pid = whereis(global_name_server),
@@ -854,13 +847,12 @@ bad_input(Config) when is_list(Config) ->
?line {'EXIT', _} = (catch global:trans({id, self()}, {m,f}, [node()], -1)),
?line Pid = whereis(global_name_server),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
names_and_locks(suite) -> [];
names_and_locks(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -922,7 +914,6 @@ names_and_locks(Config) when is_list(Config) ->
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
lock_die(suite) -> [];
@@ -930,7 +921,7 @@ lock_die(doc) ->
["OTP-6341. Remove locks using monitors."];
lock_die(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -964,7 +955,6 @@ lock_die(Config) when is_list(Config) ->
stop_node(Cp1),
stop_node(Cp2),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
name_die(suite) -> [];
@@ -972,7 +962,7 @@ name_die(doc) ->
["OTP-6341. Remove names using monitors."];
name_die(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1027,7 +1017,6 @@ name_die(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
kill_pid(Pid, File, Config) ->
@@ -1040,7 +1029,7 @@ basic_partition(doc) ->
["Tests that two partitioned networks exchange correct info."];
basic_partition(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1088,7 +1077,6 @@ basic_partition(Config) when is_list(Config) ->
stop_node(Cp2),
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
basic_name_partition(suite) ->
@@ -1099,7 +1087,7 @@ basic_name_partition(doc) ->
"during connect phase are handled correctly."];
basic_name_partition(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1167,7 +1155,6 @@ basic_name_partition(Config) when is_list(Config) ->
stop_node(Cp2),
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
%Peer nodes cp0 - cp6 are started. Break apart the connections from
@@ -1190,7 +1177,7 @@ advanced_partition(doc) ->
"partitioned networks connect."];
advanced_partition(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1278,7 +1265,6 @@ advanced_partition(Config) when is_list(Config) ->
stop_node(Cp5),
stop_node(Cp6),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
%Peer nodes cp0 - cp6 are started, and partitioned just like in
@@ -1297,7 +1283,7 @@ stress_partition(doc) ->
"go up/down a bit."];
stress_partition(Config) when is_list(Config) ->
Timeout = 90,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1377,7 +1363,6 @@ stress_partition(Config) when is_list(Config) ->
stop_node(Cp7),
stop_node(Cp8),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
@@ -1408,7 +1393,7 @@ ring(doc) ->
"Make sure that there's just one winner."];
ring(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1486,7 +1471,6 @@ ring(Config) when is_list(Config) ->
stop_node(Cp7),
stop_node(Cp8),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
simple_ring(suite) ->
@@ -1499,7 +1483,7 @@ simple_ring(doc) ->
"Make sure that there's just one winner."];
simple_ring(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1565,7 +1549,6 @@ simple_ring(Config) when is_list(Config) ->
stop_node(Cp4),
stop_node(Cp5),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
line(suite) ->
@@ -1576,7 +1559,7 @@ line(doc) ->
"Make sure that there's just one winner."];
line(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1655,7 +1638,6 @@ line(Config) when is_list(Config) ->
stop_node(Cp7),
stop_node(Cp8),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
@@ -1669,7 +1651,7 @@ simple_line(doc) ->
"Make sure that there's just one winner."];
simple_line(Config) when is_list(Config) ->
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -1735,7 +1717,6 @@ simple_line(Config) when is_list(Config) ->
stop_node(Cp4),
stop_node(Cp5),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
otp_1849(suite) -> [];
@@ -1743,7 +1724,7 @@ otp_1849(doc) ->
["Test ticket: Global should keep track of all pids that set the same lock."];
otp_1849(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line {ok, Cp1} = start_node(cp1, Config),
@@ -1822,7 +1803,6 @@ otp_1849(Config) when is_list(Config) ->
stop_node(Cp2),
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
@@ -1840,7 +1820,7 @@ otp_3162(Config) when is_list(Config) ->
do_otp_3162(StartFun, Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line [Cp1, Cp2, Cp3] = StartFun(),
@@ -1898,7 +1878,6 @@ do_otp_3162(StartFun, Config) ->
stop_node(Cp2),
stop_node(Cp3),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
@@ -1907,7 +1886,7 @@ otp_5640(doc) ->
["OTP-5640. 'allow' multiple names for registered processes."];
otp_5640(Config) when is_list(Config) ->
Timeout = 25,
- ?line Dog = test_server:timetrap(test_server:seconds(Timeout)),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
init_condition(Config),
?line {ok, B} = start_node(b, Config),
@@ -1965,7 +1944,6 @@ otp_5640(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_node(B),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
otp_5640_proc(_Parent) ->
@@ -1979,7 +1957,7 @@ otp_5737(doc) ->
["OTP-5737. set_lock/3 and trans/4 accept Retries = 0."];
otp_5737(Config) when is_list(Config) ->
Timeout = 25,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
@@ -2000,7 +1978,6 @@ otp_5737(Config) when is_list(Config) ->
write_high_level_trace(Config),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
otp_6931(suite) -> [];
@@ -2025,7 +2002,7 @@ simple_disconnect(suite) -> [];
simple_disconnect(doc) -> ["OTP-5563. Disconnected nodes (not partitions)"];
simple_disconnect(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2075,7 +2052,6 @@ simple_disconnect(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
%% Not used right now.
@@ -2118,7 +2094,7 @@ simple_resolve(suite) -> [];
simple_resolve(doc) -> ["OTP-5563. Partitions and names."];
simple_resolve(Config) when is_list(Config) ->
Timeout = 360,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2245,7 +2221,6 @@ simple_resolve(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
simple_resolve2(suite) -> [];
@@ -2255,7 +2230,7 @@ simple_resolve2(Config) when is_list(Config) ->
%% always work to re-start z_2. "Cannot be a global bug."
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2283,7 +2258,6 @@ simple_resolve2(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps), % Not all nodes may be present, but it works anyway.
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
simple_resolve3(suite) -> [];
@@ -2292,7 +2266,7 @@ simple_resolve3(Config) when is_list(Config) ->
%% Continuation of simple_resolve.
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2320,7 +2294,6 @@ simple_resolve3(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps), % Not all nodes may be present, but it works anyway.
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
res({Res,Resolver}, [N1, A2, Z2], Cf) ->
@@ -2504,7 +2477,7 @@ leftover_name(suite) -> [];
leftover_name(doc) -> ["OTP-5563. Bug: nodedown while synching."];
leftover_name(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2565,7 +2538,6 @@ leftover_name(Config) when is_list(Config) ->
write_high_level_trace(Config),
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
%% Runs on n_1
@@ -2604,7 +2576,7 @@ re_register_name(Config) when is_list(Config) ->
%% occupied by links, that's all.
%% Later: now monitors are checked.
Timeout = 15,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
Me = self(),
@@ -2618,7 +2590,6 @@ re_register_name(Config) when is_list(Config) ->
receive {Pid2, MonitoredBy2} -> [_] = MonitoredBy2 end,
?line _ = global:unregister_name(name),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
proc(Parent) ->
@@ -2652,7 +2623,7 @@ do_name_exit(StartFun, Version, Config) ->
%% The current release uses monitors so this test is not so relevant.
Timeout = 60,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2692,7 +2663,6 @@ do_name_exit(StartFun, Version, Config) ->
write_high_level_trace(Config),
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
long_lock(Parent) ->
@@ -2709,7 +2679,7 @@ external_nodes(suite) -> [];
external_nodes(doc) -> ["OTP-5563. External nodes (cnodes)."];
external_nodes(Config) when is_list(Config) ->
Timeout = 30,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2793,7 +2763,6 @@ external_nodes(Config) when is_list(Config) ->
?line ?UNTIL(length(get_ext_names()) =:= 0),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
get_ext_names() ->
@@ -2845,8 +2814,8 @@ many_nodes(suite) ->
many_nodes(doc) ->
["OTP-5770. Start many nodes. Make them connect at the same time."];
many_nodes(Config) when is_list(Config) ->
- Timeout = 180,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ Timeout = 240,
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -2902,7 +2871,6 @@ many_nodes(Config) when is_list(Config) ->
write_high_level_trace(Config),
?line stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
Diff = Time2 - Time,
Return = lists:flatten(io_lib:format("~w nodes took ~w ms",
[N_cps, Diff])),
@@ -2988,7 +2956,7 @@ sync_0(doc) ->
["OTP-5770. sync/0."];
sync_0(Config) when is_list(Config) ->
Timeout = 180,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
@@ -3013,7 +2981,6 @@ sync_0(Config) when is_list(Config) ->
stop_nodes(Cps),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
start_and_sync([]) ->
@@ -3031,7 +2998,7 @@ global_groups_change(suite) -> [];
global_groups_change(doc) -> ["Test change of global_groups parameter."];
global_groups_change(Config) ->
Timeout = 90,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line M = from($@, atom_to_list(node())),
@@ -3376,7 +3343,6 @@ global_groups_change(Config) ->
stop_node(CpE),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
sync_and_wait(Node) ->
@@ -3919,7 +3885,7 @@ global_lost_nodes(doc) ->
["Tests that locally loaded nodes do not loose contact with other nodes."];
global_lost_nodes(Config) when is_list(Config) ->
Timeout = 60,
- Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
@@ -3943,7 +3909,6 @@ global_lost_nodes(Config) when is_list(Config) ->
?line stop_node(Node1),
?line stop_node(Node2),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
global_load(MyName, OtherNode, OtherName) ->
@@ -3994,7 +3959,7 @@ mass_death(doc) ->
["Tests the simultaneous death of many processes with registered names"];
mass_death(Config) when is_list(Config) ->
Timeout = 90,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line OrigNames = global:registered_names(),
@@ -4023,9 +3988,9 @@ mass_death(Config) when is_list(Config) ->
{H,M,S} = time(),
io:format("Started probing: ~.4.0w-~.2.0w-~.2.0w ~.2.0w:~.2.0w:~.2.0w~n",
[YYYY,MM,DD,H,M,S]),
- wait_mass_death(Dog, Nodes, OrigNames, erlang:now(), Config).
+ wait_mass_death(Nodes, OrigNames, erlang:now(), Config).
-wait_mass_death(Dog, Nodes, OrigNames, Then, Config) ->
+wait_mass_death(Nodes, OrigNames, Then, Config) ->
?line Names = global:registered_names(),
?line
case Names--OrigNames of
@@ -4036,12 +4001,11 @@ wait_mass_death(Dog, Nodes, OrigNames, Then, Config) ->
stop_node(Node)
end, Nodes),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
{comment,lists:flatten(io_lib:format("~.3f s~n", [T/1000.0]))};
Ndiff ->
?line io:format("Ndiff: ~p~n", [Ndiff]),
?line test_server:sleep(1000),
- ?line wait_mass_death(Dog, Nodes, OrigNames, Then, Config)
+ ?line wait_mass_death(Nodes, OrigNames, Then, Config)
end.
mass_spawn([]) ->
@@ -4213,7 +4177,7 @@ garbage_messages(suite) ->
[];
garbage_messages(Config) when is_list(Config) ->
Timeout = 25,
- ?line Dog = test_server:timetrap({seconds,Timeout}),
+ ct:timetrap({seconds,Timeout}),
init_high_level_trace(Timeout),
?line init_condition(Config),
?line [Slave] = start_nodes([garbage_messages], slave, Config),
@@ -4233,7 +4197,6 @@ garbage_messages(Config) when is_list(Config) ->
write_high_level_trace(Config),
?line stop_node(Slave),
?line init_condition(Config),
- ?line test_server:timetrap_cancel(Dog),
ok.
wait_for_ready_net(Config) ->
diff --git a/lib/kernel/test/interactive_shell_SUITE.erl b/lib/kernel/test/interactive_shell_SUITE.erl
index da57867a54..36e13cec26 100644
--- a/lib/kernel/test/interactive_shell_SUITE.erl
+++ b/lib/kernel/test/interactive_shell_SUITE.erl
@@ -30,19 +30,10 @@
init_per_testcase(_Func, Config) ->
Dog = test_server:timetrap(test_server:minutes(3)),
- Term = case os:getenv("TERM") of
- List when is_list(List) ->
- List;
- _ ->
- "dumb"
- end,
- os:putenv("TERM","vt100"),
- [{watchdog,Dog},{term,Term}|Config].
+ [{watchdog,Dog}|Config].
end_per_testcase(_Func, Config) ->
Dog = ?config(watchdog, Config),
- Term = ?config(term,Config),
- os:putenv("TERM",Term),
test_server:timetrap_cancel(Dog).
@@ -56,9 +47,19 @@ groups() ->
[].
init_per_suite(Config) ->
- Config.
+ Term = case os:getenv("TERM") of
+ List when is_list(List) ->
+ List;
+ _ ->
+ "dumb"
+ end,
+ os:putenv("TERM","vt100"),
+ DefShell = get_default_shell(),
+ [{default_shell,DefShell},{term,Term}|Config].
-end_per_suite(_Config) ->
+end_per_suite(Config) ->
+ Term = ?config(term,Config),
+ os:putenv("TERM",Term),
ok.
init_per_group(_GroupName, Config) ->
@@ -78,70 +79,118 @@ end_per_group(_GroupName, Config) ->
get_columns_and_rows(suite) -> [];
get_columns_and_rows(doc) -> ["Test that the shell can access columns and rows"];
get_columns_and_rows(Config) when is_list(Config) ->
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,"io:columns()."},
-%% Behaviour change in R12B-5, returns 80
-%% {getline,"{error,enotsup}"},
- {getline,"{ok,80}"},
- {putline,"io:rows()."},
-%% Behaviour change in R12B-5, returns 24
-%% {getline,"{error,enotsup}"}
- {getline,"{ok,24}"}
- ],[]),
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,"io:columns()."},
- {getline,"{ok,90}"},
- {putline,"io:rows()."},
- {getline,"{ok,40}"}],
- [],
- "stty rows 40; stty columns 90; ").
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ %% Old shell tests
+ ?dbg(old_shell),
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ {getline_re,".*{error,enotsup}"},
+ {putline,"io:rows()."},
+ {getline_re,".*{error,enotsup}"}
+
+ ],[]),
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ {getline_re,".*{ok,90}"},
+ {putline,"io:rows()."},
+ {getline_re,".*{ok,40}"}],
+ [],
+ "stty rows 40; stty columns 90; ");
+ new ->
+ % New shell tests
+ ?dbg(new_shell),
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ %% Behaviour change in R12B-5, returns 80
+ %% {getline,"{error,enotsup}"},
+ {getline,"{ok,80}"},
+ {putline,"io:rows()."},
+ %% Behaviour change in R12B-5, returns 24
+ %% {getline,"{error,enotsup}"}
+ {getline,"{ok,24}"}
+ ],[]),
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"io:columns()."},
+ {getline,"{ok,90}"},
+ {putline,"io:rows()."},
+ {getline,"{ok,40}"}],
+ [],
+ "stty rows 40; stty columns 90; ")
+ end.
exit_initial(suite) -> [];
exit_initial(doc) -> ["Tests that exit of initial shell restarts shell"];
exit_initial(Config) when is_list(Config) ->
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,"exit()."},
- {getline,""},
- {getline,"Eshell"},
- {putline,""},
- {putline,"35."},
- {getline,"35"}],[]).
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ rtnode([{putline,""},
+ {putline, "2."},
+ {getline_re, ".*2"},
+ {putline,"exit()."},
+ {getline,""},
+ {getline,"Eshell"},
+ {putline,""},
+ {putline,"35."},
+ {getline_re,".*35"}],[]);
+ new ->
+ rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,"exit()."},
+ {getline,""},
+ {getline,"Eshell"},
+ {putline,""},
+ {putline,"35."},
+ {getline_re,"35"}],[])
+ end.
job_control_local(suite) -> [];
job_control_local(doc) -> [ "Tests that local shell can be "
"started by means of job control" ];
job_control_local(Config) when is_list(Config) ->
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline,[7]},
- {sleep,timeout(short)},
- {putline,""},
- {getline," -->"},
- {putline,"s"},
- {putline,"c"},
- {putline_raw,""},
- {getline,"Eshell"},
- {putline_raw,""},
- {getline,"1>"},
- {putline,"35."},
- {getline,"35"}],[]).
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ %% Old shell tests
+ {skip,"No new shell found"};
+ new ->
+ %% New shell tests
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline,[7]},
+ {sleep,timeout(short)},
+ {putline,""},
+ {getline," -->"},
+ {putline,"s"},
+ {putline,"c"},
+ {putline_raw,""},
+ {getline,"Eshell"},
+ {putline_raw,""},
+ {getline,"1>"},
+ {putline,"35."},
+ {getline,"35"}],[])
+ end.
job_control_remote(suite) -> [];
job_control_remote(doc) -> [ "Tests that remote shell can be "
"started by means of job control" ];
job_control_remote(Config) when is_list(Config) ->
- case node() of
- nonode@nohost ->
+ case {node(),proplists:get_value(default_shell,Config)} of
+ {nonode@nohost,_} ->
?line exit(not_distributed);
+ {_,old} ->
+ {skip,"No new shell found"};
_ ->
?line RNode = create_nodename(),
?line MyNode = atom_to_list(node()),
@@ -190,9 +239,11 @@ job_control_remote_noshell(doc) ->
[ "Tests that remote shell can be "
"started by means of job control to -noshell node" ];
job_control_remote_noshell(Config) when is_list(Config) ->
- case node() of
- nonode@nohost ->
+ case {node(),proplists:get_value(default_shell,Config)} of
+ {nonode@nohost,_} ->
?line exit(not_distributed);
+ {_,old} ->
+ {skip,"No new shell found"};
_ ->
?line RNode = create_nodename(),
?line NSNode = start_noshell_node(interactive_shell_noshell),
@@ -351,6 +402,33 @@ get_and_put(CPid, [{getline, Match}|T],N) ->
end
end;
+%% Copied from stdlib/io_proto_SUITE
+get_and_put(CPid, [{getline_re, Match}|T],N) ->
+ ?dbg({getline_re, Match}),
+ CPid ! {self(), {get_line, timeout(normal)}},
+ receive
+ {get_line, timeout} ->
+ error_logger:error_msg("~p: getline_re timeout waiting for \"~s\" "
+ "(command number ~p, skipped: ~p)~n",
+ [?MODULE, Match,N,get(getline_skipped)]),
+ {error, timeout};
+ {get_line, Data} ->
+ ?dbg({data,Data}),
+ case re:run(Data, Match,[{capture,none}]) of
+ match ->
+ erase(getline_skipped),
+ get_and_put(CPid, T,N+1);
+ _ ->
+ case get(getline_skipped) of
+ undefined ->
+ put(getline_skipped,[Data]);
+ List ->
+ put(getline_skipped,List ++ [Data])
+ end,
+ get_and_put(CPid, [{getline_re, Match}|T],N)
+ end
+ end;
+
get_and_put(CPid, [{putline_raw, Line}|T],N) ->
?dbg({putline_raw, Line}),
CPid ! {self(), {send_line, Line}},
@@ -631,6 +709,13 @@ get_data_within(Port, Timeout, Acc) ->
timeout
end.
-
-
-
+get_default_shell() ->
+ try
+ rtnode([{putline,""},
+ {putline, "whereis(user_drv)."},
+ {getline, "undefined"}],[]),
+ old
+ catch E:R ->
+ ?dbg({E,R}),
+ new
+ end.
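The new get_default_shell/0 helper decides between the old and the new shell by probing the node it spawns: with the old shell no user_drv process is registered, so the whereis(user_drv) line prints undefined and the helper returns old; if that expectation fails, the suite assumes the new shell. A minimal sketch, not part of the patch, of the same probe run directly in a shell:

    %% Minimal sketch, not part of the patch.
    case whereis(user_drv) of
        undefined                  -> old;   %% old shell: no user_drv process registered
        _Pid when is_pid(_Pid)     -> new    %% new shell: user_drv is running
    end.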
diff --git a/lib/kernel/test/kernel.cover b/lib/kernel/test/kernel.cover
index f6967ca651..af1dd7eaad 100644
--- a/lib/kernel/test/kernel.cover
+++ b/lib/kernel/test/kernel.cover
@@ -1,3 +1,3 @@
%% -*- erlang -*-
-{incl_mods,[gen_udp,inet6_udp,inet_res,inet_dns]}.
+{incl_app,kernel,details}.
diff --git a/lib/kernel/test/wrap_log_reader_SUITE.erl b/lib/kernel/test/wrap_log_reader_SUITE.erl
index 96dc3e6d33..6c47fda9c5 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE.erl
@@ -557,7 +557,7 @@ rec(M, Where) ->
M ->
ok;
Else -> ?t:fail({error, {Where, Else}})
- after 1000 -> ?t:fail({error, {Where, time_out}})
+ after 5000 -> ?t:fail({error, {Where, time_out}})
end.
pps() ->
diff --git a/lib/public_key/asn1/AuthenticationFramework.asn1 b/lib/public_key/asn1/AuthenticationFramework.asn1
new file mode 100644
index 0000000000..3754486473
--- /dev/null
+++ b/lib/public_key/asn1/AuthenticationFramework.asn1
@@ -0,0 +1,367 @@
+AuthenticationFramework {joint-iso-itu-t ds(5) module(1)
+ authenticationFramework(7) 6} DEFINITIONS ::=
+BEGIN
+
+-- EXPORTS All
+-- The types and values defined in this module are exported for use in the other ASN.1 modules contained
+-- within the Directory Specifications, and for the use of other applications which will use them to access
+-- Directory services. Other applications may use them for their own purposes, but this will not constrain
+-- extensions and modifications needed to maintain or improve the Directory service.
+IMPORTS
+ id-at, id-nf, id-oc, informationFramework, selectedAttributeTypes,
+ basicAccessControl, certificateExtensions
+ FROM UsefulDefinitions {joint-iso-itu-t ds(5) module(1)
+ usefulDefinitions(0) 6}
+ Name, ATTRIBUTE, OBJECT-CLASS, NAME-FORM, top
+ FROM InformationFramework informationFramework
+ UniqueIdentifier, octetStringMatch, commonName, UnboundedDirectoryString
+ FROM SelectedAttributeTypes selectedAttributeTypes
+ certificateExactMatch, certificatePairExactMatch, certificateListExactMatch,
+ KeyUsage, GeneralNames, CertificatePoliciesSyntax,
+ algorithmIdentifierMatch, CertPolicyId
+ FROM CertificateExtensions certificateExtensions;
+
+-- parameterized types
+ENCRYPTED{ToBeEnciphered} ::=
+ BIT STRING
+ (CONSTRAINED BY {
+ -- shall be the result of applying an encipherment procedure
+ -- to the BER-encoded octets of a value of --ToBeEnciphered})
+
+HASH{ToBeHashed} ::= SEQUENCE {
+ algorithmIdentifier AlgorithmIdentifier{{SupportedAlgorithms}},
+ hashValue
+ BIT STRING
+ (CONSTRAINED BY {
+ -- shall be the result of applying a hashing procedure to the DER-encoded octets
+ -- of a value of -- ToBeHashed})
+}
+
+ENCRYPTED-HASH{ToBeSigned} ::=
+ BIT STRING
+ (CONSTRAINED BY {
+ -- shall be the result of applying a hashing procedure to the DER-encoded (see 6.1) octets
+ -- of a value of --ToBeSigned -- and then applying an encipherment procedure to those octets --})
+
+SIGNATURE{ToBeSigned} ::= SEQUENCE {
+ algorithmIdentifier AlgorithmIdentifier{{SupportedAlgorithms}},
+ encrypted ENCRYPTED-HASH{ToBeSigned}
+}
+
+SIGNED{ToBeSigned} ::= SEQUENCE {
+ toBeSigned ToBeSigned,
+ COMPONENTS OF SIGNATURE{ToBeSigned}
+}
+
+-- public-key certificate definition
+Certificate ::= SIGNED{CertificateContent}
+
+CertificateContent ::= SEQUENCE {
+ version [0] Version DEFAULT v1,
+ serialNumber CertificateSerialNumber,
+ signature AlgorithmIdentifier{{SupportedAlgorithms}},
+ issuer Name,
+ validity Validity,
+ subject Name,
+ subjectPublicKeyInfo SubjectPublicKeyInfo,
+ issuerUniqueIdentifier [1] IMPLICIT UniqueIdentifier OPTIONAL,
+ -- if present, version shall be v2 or v3
+ subjectUniqueIdentifier [2] IMPLICIT UniqueIdentifier OPTIONAL,
+ -- if present, version shall be v2 or v3
+ extensions [3] Extensions OPTIONAL
+ -- If present, version shall be v3
+}
+
+Version ::= INTEGER {v1(0), v2(1), v3(2)}
+
+CertificateSerialNumber ::= INTEGER
+
+AlgorithmIdentifier{ALGORITHM:SupportedAlgorithms} ::= SEQUENCE {
+ algorithm ALGORITHM.&id({SupportedAlgorithms}),
+ parameters ALGORITHM.&Type({SupportedAlgorithms}{@algorithm}) OPTIONAL
+}
+
+-- Definition of the following information object set is deferred, perhaps to standardized
+-- profiles or to protocol implementation conformance statements. The set is required to
+-- specify a table constraint on the parameters component of AlgorithmIdentifier.
+SupportedAlgorithms ALGORITHM ::=
+ {...}
+
+Validity ::= SEQUENCE {notBefore Time,
+ notAfter Time
+}
+
+SubjectPublicKeyInfo ::= SEQUENCE {
+ algorithm AlgorithmIdentifier{{SupportedAlgorithms}},
+ subjectPublicKey BIT STRING
+}
+
+Time ::= CHOICE {utcTime UTCTime,
+ generalizedTime GeneralizedTime
+}
+
+Extensions ::= SEQUENCE OF Extension
+
+-- For those extensions where ordering of individual extensions within the SEQUENCE is significant, the
+-- specification of those individual extensions shall include the rules for the significance of the order therein
+Extension ::= SEQUENCE {
+ extnId EXTENSION.&id({ExtensionSet}),
+ critical BOOLEAN DEFAULT FALSE,
+ extnValue
+ OCTET STRING
+ (CONTAINING EXTENSION.&ExtnType({ExtensionSet}{@extnId})
+ ENCODED BY
+ der)
+}
+
+der OBJECT IDENTIFIER ::=
+ {joint-iso-itu-t asn1(1) ber-derived(2) distinguished-encoding(1)}
+
+ExtensionSet EXTENSION ::=
+ {...}
+
+EXTENSION ::= CLASS {&id OBJECT IDENTIFIER UNIQUE,
+ &ExtnType
+}WITH SYNTAX {SYNTAX &ExtnType
+ IDENTIFIED BY &id
+}
+
+ALGORITHM ::= CLASS {&Type OPTIONAL,
+ &id OBJECT IDENTIFIER UNIQUE
+}WITH SYNTAX {[&Type]
+ IDENTIFIED BY &id
+}
+
+-- other PKI certificate constructs
+Certificates ::= SEQUENCE {
+ userCertificate Certificate,
+ certificationPath ForwardCertificationPath OPTIONAL
+}
+
+CertificationPath ::= SEQUENCE {
+ userCertificate Certificate,
+ theCACertificates SEQUENCE OF CertificatePair OPTIONAL
+}
+
+ForwardCertificationPath ::= SEQUENCE OF CrossCertificates
+
+CrossCertificates ::= SET OF Certificate
+
+PkiPath ::= SEQUENCE OF Certificate
+
+-- certificate revocation list (CRL)
+CertificateList ::=
+ SIGNED{CertificateListContent}
+
+CertificateListContent ::= SEQUENCE {
+ version Version OPTIONAL,
+ -- if present, version shall be v2
+ signature AlgorithmIdentifier{{SupportedAlgorithms}},
+ issuer Name,
+ thisUpdate Time,
+ nextUpdate Time OPTIONAL,
+ revokedCertificates
+ SEQUENCE OF
+ SEQUENCE {serialNumber CertificateSerialNumber,
+ revocationDate Time,
+ crlEntryExtensions Extensions OPTIONAL} OPTIONAL,
+ crlExtensions [0] Extensions OPTIONAL
+}
+
+-- PKI object classes
+pkiUser OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND auxiliary
+ MAY CONTAIN {userCertificate}
+ ID id-oc-pkiUser
+}
+
+pkiCA OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND auxiliary
+ MAY CONTAIN
+ {cACertificate | certificateRevocationList | authorityRevocationList |
+ crossCertificatePair}
+ ID id-oc-pkiCA
+}
+
+cRLDistributionPoint OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND structural
+ MUST CONTAIN {commonName}
+ MAY CONTAIN
+ {certificateRevocationList | authorityRevocationList | deltaRevocationList}
+ ID id-oc-cRLDistributionPoint
+}
+
+cRLDistPtNameForm NAME-FORM ::= {
+ NAMES cRLDistributionPoint
+ WITH ATTRIBUTES {commonName}
+ ID id-nf-cRLDistPtNameForm
+}
+
+deltaCRL OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND auxiliary
+ MAY CONTAIN {deltaRevocationList}
+ ID id-oc-deltaCRL
+}
+
+cpCps OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND auxiliary
+ MAY CONTAIN {certificatePolicy | certificationPracticeStmt}
+ ID id-oc-cpCps
+}
+
+pkiCertPath OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND auxiliary
+ MAY CONTAIN {pkiPath}
+ ID id-oc-pkiCertPath
+}
+
+-- PKI directory attributes
+userCertificate ATTRIBUTE ::= {
+ WITH SYNTAX Certificate
+ EQUALITY MATCHING RULE certificateExactMatch
+ ID id-at-userCertificate
+}
+
+cACertificate ATTRIBUTE ::= {
+ WITH SYNTAX Certificate
+ EQUALITY MATCHING RULE certificateExactMatch
+ ID id-at-cAcertificate
+}
+
+crossCertificatePair ATTRIBUTE ::= {
+ WITH SYNTAX CertificatePair
+ EQUALITY MATCHING RULE certificatePairExactMatch
+ ID id-at-crossCertificatePair
+}
+
+CertificatePair ::= SEQUENCE {
+ forward [0] Certificate OPTIONAL,
+ reverse [1] Certificate OPTIONAL
+ -- at least one of the pair shall be present
+}
+(WITH COMPONENTS {
+ ...,
+ forward PRESENT
+ } | WITH COMPONENTS {
+ ...,
+ reverse PRESENT
+ })
+
+certificateRevocationList ATTRIBUTE ::= {
+ WITH SYNTAX CertificateList
+ EQUALITY MATCHING RULE certificateListExactMatch
+ ID id-at-certificateRevocationList
+}
+
+authorityRevocationList ATTRIBUTE ::= {
+ WITH SYNTAX CertificateList
+ EQUALITY MATCHING RULE certificateListExactMatch
+ ID id-at-authorityRevocationList
+}
+
+deltaRevocationList ATTRIBUTE ::= {
+ WITH SYNTAX CertificateList
+ EQUALITY MATCHING RULE certificateListExactMatch
+ ID id-at-deltaRevocationList
+}
+
+supportedAlgorithms ATTRIBUTE ::= {
+ WITH SYNTAX SupportedAlgorithm
+ EQUALITY MATCHING RULE algorithmIdentifierMatch
+ ID id-at-supportedAlgorithms
+}
+
+SupportedAlgorithm ::= SEQUENCE {
+ algorithmIdentifier AlgorithmIdentifier{{SupportedAlgorithms}},
+ intendedUsage [0] KeyUsage OPTIONAL,
+ intendedCertificatePolicies [1] CertificatePoliciesSyntax OPTIONAL
+}
+
+certificationPracticeStmt ATTRIBUTE ::= {
+ WITH SYNTAX InfoSyntax
+ ID id-at-certificationPracticeStmt
+}
+
+InfoSyntax ::= CHOICE {
+ content UnboundedDirectoryString,
+ pointer SEQUENCE {name GeneralNames,
+ hash HASH{HashedPolicyInfo} OPTIONAL}
+}
+
+POLICY ::= TYPE-IDENTIFIER
+
+HashedPolicyInfo ::= POLICY.&Type({Policies})
+
+Policies POLICY ::=
+ {...} -- Defined by implementors
+
+certificatePolicy ATTRIBUTE ::= {
+ WITH SYNTAX PolicySyntax
+ ID id-at-certificatePolicy
+}
+
+PolicySyntax ::= SEQUENCE {
+ policyIdentifier PolicyID,
+ policySyntax InfoSyntax
+}
+
+PolicyID ::= CertPolicyId
+
+pkiPath ATTRIBUTE ::= {WITH SYNTAX PkiPath
+ ID id-at-pkiPath
+}
+
+userPassword ATTRIBUTE ::= {
+ WITH SYNTAX OCTET STRING(SIZE (0..MAX))
+ EQUALITY MATCHING RULE octetStringMatch
+ ID id-at-userPassword
+}
+
+-- object identifier assignments
+-- object classes
+id-oc-cRLDistributionPoint OBJECT IDENTIFIER ::=
+ {id-oc 19}
+
+id-oc-pkiUser OBJECT IDENTIFIER ::= {id-oc 21}
+
+id-oc-pkiCA OBJECT IDENTIFIER ::= {id-oc 22}
+
+id-oc-deltaCRL OBJECT IDENTIFIER ::= {id-oc 23}
+
+id-oc-cpCps OBJECT IDENTIFIER ::= {id-oc 30}
+
+id-oc-pkiCertPath OBJECT IDENTIFIER ::= {id-oc 31}
+
+-- name forms
+id-nf-cRLDistPtNameForm OBJECT IDENTIFIER ::= {id-nf 14}
+
+-- directory attributes
+id-at-userPassword OBJECT IDENTIFIER ::= {id-at 35}
+
+id-at-userCertificate OBJECT IDENTIFIER ::= {id-at 36}
+
+id-at-cAcertificate OBJECT IDENTIFIER ::= {id-at 37}
+
+id-at-authorityRevocationList OBJECT IDENTIFIER ::= {id-at 38}
+
+id-at-certificateRevocationList OBJECT IDENTIFIER ::= {id-at 39}
+
+id-at-crossCertificatePair OBJECT IDENTIFIER ::= {id-at 40}
+
+id-at-supportedAlgorithms OBJECT IDENTIFIER ::= {id-at 52}
+
+id-at-deltaRevocationList OBJECT IDENTIFIER ::= {id-at 53}
+
+id-at-certificationPracticeStmt OBJECT IDENTIFIER ::= {id-at 68}
+
+id-at-certificatePolicy OBJECT IDENTIFIER ::= {id-at 69}
+
+id-at-pkiPath OBJECT IDENTIFIER ::= {id-at 70}
+
+END -- AuthenticationFramework
diff --git a/lib/public_key/asn1/InformationFramework.asn1 b/lib/public_key/asn1/InformationFramework.asn1
new file mode 100644
index 0000000000..4aed43a39e
--- /dev/null
+++ b/lib/public_key/asn1/InformationFramework.asn1
@@ -0,0 +1,682 @@
+InformationFramework {joint-iso-itu-t ds(5) module(1) informationFramework(1)
+ 6} DEFINITIONS ::=
+BEGIN
+
+-- EXPORTS All
+-- The types and values defined in this module are exported for use in the other ASN.1 modules contained
+-- within the Directory Specifications, and for the use of other applications which will use them to access
+-- Directory services. Other applications may use them for their own purposes, but this will not constrain
+-- extensions and modifications needed to maintain or improve the Directory service.
+IMPORTS
+ -- from ITU-T Rec. X.501 | ISO/IEC 9594-2
+ directoryAbstractService, id-ar, id-at, id-mr, id-nf, id-oa, id-oc,
+ id-sc, selectedAttributeTypes, serviceAdministration
+ FROM UsefulDefinitions {joint-iso-itu-t ds(5) module(1)
+ usefulDefinitions(0) 6}
+ SearchRule
+ FROM ServiceAdministration serviceAdministration
+ -- from ITU-T Rec. X.511 | ISO/IEC 9594-3
+ TypeAndContextAssertion
+ FROM DirectoryAbstractService directoryAbstractService
+ -- from ITU-T Rec. X.520 | ISO/IEC 9594-6
+ booleanMatch, commonName, generalizedTimeMatch, generalizedTimeOrderingMatch,
+ integerFirstComponentMatch, integerMatch, integerOrderingMatch,
+ objectIdentifierFirstComponentMatch, UnboundedDirectoryString
+ FROM SelectedAttributeTypes selectedAttributeTypes;
+
+-- attribute data types
+Attribute{ATTRIBUTE:SupportedAttributes} ::= SEQUENCE {
+ type ATTRIBUTE.&id({SupportedAttributes}),
+ values
+ SET SIZE (0..MAX) OF ATTRIBUTE.&Type({SupportedAttributes}{@type}),
+ valuesWithContext
+ SET SIZE (1..MAX) OF
+ SEQUENCE {value ATTRIBUTE.&Type({SupportedAttributes}{@type}),
+ contextList SET SIZE (1..MAX) OF Context} OPTIONAL
+}
+
+AttributeType ::= ATTRIBUTE.&id
+
+AttributeValue ::= ATTRIBUTE.&Type
+
+Context ::= SEQUENCE {
+ contextType CONTEXT.&id({SupportedContexts}),
+ contextValues
+ SET SIZE (1..MAX) OF CONTEXT.&Type({SupportedContexts}{@contextType}),
+ fallback BOOLEAN DEFAULT FALSE
+}
+
+AttributeValueAssertion ::= SEQUENCE {
+ type ATTRIBUTE.&id({SupportedAttributes}),
+ assertion
+ ATTRIBUTE.&equality-match.&AssertionType
+ ({SupportedAttributes}{@type}),
+ assertedContexts
+ CHOICE {allContexts [0] NULL,
+ selectedContexts [1] SET SIZE (1..MAX) OF ContextAssertion
+ } OPTIONAL
+}
+
+ContextAssertion ::= SEQUENCE {
+ contextType CONTEXT.&id({SupportedContexts}),
+ contextValues
+ SET SIZE (1..MAX) OF
+ CONTEXT.&Assertion({SupportedContexts}{@contextType})
+}
+
+AttributeTypeAssertion ::= SEQUENCE {
+ type ATTRIBUTE.&id({SupportedAttributes}),
+ assertedContexts SEQUENCE SIZE (1..MAX) OF ContextAssertion OPTIONAL
+}
+
+-- Definition of the following information object set is deferred, perhaps to standardized
+-- profiles or to protocol implementation conformance statements. The set is required to
+-- specify a table constraint on the values component of Attribute, the value component
+-- of AttributeTypeAndValue, and the assertion component of AttributeValueAssertion.
+SupportedAttributes ATTRIBUTE ::=
+ {objectClass | aliasedEntryName, ...}
+
+-- Definition of the following information object set is deferred, perhaps to standardized
+-- profiles or to protocol implementation conformance statements. The set is required to
+-- specify a table constraint on the context specifications
+SupportedContexts CONTEXT ::=
+ {...}
+
+-- naming data types
+Name ::= CHOICE { -- only one possibility for now --rdnSequence RDNSequence
+}
+
+RDNSequence ::= SEQUENCE OF RelativeDistinguishedName
+
+DistinguishedName ::= RDNSequence
+
+RelativeDistinguishedName ::=
+ SET SIZE (1..MAX) OF AttributeTypeAndDistinguishedValue
+
+AttributeTypeAndDistinguishedValue ::= SEQUENCE {
+ type ATTRIBUTE.&id({SupportedAttributes}),
+ value ATTRIBUTE.&Type({SupportedAttributes}{@type}),
+ primaryDistinguished BOOLEAN DEFAULT TRUE,
+ valuesWithContext
+ SET SIZE (1..MAX) OF
+ SEQUENCE {distingAttrValue
+ [0] ATTRIBUTE.&Type({SupportedAttributes}{@type})
+ OPTIONAL,
+ contextList SET SIZE (1..MAX) OF Context} OPTIONAL
+}
+
+-- subtree data types
+SubtreeSpecification ::= SEQUENCE {
+ base [0] LocalName DEFAULT {},
+ COMPONENTS OF ChopSpecification,
+ specificationFilter [4] Refinement OPTIONAL
+}
+
+-- empty sequence specifies whole administrative area
+LocalName ::= RDNSequence
+
+ChopSpecification ::= SEQUENCE {
+ specificExclusions
+ [1] SET SIZE (1..MAX) OF
+ CHOICE {chopBefore [0] LocalName,
+ chopAfter [1] LocalName} OPTIONAL,
+ minimum [2] BaseDistance DEFAULT 0,
+ maximum [3] BaseDistance OPTIONAL
+}
+
+BaseDistance ::= INTEGER(0..MAX)
+
+Refinement ::= CHOICE {
+ item [0] OBJECT-CLASS.&id,
+ and [1] SET SIZE (1..MAX) OF Refinement,
+ or [2] SET SIZE (1..MAX) OF Refinement,
+ not [3] Refinement
+}
+
+-- OBJECT-CLASS information object class specification
+OBJECT-CLASS ::= CLASS {
+ &Superclasses OBJECT-CLASS OPTIONAL,
+ &kind ObjectClassKind DEFAULT structural,
+ &MandatoryAttributes ATTRIBUTE OPTIONAL,
+ &OptionalAttributes ATTRIBUTE OPTIONAL,
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ [SUBCLASS OF &Superclasses]
+ [KIND &kind]
+ [MUST CONTAIN &MandatoryAttributes]
+ [MAY CONTAIN &OptionalAttributes]
+ ID &id
+}
+
+ObjectClassKind ::= ENUMERATED {abstract(0), structural(1), auxiliary(2)}
+
+-- object classes
+top OBJECT-CLASS ::= {
+ KIND abstract
+ MUST CONTAIN {objectClass}
+ ID id-oc-top
+}
+
+alias OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ MUST CONTAIN {aliasedEntryName}
+ ID id-oc-alias
+}
+
+parent OBJECT-CLASS ::= {KIND abstract
+ ID id-oc-parent
+}
+
+child OBJECT-CLASS ::= {KIND auxiliary
+ ID id-oc-child
+}
+
+-- ATTRIBUTE information object class specification
+ATTRIBUTE ::= CLASS {
+ &derivation ATTRIBUTE OPTIONAL,
+ &Type OPTIONAL, -- either &Type or &derivation required
+ &equality-match MATCHING-RULE OPTIONAL,
+ &ordering-match MATCHING-RULE OPTIONAL,
+ &substrings-match MATCHING-RULE OPTIONAL,
+ &single-valued BOOLEAN DEFAULT FALSE,
+ &collective BOOLEAN DEFAULT FALSE,
+ &dummy BOOLEAN DEFAULT FALSE,
+ -- operational extensions
+ &no-user-modification BOOLEAN DEFAULT FALSE,
+ &usage AttributeUsage DEFAULT userApplications,
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ [SUBTYPE OF &derivation]
+ [WITH SYNTAX &Type]
+ [EQUALITY MATCHING RULE &equality-match]
+ [ORDERING MATCHING RULE &ordering-match]
+ [SUBSTRINGS MATCHING RULE &substrings-match]
+ [SINGLE VALUE &single-valued]
+ [COLLECTIVE &collective]
+ [DUMMY &dummy]
+ [NO USER MODIFICATION &no-user-modification]
+ [USAGE &usage]
+ ID &id
+}
+
+AttributeUsage ::= ENUMERATED {
+ userApplications(0), directoryOperation(1), distributedOperation(2),
+ dSAOperation(3)}
+
+-- attributes
+objectClass ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-at-objectClass
+}
+
+aliasedEntryName ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ ID id-at-aliasedEntryName
+}
+
+-- MATCHING-RULE information object class specification
+MATCHING-RULE ::= CLASS {
+ &ParentMatchingRules MATCHING-RULE OPTIONAL,
+ &AssertionType OPTIONAL,
+ &uniqueMatchIndicator ATTRIBUTE OPTIONAL,
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ [PARENT &ParentMatchingRules]
+ [SYNTAX &AssertionType]
+ [UNIQUE-MATCH-INDICATOR &uniqueMatchIndicator]
+ ID &id
+}
+
+-- matching rules
+objectIdentifierMatch MATCHING-RULE ::= {
+ SYNTAX OBJECT IDENTIFIER
+ ID id-mr-objectIdentifierMatch
+}
+
+distinguishedNameMatch MATCHING-RULE ::= {
+ SYNTAX DistinguishedName
+ ID id-mr-distinguishedNameMatch
+}
+
+MAPPING-BASED-MATCHING{SelectedBy, BOOLEAN:combinable, MappingResult,
+ OBJECT IDENTIFIER:matchingRule} ::= CLASS {
+ &selectBy SelectedBy OPTIONAL,
+ &ApplicableTo ATTRIBUTE,
+ &subtypesIncluded BOOLEAN DEFAULT TRUE,
+ &combinable BOOLEAN(combinable),
+ &mappingResults MappingResult OPTIONAL,
+ &userControl BOOLEAN DEFAULT FALSE,
+ &exclusive BOOLEAN DEFAULT TRUE,
+ &matching-rule MATCHING-RULE.&id(matchingRule),
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ [SELECT BY &selectBy]
+ APPLICABLE TO &ApplicableTo
+ [SUBTYPES INCLUDED &subtypesIncluded]
+ COMBINABLE &combinable
+ [MAPPING RESULTS &mappingResults]
+ [USER CONTROL &userControl]
+ [EXCLUSIVE &exclusive]
+ MATCHING RULE &matching-rule
+ ID &id
+}
+
+-- NAME-FORM information object class specification
+NAME-FORM ::= CLASS {
+ &namedObjectClass OBJECT-CLASS,
+ &MandatoryAttributes ATTRIBUTE,
+ &OptionalAttributes ATTRIBUTE OPTIONAL,
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ NAMES &namedObjectClass
+ WITH ATTRIBUTES &MandatoryAttributes
+ [AND OPTIONALLY &OptionalAttributes]
+ ID &id
+}
+
+-- STRUCTURE-RULE class and DIT structure rule data types
+DITStructureRule ::= SEQUENCE {
+ ruleIdentifier RuleIdentifier,
+ -- shall be unique within the scope of the subschema
+ nameForm NAME-FORM.&id,
+ superiorStructureRules SET SIZE (1..MAX) OF RuleIdentifier OPTIONAL
+}
+
+RuleIdentifier ::= INTEGER
+
+STRUCTURE-RULE ::= CLASS {
+ &nameForm NAME-FORM,
+ &SuperiorStructureRules STRUCTURE-RULE OPTIONAL,
+ &id RuleIdentifier
+}
+WITH SYNTAX {
+ NAME FORM &nameForm
+ [SUPERIOR RULES &SuperiorStructureRules]
+ ID &id
+}
+
+-- DIT content rule data type and CONTENT-RULE class
+DITContentRule ::= SEQUENCE {
+ structuralObjectClass OBJECT-CLASS.&id,
+ auxiliaries SET SIZE (1..MAX) OF OBJECT-CLASS.&id OPTIONAL,
+ mandatory [1] SET SIZE (1..MAX) OF ATTRIBUTE.&id OPTIONAL,
+ optional [2] SET SIZE (1..MAX) OF ATTRIBUTE.&id OPTIONAL,
+ precluded [3] SET SIZE (1..MAX) OF ATTRIBUTE.&id OPTIONAL
+}
+
+CONTENT-RULE ::= CLASS {
+ &structuralClass OBJECT-CLASS.&id UNIQUE,
+ &Auxiliaries OBJECT-CLASS OPTIONAL,
+ &Mandatory ATTRIBUTE OPTIONAL,
+ &Optional ATTRIBUTE OPTIONAL,
+ &Precluded ATTRIBUTE OPTIONAL
+}
+WITH SYNTAX {
+ STRUCTURAL OBJECT-CLASS &structuralClass
+ [AUXILIARY OBJECT-CLASSES &Auxiliaries]
+ [MUST CONTAIN &Mandatory]
+ [MAY CONTAIN &Optional]
+ [MUST-NOT CONTAIN &Precluded]
+}
+
+CONTEXT ::= CLASS {
+ &Type ,
+ &DefaultValue OPTIONAL,
+ &Assertion OPTIONAL,
+ &absentMatch BOOLEAN DEFAULT TRUE,
+ &id OBJECT IDENTIFIER UNIQUE
+}
+WITH SYNTAX {
+ WITH SYNTAX &Type
+ [DEFAULT-VALUE &DefaultValue]
+ [ASSERTED AS &Assertion]
+ [ABSENT-MATCH &absentMatch]
+ ID &id
+}
+
+DITContextUse ::= SEQUENCE {
+ attributeType ATTRIBUTE.&id,
+ mandatoryContexts [1] SET SIZE (1..MAX) OF CONTEXT.&id OPTIONAL,
+ optionalContexts [2] SET SIZE (1..MAX) OF CONTEXT.&id OPTIONAL
+}
+
+DIT-CONTEXT-USE-RULE ::= CLASS {
+ &attributeType ATTRIBUTE.&id UNIQUE,
+ &Mandatory CONTEXT OPTIONAL,
+ &Optional CONTEXT OPTIONAL
+}
+WITH SYNTAX {
+ ATTRIBUTE TYPE &attributeType
+ [MANDATORY CONTEXTS &Mandatory]
+ [OPTIONAL CONTEXTS &Optional]
+}
+
+FRIENDS ::= CLASS {
+ &anchor ATTRIBUTE.&id UNIQUE,
+ &Friends ATTRIBUTE
+}WITH SYNTAX {ANCHOR &anchor
+ FRIENDS &Friends
+}
+
+-- system schema information objects
+-- object classes
+subentry OBJECT-CLASS ::= {
+ SUBCLASS OF {top}
+ KIND structural
+ MUST CONTAIN {commonName | subtreeSpecification}
+ ID id-sc-subentry
+}
+
+subentryNameForm NAME-FORM ::= {
+ NAMES subentry
+ WITH ATTRIBUTES {commonName}
+ ID id-nf-subentryNameForm
+}
+
+subtreeSpecification ATTRIBUTE ::= {
+ WITH SYNTAX SubtreeSpecification
+ USAGE directoryOperation
+ ID id-oa-subtreeSpecification
+}
+
+administrativeRole ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT-CLASS.&id
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ USAGE directoryOperation
+ ID id-oa-administrativeRole
+}
+
+createTimestamp ATTRIBUTE ::= {
+ WITH SYNTAX GeneralizedTime
+ -- as per 46.3 b) or c) of ITU-T Rec. X.680 | ISO/IEC 8824-1
+ EQUALITY MATCHING RULE generalizedTimeMatch
+ ORDERING MATCHING RULE generalizedTimeOrderingMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-createTimestamp
+}
+
+modifyTimestamp ATTRIBUTE ::= {
+ WITH SYNTAX GeneralizedTime
+ -- as per 46.3 b) or c) of ITU-T Rec. X.680 | ISO/IEC 8824-1
+ EQUALITY MATCHING RULE generalizedTimeMatch
+ ORDERING MATCHING RULE generalizedTimeOrderingMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-modifyTimestamp
+}
+
+subschemaTimestamp ATTRIBUTE ::= {
+ WITH SYNTAX GeneralizedTime
+ -- as per 46.3 b) or c) of ITU-T Rec. X.680 | ISO/IEC 8824-1
+ EQUALITY MATCHING RULE generalizedTimeMatch
+ ORDERING MATCHING RULE generalizedTimeOrderingMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-subschemaTimestamp
+}
+
+creatorsName ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-creatorsName
+}
+
+modifiersName ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-modifiersName
+}
+
+subschemaSubentryList ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-subschemaSubentryList
+}
+
+accessControlSubentryList ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-accessControlSubentryList
+}
+
+collectiveAttributeSubentryList ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-collectiveAttributeSubentryList
+}
+
+contextDefaultSubentryList ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-contextDefaultSubentryList
+}
+
+serviceAdminSubentryList ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-serviceAdminSubentryList
+}
+
+hasSubordinates ATTRIBUTE ::= {
+ WITH SYNTAX BOOLEAN
+ EQUALITY MATCHING RULE booleanMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-hasSubordinates
+}
+
+accessControlSubentry OBJECT-CLASS ::= {
+ KIND auxiliary
+ ID id-sc-accessControlSubentry
+}
+
+collectiveAttributeSubentry OBJECT-CLASS ::= {
+ KIND auxiliary
+ ID id-sc-collectiveAttributeSubentry
+}
+
+collectiveExclusions ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ USAGE directoryOperation
+ ID id-oa-collectiveExclusions
+}
+
+contextAssertionSubentry OBJECT-CLASS ::= {
+ KIND auxiliary
+ MUST CONTAIN {contextAssertionDefaults}
+ ID id-sc-contextAssertionSubentry
+}
+
+contextAssertionDefaults ATTRIBUTE ::= {
+ WITH SYNTAX TypeAndContextAssertion
+ EQUALITY MATCHING RULE objectIdentifierFirstComponentMatch
+ USAGE directoryOperation
+ ID id-oa-contextAssertionDefault
+}
+
+serviceAdminSubentry OBJECT-CLASS ::= {
+ KIND auxiliary
+ MUST CONTAIN {searchRules}
+ ID id-sc-serviceAdminSubentry
+}
+
+searchRules ATTRIBUTE ::= {
+ WITH SYNTAX SearchRuleDescription
+ EQUALITY MATCHING RULE integerFirstComponentMatch
+ USAGE directoryOperation
+ ID id-oa-searchRules
+}
+
+SearchRuleDescription ::= SEQUENCE {
+ COMPONENTS OF SearchRule,
+ name [28] SET SIZE (1..MAX) OF UnboundedDirectoryString OPTIONAL,
+ description [29] UnboundedDirectoryString OPTIONAL
+}
+
+hierarchyLevel ATTRIBUTE ::= {
+ WITH SYNTAX HierarchyLevel
+ EQUALITY MATCHING RULE integerMatch
+ ORDERING MATCHING RULE integerOrderingMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-hierarchyLevel
+}
+
+HierarchyLevel ::= INTEGER
+
+hierarchyBelow ATTRIBUTE ::= {
+ WITH SYNTAX HierarchyBelow
+ EQUALITY MATCHING RULE booleanMatch
+ SINGLE VALUE TRUE
+ NO USER MODIFICATION TRUE
+ USAGE directoryOperation
+ ID id-oa-hierarchyBelow
+}
+
+HierarchyBelow ::= BOOLEAN
+
+hierarchyParent ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ USAGE directoryOperation
+ ID id-oa-hierarchyParent
+}
+
+hierarchyTop ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ SINGLE VALUE TRUE
+ USAGE directoryOperation
+ ID id-oa-hierarchyTop
+}
+
+-- object identifier assignments
+-- object classes
+id-oc-top OBJECT IDENTIFIER ::=
+ {id-oc 0}
+
+id-oc-alias OBJECT IDENTIFIER ::= {id-oc 1}
+
+id-oc-parent OBJECT IDENTIFIER ::= {id-oc 28}
+
+id-oc-child OBJECT IDENTIFIER ::= {id-oc 29}
+
+-- attributes
+id-at-objectClass OBJECT IDENTIFIER ::= {id-at 0}
+
+id-at-aliasedEntryName OBJECT IDENTIFIER ::= {id-at 1}
+
+-- matching rules
+id-mr-objectIdentifierMatch OBJECT IDENTIFIER ::= {id-mr 0}
+
+id-mr-distinguishedNameMatch OBJECT IDENTIFIER ::= {id-mr 1}
+
+-- operational attributes
+id-oa-excludeAllCollectiveAttributes OBJECT IDENTIFIER ::=
+ {id-oa 0}
+
+id-oa-createTimestamp OBJECT IDENTIFIER ::= {id-oa 1}
+
+id-oa-modifyTimestamp OBJECT IDENTIFIER ::= {id-oa 2}
+
+id-oa-creatorsName OBJECT IDENTIFIER ::= {id-oa 3}
+
+id-oa-modifiersName OBJECT IDENTIFIER ::= {id-oa 4}
+
+id-oa-administrativeRole OBJECT IDENTIFIER ::= {id-oa 5}
+
+id-oa-subtreeSpecification OBJECT IDENTIFIER ::= {id-oa 6}
+
+id-oa-collectiveExclusions OBJECT IDENTIFIER ::= {id-oa 7}
+
+id-oa-subschemaTimestamp OBJECT IDENTIFIER ::= {id-oa 8}
+
+id-oa-hasSubordinates OBJECT IDENTIFIER ::= {id-oa 9}
+
+id-oa-subschemaSubentryList OBJECT IDENTIFIER ::= {id-oa 10}
+
+id-oa-accessControlSubentryList OBJECT IDENTIFIER ::= {id-oa 11}
+
+id-oa-collectiveAttributeSubentryList OBJECT IDENTIFIER ::= {id-oa 12}
+
+id-oa-contextDefaultSubentryList OBJECT IDENTIFIER ::= {id-oa 13}
+
+id-oa-contextAssertionDefault OBJECT IDENTIFIER ::= {id-oa 14}
+
+id-oa-serviceAdminSubentryList OBJECT IDENTIFIER ::= {id-oa 15}
+
+id-oa-searchRules OBJECT IDENTIFIER ::= {id-oa 16}
+
+id-oa-hierarchyLevel OBJECT IDENTIFIER ::= {id-oa 17}
+
+id-oa-hierarchyBelow OBJECT IDENTIFIER ::= {id-oa 18}
+
+id-oa-hierarchyParent OBJECT IDENTIFIER ::= {id-oa 19}
+
+id-oa-hierarchyTop OBJECT IDENTIFIER ::= {id-oa 20}
+
+-- subentry classes
+id-sc-subentry OBJECT IDENTIFIER ::= {id-sc 0}
+
+id-sc-accessControlSubentry OBJECT IDENTIFIER ::= {id-sc 1}
+
+id-sc-collectiveAttributeSubentry OBJECT IDENTIFIER ::= {id-sc 2}
+
+id-sc-contextAssertionSubentry OBJECT IDENTIFIER ::= {id-sc 3}
+
+id-sc-serviceAdminSubentry OBJECT IDENTIFIER ::= {id-sc 4}
+
+-- Name forms
+id-nf-subentryNameForm OBJECT IDENTIFIER ::= {id-nf 16}
+
+-- administrative roles
+id-ar-autonomousArea OBJECT IDENTIFIER ::= {id-ar 1}
+
+id-ar-accessControlSpecificArea OBJECT IDENTIFIER ::= {id-ar 2}
+
+id-ar-accessControlInnerArea OBJECT IDENTIFIER ::= {id-ar 3}
+
+id-ar-subschemaAdminSpecificArea OBJECT IDENTIFIER ::= {id-ar 4}
+
+id-ar-collectiveAttributeSpecificArea OBJECT IDENTIFIER ::= {id-ar 5}
+
+id-ar-collectiveAttributeInnerArea OBJECT IDENTIFIER ::= {id-ar 6}
+
+id-ar-contextDefaultSpecificArea OBJECT IDENTIFIER ::= {id-ar 7}
+
+id-ar-serviceSpecificArea OBJECT IDENTIFIER ::= {id-ar 8}
+
+END -- InformationFramework
diff --git a/lib/public_key/asn1/Makefile b/lib/public_key/asn1/Makefile
index 4bd043ee5d..957c332cad 100644
--- a/lib/public_key/asn1/Makefile
+++ b/lib/public_key/asn1/Makefile
@@ -40,7 +40,8 @@ RELSYSDIR = $(RELEASE_PATH)/lib/public_key-$(VSN)
ASN_TOP = OTP-PUB-KEY PKCS-FRAME
ASN_MODULES = PKIX1Explicit88 PKIX1Implicit88 PKIX1Algorithms88 \
- PKIXAttributeCertificate PKCS-1 PKCS-3 PKCS-8 PKCS5v2-0 OTP-PKIX
+ PKIXAttributeCertificate PKCS-1 PKCS-3 PKCS-7 PKCS-8 PKCS-10 PKCS5v2-0 OTP-PKIX \
+ InformationFramework
ASN_ASNS = $(ASN_MODULES:%=%.asn1)
ASN_ERLS = $(ASN_TOP:%=%.erl)
ASN_HRLS = $(ASN_TOP:%=%.hrl)
@@ -112,9 +113,12 @@ OTP-PUB-KEY.asn1db: PKIX1Algorithms88.asn1 \
PKIXAttributeCertificate.asn1 \
PKCS-1.asn1\
PKCS-3.asn1\
+ PKCS-7.asn1\
+ PKCS-10.asn1\
+ InformationFramework.asn1\
OTP-PKIX.asn1
$(EBIN)/PKCS-FRAME.beam: PKCS-FRAME.erl PKCS-FRAME.hrl
-PKCS-FRAME.erl PKCS-FRAME.hrl: PKCS-FRAME.asn1db
-PKCS-FRAME.asn1db: PKCS-8.asn1\
- PKCS5v2-0.asn1 \
\ No newline at end of file
+PKCS-FRAME.erl PKCS-FRAME.hrl: PKCS-FRAME.asn1db
+PKCS-FRAME.asn1db: PKCS5v2-0.asn1\
+ PKCS-8.asn1\
diff --git a/lib/public_key/asn1/OTP-PUB-KEY.set.asn b/lib/public_key/asn1/OTP-PUB-KEY.set.asn
index 5c76d13115..f8fb318c93 100644
--- a/lib/public_key/asn1/OTP-PUB-KEY.set.asn
+++ b/lib/public_key/asn1/OTP-PUB-KEY.set.asn
@@ -6,3 +6,5 @@ PKIX1Algorithms88.asn1
PKCS-1.asn1
PKCS-3.asn1
DSS.asn1
+PKCS-7.asn1
+PKCS-10.asn1
diff --git a/lib/public_key/asn1/PKCS-10.asn1 b/lib/public_key/asn1/PKCS-10.asn1
new file mode 100644
index 0000000000..333104d230
--- /dev/null
+++ b/lib/public_key/asn1/PKCS-10.asn1
@@ -0,0 +1,70 @@
+PKCS-10 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
+pkcs-10(10) modules(1) pkcs-10(1)}
+
+-- $Revision: 1.3 $ --
+
+DEFINITIONS IMPLICIT TAGS ::=
+
+BEGIN
+
+-- EXPORTS All --
+-- All types and values defined in this module are exported for use
+-- in other ASN.1 modules.
+
+IMPORTS
+
+--informationFramework, authenticationFramework
+-- FROM UsefulDefinitions {joint-iso-itu-t(2) ds(5) module(1)
+-- usefulDefinitions(0) 3}
+
+ ATTRIBUTE
+ FROM InformationFramework informationFramework
+
+ Name
+ FROM PKIX1Explicit88 --InformationFramework informationFramework
+
+ ALGORITHM
+ FROM PKCS-7; --AuthenticationFramework authenticationFramework;
+
+-- Certificate requests
+
+CertificationRequestInfo ::= SEQUENCE {
+ version INTEGER { v1(0) } (v1,...),
+ subject Name,
+ subjectPKInfo SubjectPublicKeyInfo-PKCS-10{{ PKInfoAlgorithms }},
+ attributes [0] Attributes{{ CRIAttributes }}
+}
+
+SubjectPublicKeyInfo-PKCS-10 {ALGORITHM: IOSet} ::= SEQUENCE {
+ algorithm AlgorithmIdentifierPKCS-10{{IOSet}},
+ subjectPublicKey BIT STRING
+}
+
+PKInfoAlgorithms ALGORITHM ::= {
+ ... -- add any locally defined algorithms here -- }
+
+Attributes { ATTRIBUTE:IOSet } ::= SET OF AttributePKCS-10{{ IOSet }}
+
+CRIAttributes ATTRIBUTE ::= {
+... -- add any locally defined attributes here -- }
+
+AttributePKCS-10 { ATTRIBUTE:IOSet } ::= SEQUENCE {
+ type ATTRIBUTE.&id({IOSet}),
+ values SET SIZE(1..MAX) OF ATTRIBUTE.&Type({IOSet}{@type})
+}
+
+CertificationRequest ::= SEQUENCE {
+ certificationRequestInfo CertificationRequestInfo,
+ signatureAlgorithm AlgorithmIdentifierPKCS-10{{ SignatureAlgorithms }},
+ signature BIT STRING
+}
+
+AlgorithmIdentifierPKCS-10 {ALGORITHM:IOSet } ::= SEQUENCE {
+ algorithm ALGORITHM.&id({IOSet}),
+ parameters ALGORITHM.&Type({IOSet}{@algorithm}) OPTIONAL
+}
+
+SignatureAlgorithms ALGORITHM ::= {
+ ... -- add any locally defined algorithms here -- }
+
+END
diff --git a/lib/public_key/asn1/PKCS-7.asn1 b/lib/public_key/asn1/PKCS-7.asn1
new file mode 100644
index 0000000000..a6dfd57d80
--- /dev/null
+++ b/lib/public_key/asn1/PKCS-7.asn1
@@ -0,0 +1,387 @@
+PKCS-7 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-7(7)
+ modules(0) pkcs-7(1)}
+
+DEFINITIONS EXPLICIT TAGS ::=
+BEGIN
+
+--
+-- 3. Definitions
+--
+
+-- EXPORTS All;
+
+IMPORTS
+
+informationFramework, authenticationFramework
+ FROM UsefulDefinitions {joint-iso-itu-t ds(5) module(1)
+ usefulDefinitions(0) 3}
+
+ ATTRIBUTE
+ FROM InformationFramework informationFramework
+
+ Name, Certificate, CertificateSerialNumber,
+ CertificateList, Time
+ FROM PKIX1Explicit88; -- AuthenticationFramework authenticationFramework;
+
+-- contentType, messageDigest, signingTime
+-- , counterSignature
+-- FROM PKCS-9 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1)
+-- pkcs-9(9) modules(0) pkcs-9(1)};
+--
+-- 6. Useful types
+--
+
+-- inlined from AuthenticationFramework
+
+ALGORITHM ::= CLASS {&Type OPTIONAL,
+ &id OBJECT IDENTIFIER UNIQUE
+}WITH SYNTAX {[&Type]
+ IDENTIFIED BY &id
+}
+
+-- inlined from PKCS-9
+
+pkcs-9 OBJECT IDENTIFIER ::= {iso(1) member-body(2) us(840)
+ rsadsi(113549) pkcs(1) 9}
+
+contentType ATTRIBUTE ::= {
+ WITH SYNTAX ContentType
+-- EQUALITY MATCHING RULE objectIdentifierMatch
+ SINGLE VALUE TRUE
+ ID pkcs-9-at-contentType
+}
+
+pkcs-9-at-contentType OBJECT IDENTIFIER ::= {pkcs-9 3}
+pkcs-9-at-messageDigest OBJECT IDENTIFIER ::= {pkcs-9 4}
+pkcs-9-at-signingTime OBJECT IDENTIFIER ::= {pkcs-9 5}
+pkcs-9-at-counterSignature OBJECT IDENTIFIER ::= {pkcs-9 6}
+
+counterSignature ATTRIBUTE ::= {
+ WITH SYNTAX SignerInfo
+ ID pkcs-9-at-counterSignature
+}
+messageDigest ATTRIBUTE ::= {
+ WITH SYNTAX MessageDigest
+-- EQUALITY MATCHING RULE octetStringMatch
+ SINGLE VALUE TRUE
+ ID pkcs-9-at-messageDigest
+}
+
+MessageDigest ::= OCTET STRING
+
+signingTime ATTRIBUTE ::= {
+ WITH SYNTAX SigningTime
+-- EQUALITY MATCHING RULE signingTimeMatch
+ SINGLE VALUE TRUE
+ ID pkcs-9-at-signingTime
+}
+
+SigningTime ::= Time -- imported from ISO/IEC 9594-8
+
+
+-- Also defined in X.509
+-- Redeclared here as a parameterized type
+AlgorithmIdentifierPKSC-7 {ALGORITHM:IOSet} ::= SEQUENCE {
+ algorithm ALGORITHM.&id({IOSet}),
+ parameters ALGORITHM.&Type({IOSet}{@algorithm}) OPTIONAL
+}
+
+-- Also defined in X.501
+-- Redeclared here as a parameterized type
+AttributePKCS-7 { ATTRIBUTE:IOSet } ::= SEQUENCE {
+ type ATTRIBUTE.&id({IOSet}),
+ values SET SIZE (1..MAX) OF ATTRIBUTE.&Type({IOSet}{@type})
+}
+
+CertificateRevocationLists ::=
+ SET OF CertificateList
+
+Certificates ::=
+ SEQUENCE OF Certificate
+
+CRLSequence ::=
+ SEQUENCE OF CertificateList
+
+ContentEncryptionAlgorithmIdentifier ::=
+ AlgorithmIdentifierPKSC-7 {{ContentEncryptionAlgorithms}}
+
+ContentEncryptionAlgorithms ALGORITHM ::= {
+ ... -- add any application-specific algorithms here
+}
+
+DigestAlgorithmIdentifier ::=
+ AlgorithmIdentifierPKSC-7 {{DigestAlgorithms}}
+
+DigestAlgorithms ALGORITHM ::= {
+ ... -- add any application-specific algorithms here
+}
+
+DigestEncryptionAlgorithmIdentifier ::=
+ AlgorithmIdentifierPKSC-7 {{DigestEncryptionAlgorithms}}
+
+DigestEncryptionAlgorithms ALGORITHM ::= {
+ ... -- add any application-specific algorithms here
+}
+
+ExtendedCertificateOrCertificate ::= CHOICE {
+ certificate Certificate, -- X.509
+ extendedCertificate [0] IMPLICIT ExtendedCertificate -- PKCS#6
+}
+
+ExtendedCertificate ::= Certificate -- simplification; the PKCS #6 ExtendedCertificate structure is not modelled here
+
+ExtendedCertificatesAndCertificates ::=
+ SET OF ExtendedCertificateOrCertificate
+
+IssuerAndSerialNumber ::= SEQUENCE {
+ issuer Name,
+ serialNumber CertificateSerialNumber
+}
+
+KeyEncryptionAlgorithmIdentifier ::=
+ AlgorithmIdentifierPKSC-7 {{KeyEncryptionAlgorithms}}
+
+KeyEncryptionAlgorithms ALGORITHM ::= {
+ ... -- add any application-specific algorithms here
+}
+
+--
+-- 7. General syntax
+--
+
+ContentInfo ::= SEQUENCE {
+-- contentType ContentType,
+ contentType CONTENTS.&id({Contents}),
+ content [0] EXPLICIT CONTENTS.&Type({Contents}{@contentType})
+OPTIONAL
+}
+
+CONTENTS ::= TYPE-IDENTIFIER
+
+Contents CONTENTS ::= {
+ {Data IDENTIFIED BY data} |
+ {SignedData IDENTIFIED BY signedData} |
+ {EnvelopedData IDENTIFIED BY envelopedData} |
+ {SignedAndEnvelopedData IDENTIFIED BY signedAndEnvelopedData} |
+ {DigestedData IDENTIFIED BY digestedData} |
+ {EncryptedData IDENTIFIED BY encryptedData},
+ ... -- add any application-specific types/contents here
+}
+
+ContentType ::= CONTENTS.&id({Contents})
+
+--
+-- 8. Data content type
+--
+
+Data ::= OCTET STRING
+
+--
+-- 9. Signed-data content type
+--
+
+SignedData ::= SEQUENCE {
+-- version INTEGER {sdVer1(1), sdVer2(2)} (sdVer1 | sdVer2),
+ version INTEGER {sdVer1(1), sdVer2(2)},
+ digestAlgorithms
+ DigestAlgorithmIdentifiers,
+ contentInfo ContentInfo,
+ certificates CHOICE {
+ certSet [0] IMPLICIT ExtendedCertificatesAndCertificates,
+ certSequence [2] IMPLICIT Certificates
+ } OPTIONAL,
+ crls CHOICE {
+ crlSet [1] IMPLICIT CertificateRevocationLists,
+ crlSequence [3] IMPLICIT CRLSequence
+ } OPTIONAL,
+ signerInfos SignerInfos
+} (WITH COMPONENTS { ..., version (sdVer1),
+ digestAlgorithms (WITH COMPONENTS { ..., daSet PRESENT }),
+ certificates (WITH COMPONENTS { ..., certSequence ABSENT }),
+ crls (WITH COMPONENTS { ..., crlSequence ABSENT }),
+ signerInfos (WITH COMPONENTS { ..., siSet PRESENT })
+ } |
+ WITH COMPONENTS { ..., version (sdVer2),
+ digestAlgorithms (WITH COMPONENTS { ..., daSequence PRESENT }),
+ certificates (WITH COMPONENTS { ..., certSet ABSENT }),
+ crls (WITH COMPONENTS { ..., crlSet ABSENT }),
+ signerInfos (WITH COMPONENTS { ..., siSequence PRESENT })
+})
+
+SignerInfos ::= CHOICE {
+ siSet SET OF SignerInfo,
+ siSequence SEQUENCE OF SignerInfo
+}
+
+DigestAlgorithmIdentifiers ::= CHOICE {
+ daSet SET OF DigestAlgorithmIdentifier,
+ daSequence SEQUENCE OF DigestAlgorithmIdentifier
+}
+
+SignerInfo ::= SEQUENCE {
+-- version INTEGER {siVer1(1), siVer2(2)} (siVer1 | siVer2),
+ version INTEGER {siVer1(1), siVer2(2)},
+ issuerAndSerialNumber
+ IssuerAndSerialNumber,
+ digestAlgorithm DigestAlgorithmIdentifier,
+ authenticatedAttributes CHOICE {
+ aaSet [0] IMPLICIT SET OF AttributePKCS-7 {{Authenticated}},
+ aaSequence [2] EXPLICIT SEQUENCE OF AttributePKCS-7 {{Authenticated}}
+ -- Explicit, because it is easier to compute the digest over a sequence of attributes and then
+ -- reuse the encoded sequence in aaSequence.
+ } OPTIONAL,
+ digestEncryptionAlgorithm
+ DigestEncryptionAlgorithmIdentifier,
+ encryptedDigest EncryptedDigest,
+ unauthenticatedAttributes CHOICE {
+ uaSet [1] IMPLICIT SET OF AttributePKCS-7 {{Unauthenticated}},
+ uaSequence [3] IMPLICIT SEQUENCE OF AttributePKCS-7 {{Unauthenticated}}
+ } OPTIONAL
+} (WITH COMPONENTS { ..., version (siVer1),
+ authenticatedAttributes (WITH COMPONENTS { ..., aaSequence ABSENT }),
+ unauthenticatedAttributes (WITH COMPONENTS { ..., uaSequence ABSENT })
+} | WITH COMPONENTS { ..., version (siVer2),
+ authenticatedAttributes (WITH COMPONENTS { ..., aaSet ABSENT }),
+ unauthenticatedAttributes (WITH COMPONENTS { ..., uaSet ABSENT })
+})
+
+Authenticated ATTRIBUTE ::= {
+ contentType |
+ messageDigest,
+ ..., -- add application-specific attributes here
+ signingTime
+}
+
+Unauthenticated ATTRIBUTE ::= {
+ contentType |
+ messageDigest,
+ ..., -- add application-specific attributes here
+ counterSignature
+}
+
+EncryptedDigest ::= OCTET STRING
+
+DigestInfo ::= SEQUENCE {
+ digestAlgorithm DigestAlgorithmIdentifier,
+ digest Digest
+}
+
+Digest ::= OCTET STRING
+
+--
+-- 10. Enveloped-data content type
+--
+
+EnvelopedData ::= SEQUENCE {
+-- version INTEGER {edVer0(0), edVer1(1)} (edVer0 | edVer1),
+ version INTEGER {edVer0(0), edVer1(1)},
+ recipientInfos RecipientInfos,
+ encryptedContentInfo
+ EncryptedContentInfo
+} (WITH COMPONENTS { ..., version (edVer0),
+ recipientInfos (WITH COMPONENTS { ..., riSet PRESENT })
+} | WITH COMPONENTS { ..., version (edVer1),
+ recipientInfos (WITH COMPONENTS { ..., riSequence PRESENT })
+})
+
+RecipientInfos ::= CHOICE {
+ riSet SET OF RecipientInfo,
+ riSequence SEQUENCE OF RecipientInfo
+}
+
+EncryptedContentInfo ::= SEQUENCE {
+ contentType ContentType,
+ contentEncryptionAlgorithm
+ ContentEncryptionAlgorithmIdentifier,
+ encryptedContent
+ [0] IMPLICIT EncryptedContent OPTIONAL
+}
+
+EncryptedContent ::= OCTET STRING
+
+RecipientInfo ::= SEQUENCE {
+-- version INTEGER {riVer0(0)} (riVer0),
+ version INTEGER {riVer0(0)},
+ issuerAndSerialNumber
+ IssuerAndSerialNumber,
+ keyEncryptionAlgorithm
+ KeyEncryptionAlgorithmIdentifier,
+ encryptedKey EncryptedKey
+}
+
+EncryptedKey ::= OCTET STRING
+
+--
+-- 11. Signed-and-enveloped-data content type
+--
+
+SignedAndEnvelopedData ::= SEQUENCE {
+-- version INTEGER {seVer1(1), seVer2(2)} (seVer1 | seVer2),
+ version INTEGER {seVer1(1), seVer2(2)},
+ recipientInfos RecipientInfos,
+ digestAlgorithms
+ DigestAlgorithmIdentifiers,
+ encryptedContentInfo
+ EncryptedContentInfo,
+ certificates CHOICE {
+ certSet [0] IMPLICIT ExtendedCertificatesAndCertificates,
+ certSequence [2] IMPLICIT Certificates
+ } OPTIONAL,
+ crls CHOICE {
+ crlSet [1] IMPLICIT CertificateRevocationLists,
+ crlSequence [3] IMPLICIT CRLSequence
+ } OPTIONAL,
+ signerInfos SignerInfos
+} (WITH COMPONENTS { ..., version (seVer1),
+ recipientInfos (WITH COMPONENTS { ..., riSet PRESENT }),
+ digestAlgorithms (WITH COMPONENTS { ..., daSet PRESENT }),
+ certificates (WITH COMPONENTS { ..., certSequence ABSENT }),
+ crls (WITH COMPONENTS { ..., crlSequence ABSENT }),
+ signerInfos (WITH COMPONENTS { ..., siSet PRESENT })
+} |
+ WITH COMPONENTS { ..., version (seVer2),
+ recipientInfos (WITH COMPONENTS { ..., riSequence PRESENT }),
+ digestAlgorithms (WITH COMPONENTS { ..., daSequence PRESENT }),
+ certificates (WITH COMPONENTS { ..., certSet ABSENT }),
+ crls (WITH COMPONENTS { ..., crlSet ABSENT }),
+ signerInfos (WITH COMPONENTS { ..., siSequence PRESENT })
+})
+
+--
+-- 12. Digested-data content type
+--
+
+DigestedData ::= SEQUENCE {
+-- version INTEGER {ddVer0(0)} (ddVer0),
+ version INTEGER {ddVer0(0)},
+ digestAlgorithm DigestAlgorithmIdentifier,
+ contentInfo ContentInfo,
+ digest Digest
+}
+
+--
+-- 13. Encrypted-data content type
+--
+
+EncryptedData ::= SEQUENCE {
+-- version INTEGER {edVer0(0)} (edVer0),
+ version INTEGER {edVer0(0)},
+ encryptedContentInfo EncryptedContentInfo
+}
+
+--
+-- 14. Object Identifiers
+--
+
+pkcs-7 OBJECT IDENTIFIER ::=
+ { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 7 }
+data OBJECT IDENTIFIER ::= { pkcs-7 1 }
+signedData OBJECT IDENTIFIER ::= { pkcs-7 2 }
+envelopedData OBJECT IDENTIFIER ::= { pkcs-7 3 }
+signedAndEnvelopedData OBJECT IDENTIFIER ::= { pkcs-7 4 }
+digestedData OBJECT IDENTIFIER ::= { pkcs-7 5 }
+encryptedData OBJECT IDENTIFIER ::= { pkcs-7 6 }
+
+END
diff --git a/lib/public_key/asn1/PKIX1Explicit88.asn1 b/lib/public_key/asn1/PKIX1Explicit88.asn1
index 03e9da3e05..91758d7269 100644
--- a/lib/public_key/asn1/PKIX1Explicit88.asn1
+++ b/lib/public_key/asn1/PKIX1Explicit88.asn1
@@ -206,13 +206,12 @@ DomainComponent ::= IA5String
-- Legacy attributes
-pkcs-9 OBJECT IDENTIFIER ::=
- { iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) 9 }
-
id-emailAddress AttributeType ::= { pkcs-9 1 }
EmailAddress ::= IA5String (SIZE (1..ub-emailaddress-length))
+-- Legacy attributes
+
-- naming data types --
Name ::= CHOICE { -- only one possibility for now --
diff --git a/lib/public_key/asn1/SelectedAttributeTypes.asn1 b/lib/public_key/asn1/SelectedAttributeTypes.asn1
new file mode 100644
index 0000000000..3ef7077370
--- /dev/null
+++ b/lib/public_key/asn1/SelectedAttributeTypes.asn1
@@ -0,0 +1,1575 @@
+SelectedAttributeTypes {joint-iso-itu-t ds(5) module(1)
+ selectedAttributeTypes(5) 6} DEFINITIONS ::=
+BEGIN
+
+-- EXPORTS All
+-- The types and values defined in this module are exported for use in the other ASN.1 modules contained
+-- within the Directory Specifications, and for the use of other applications which will use them to access
+-- Directory services. Other applications may use them for their own purposes, but this will not constrain
+-- extensions and modifications needed to maintain or improve the Directory service.
+IMPORTS
+ -- from ITU-T Rec. X.501 | ISO/IEC 9594-2
+ directoryAbstractService, id-at, id-avc, id-cat, id-mr, id-not, id-pr,
+ informationFramework, serviceAdministration
+ FROM UsefulDefinitions {joint-iso-itu-t ds(5) module(1)
+ usefulDefinitions(0) 6}
+ Attribute{}, ATTRIBUTE, AttributeType, AttributeValueAssertion, CONTEXT,
+ ContextAssertion, DistinguishedName, distinguishedNameMatch,
+ MAPPING-BASED-MATCHING{}, MATCHING-RULE, OBJECT-CLASS,
+ objectIdentifierMatch, SupportedAttributes
+ FROM InformationFramework informationFramework
+ AttributeCombination, ContextCombination, MRMapping
+ FROM ServiceAdministration serviceAdministration
+ -- from ITU-T Rec. X.511 | ISO/IEC 9594-3
+ FilterItem, HierarchySelections, SearchControlOptions, ServiceControlOptions
+ FROM DirectoryAbstractService directoryAbstractService
+ -- from ITU-T Rec. X.411 | ISO/IEC 10021-4
+ G3FacsimileNonBasicParameters
+ FROM MTSAbstractService {joint-iso-itu-t mhs(6) mts(3) modules(0)
+ mts-abstract-service(1) version-1999(1)};
+
+/* from IETF RFC 3727
+
+The following import is provided for information only (see 7.2.16); it is not referenced by any ASN.1 construct within these Directory Specifications. Note that the ASN.1 module in RFC 3727 imports from the InformationFramework module of edition 4 of ITU-T Rec. X.501 | ISO/IEC 9594-2. A specification importing from both these Directory Specifications and from RFC 3727 should take corrective action, e.g., by making a copy of the ASN.1 module of
+RFC 3727 and then updating the IMPORT statement.
+
+ allComponentsMatch, componentFilterMatch, directoryComponentsMatch, presentMatch, rdnMatch
+ FROM ComponentMatching {iso(1) 2 36 79672281 xed(3) module (0)
+ component-matching(4)} */
+-- Directory string type
+UnboundedDirectoryString ::= CHOICE {
+ teletexString TeletexString(SIZE (1..MAX)),
+ printableString PrintableString(SIZE (1..MAX)),
+ bmpString BMPString(SIZE (1..MAX)),
+ universalString UniversalString(SIZE (1..MAX)),
+ uTF8String UTF8String(SIZE (1..MAX))
+}
+
+DirectoryString{INTEGER:maxSize} ::= CHOICE {
+ teletexString TeletexString(SIZE (1..maxSize)),
+ printableString PrintableString(SIZE (1..maxSize)),
+ bmpString BMPString(SIZE (1..maxSize)),
+ universalString UniversalString(SIZE (1..maxSize)),
+ uTF8String UTF8String(SIZE (1..maxSize))
+}
+
+-- Attribute types
+knowledgeInformation ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ ID id-at-knowledgeInformation
+}
+
+name ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-name
+}
+
+commonName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-commonName
+}
+
+surname ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-surname
+}
+
+givenName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-givenName
+}
+
+initials ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-initials
+}
+
+generationQualifier ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-generationQualifier
+}
+
+uniqueIdentifier ATTRIBUTE ::= {
+ WITH SYNTAX UniqueIdentifier
+ EQUALITY MATCHING RULE bitStringMatch
+ ID id-at-uniqueIdentifier
+}
+
+UniqueIdentifier ::= BIT STRING
+
+dnQualifier ATTRIBUTE ::= {
+ WITH SYNTAX PrintableString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ ORDERING MATCHING RULE caseIgnoreOrderingMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-dnQualifier
+}
+
+serialNumber ATTRIBUTE ::= {
+ WITH SYNTAX PrintableString(SIZE (1..MAX))
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-serialNumber
+}
+
+pseudonym ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-pseudonym
+}
+
+uUIDPair ATTRIBUTE ::= {
+ WITH SYNTAX UUIDPair
+ EQUALITY MATCHING RULE uUIDPairMatch
+ ID id-at-uuidpair
+}
+
+UUIDPair ::= SEQUENCE {issuerUUID UUID,
+ subjectUUID UUID
+}
+
+UUID ::= OCTET STRING(SIZE (16)) -- UUID format only
+
+
+countryName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX CountryName
+ SINGLE VALUE TRUE
+ ID id-at-countryName
+}
+
+CountryName ::= PrintableString(SIZE (2)) -- ISO 3166 codes only
+
+
+localityName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-localityName
+}
+
+collectiveLocalityName ATTRIBUTE ::= {
+ SUBTYPE OF localityName
+ COLLECTIVE TRUE
+ ID id-at-collectiveLocalityName
+}
+
+stateOrProvinceName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-stateOrProvinceName
+}
+
+collectiveStateOrProvinceName ATTRIBUTE ::= {
+ SUBTYPE OF stateOrProvinceName
+ COLLECTIVE TRUE
+ ID id-at-collectiveStateOrProvinceName
+}
+
+streetAddress ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-streetAddress
+}
+
+collectiveStreetAddress ATTRIBUTE ::= {
+ SUBTYPE OF streetAddress
+ COLLECTIVE TRUE
+ ID id-at-collectiveStreetAddress
+}
+
+houseIdentifier ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-houseIdentifier
+}
+
+organizationName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-organizationName
+}
+
+collectiveOrganizationName ATTRIBUTE ::= {
+ SUBTYPE OF organizationName
+ COLLECTIVE TRUE
+ ID id-at-collectiveOrganizationName
+}
+
+organizationalUnitName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-organizationalUnitName
+}
+
+collectiveOrganizationalUnitName ATTRIBUTE ::= {
+ SUBTYPE OF organizationalUnitName
+ COLLECTIVE TRUE
+ ID id-at-collectiveOrganizationalUnitName
+}
+
+title ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-title
+}
+
+description ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-description
+}
+
+searchGuide ATTRIBUTE ::= {WITH SYNTAX Guide
+ ID id-at-searchGuide
+}
+
+Guide ::= SET {
+ objectClass [0] OBJECT-CLASS.&id OPTIONAL,
+ criteria [1] Criteria
+}
+
+Criteria ::= CHOICE {
+ type [0] CriteriaItem,
+ and [1] SET OF Criteria,
+ or [2] SET OF Criteria,
+ not [3] Criteria
+}
+
+CriteriaItem ::= CHOICE {
+ equality [0] AttributeType,
+ substrings [1] AttributeType,
+ greaterOrEqual [2] AttributeType,
+ lessOrEqual [3] AttributeType,
+ approximateMatch [4] AttributeType
+}
+
+enhancedSearchGuide ATTRIBUTE ::= {
+ WITH SYNTAX EnhancedGuide
+ ID id-at-enhancedSearchGuide
+}
+
+EnhancedGuide ::= SEQUENCE {
+ objectClass [0] OBJECT-CLASS.&id,
+ criteria [1] Criteria,
+ subset
+ [2] INTEGER {baseObject(0), oneLevel(1), wholeSubtree(2)} DEFAULT oneLevel
+}
+
+businessCategory ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-businessCategory
+}
+
+postalAddress ATTRIBUTE ::= {
+ WITH SYNTAX PostalAddress
+ EQUALITY MATCHING RULE caseIgnoreListMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreListSubstringsMatch
+ ID id-at-postalAddress
+}
+
+PostalAddress ::= SEQUENCE SIZE (1..MAX) OF UnboundedDirectoryString
+
+collectivePostalAddress ATTRIBUTE ::= {
+ SUBTYPE OF postalAddress
+ COLLECTIVE TRUE
+ ID id-at-collectivePostalAddress
+}
+
+postalCode ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-postalCode
+}
+
+collectivePostalCode ATTRIBUTE ::= {
+ SUBTYPE OF postalCode
+ COLLECTIVE TRUE
+ ID id-at-collectivePostalCode
+}
+
+postOfficeBox ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-postOfficeBox
+}
+
+collectivePostOfficeBox ATTRIBUTE ::= {
+ SUBTYPE OF postOfficeBox
+ COLLECTIVE TRUE
+ ID id-at-collectivePostOfficeBox
+}
+
+physicalDeliveryOfficeName ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-physicalDeliveryOfficeName
+}
+
+collectivePhysicalDeliveryOfficeName ATTRIBUTE ::= {
+ SUBTYPE OF physicalDeliveryOfficeName
+ COLLECTIVE TRUE
+ ID id-at-collectivePhysicalDeliveryOfficeName
+}
+
+telephoneNumber ATTRIBUTE ::= {
+ WITH SYNTAX TelephoneNumber
+ EQUALITY MATCHING RULE telephoneNumberMatch
+ SUBSTRINGS MATCHING RULE telephoneNumberSubstringsMatch
+ ID id-at-telephoneNumber
+}
+
+TelephoneNumber ::= PrintableString(SIZE (1..ub-telephone-number))
+
+-- String complying with ITU-T Rec. E.123 only
+ub-telephone-number INTEGER ::=
+ 32
+
+collectiveTelephoneNumber ATTRIBUTE ::= {
+ SUBTYPE OF telephoneNumber
+ COLLECTIVE TRUE
+ ID id-at-collectiveTelephoneNumber
+}
+
+telexNumber ATTRIBUTE ::= {
+ WITH SYNTAX TelexNumber
+ ID id-at-telexNumber
+}
+
+TelexNumber ::= SEQUENCE {
+ telexNumber PrintableString(SIZE (1..ub-telex-number)),
+ countryCode PrintableString(SIZE (1..ub-country-code)),
+ answerback PrintableString(SIZE (1..ub-answerback))
+}
+
+ub-telex-number INTEGER ::= 14
+
+ub-country-code INTEGER ::= 4
+
+ub-answerback INTEGER ::= 8
+
+collectiveTelexNumber ATTRIBUTE ::= {
+ SUBTYPE OF telexNumber
+ COLLECTIVE TRUE
+ ID id-at-collectiveTelexNumber
+}
+
+facsimileTelephoneNumber ATTRIBUTE ::= {
+ WITH SYNTAX FacsimileTelephoneNumber
+ EQUALITY MATCHING RULE facsimileNumberMatch
+ SUBSTRINGS MATCHING RULE facsimileNumberSubstringsMatch
+ ID id-at-facsimileTelephoneNumber
+}
+
+FacsimileTelephoneNumber ::= SEQUENCE {
+ telephoneNumber TelephoneNumber,
+ parameters G3FacsimileNonBasicParameters OPTIONAL
+}
+
+collectiveFacsimileTelephoneNumber ATTRIBUTE ::= {
+ SUBTYPE OF facsimileTelephoneNumber
+ COLLECTIVE TRUE
+ ID id-at-collectiveFacsimileTelephoneNumber
+}
+
+x121Address ATTRIBUTE ::= {
+ WITH SYNTAX X121Address
+ EQUALITY MATCHING RULE numericStringMatch
+ SUBSTRINGS MATCHING RULE numericStringSubstringsMatch
+ ID id-at-x121Address
+}
+
+X121Address ::= NumericString(SIZE (1..ub-x121-address))
+
+-- String as defined by ITU-T Rec. X.121
+ub-x121-address INTEGER ::= 15
+
+internationalISDNNumber ATTRIBUTE ::= {
+ WITH SYNTAX InternationalISDNNumber
+ EQUALITY MATCHING RULE numericStringMatch
+ SUBSTRINGS MATCHING RULE numericStringSubstringsMatch
+ ID id-at-internationalISDNNumber
+}
+
+InternationalISDNNumber ::=
+ NumericString(SIZE (1..ub-international-isdn-number))
+
+-- String complying with ITU-T Rec. E.164 only
+ub-international-isdn-number INTEGER ::=
+ 16
+
+collectiveInternationalISDNNumber ATTRIBUTE ::= {
+ SUBTYPE OF internationalISDNNumber
+ COLLECTIVE TRUE
+ ID id-at-collectiveInternationalISDNNumber
+}
+
+registeredAddress ATTRIBUTE ::= {
+ SUBTYPE OF postalAddress
+ WITH SYNTAX PostalAddress
+ ID id-at-registeredAddress
+}
+
+destinationIndicator ATTRIBUTE ::= {
+ WITH SYNTAX DestinationIndicator
+ EQUALITY MATCHING RULE caseIgnoreMatch
+ SUBSTRINGS MATCHING RULE caseIgnoreSubstringsMatch
+ ID id-at-destinationIndicator
+}
+
+DestinationIndicator ::= PrintableString(SIZE (1..MAX))
+
+-- alphabetical characters only
+communicationsService ATTRIBUTE ::= {
+ WITH SYNTAX CommunicationsService
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-at-communicationsService
+}
+
+CommunicationsService ::= OBJECT IDENTIFIER
+
+communicationsNetwork ATTRIBUTE ::= {
+ WITH SYNTAX CommunicationsNetwork
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ SINGLE VALUE TRUE
+ ID id-at-communicationsNetwork
+}
+
+CommunicationsNetwork ::= OBJECT IDENTIFIER
+
+preferredDeliveryMethod ATTRIBUTE ::= {
+ WITH SYNTAX PreferredDeliveryMethod
+ SINGLE VALUE TRUE
+ ID id-at-preferredDeliveryMethod
+}
+
+PreferredDeliveryMethod ::=
+ SEQUENCE OF
+ INTEGER {any-delivery-method(0), mhs-delivery(1), physical-delivery(2),
+ telex-delivery(3), teletex-delivery(4), g3-facsimile-delivery(5),
+ g4-facsimile-delivery(6), ia5-terminal-delivery(7),
+ videotex-delivery(8), telephone-delivery(9)}
+
+presentationAddress ATTRIBUTE ::= {
+ WITH SYNTAX PresentationAddress
+ EQUALITY MATCHING RULE presentationAddressMatch
+ SINGLE VALUE TRUE
+ ID id-at-presentationAddress
+}
+
+PresentationAddress ::= SEQUENCE {
+ pSelector [0] OCTET STRING OPTIONAL,
+ sSelector [1] OCTET STRING OPTIONAL,
+ tSelector [2] OCTET STRING OPTIONAL,
+ nAddresses [3] SET SIZE (1..MAX) OF OCTET STRING
+}
+
+supportedApplicationContext ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-at-supportedApplicationContext
+}
+
+protocolInformation ATTRIBUTE ::= {
+ WITH SYNTAX ProtocolInformation
+ EQUALITY MATCHING RULE protocolInformationMatch
+ ID id-at-protocolInformation
+}
+
+ProtocolInformation ::= SEQUENCE {
+ nAddress OCTET STRING,
+ profiles SET OF OBJECT IDENTIFIER
+}
+
+distinguishedName ATTRIBUTE ::= {
+ WITH SYNTAX DistinguishedName
+ EQUALITY MATCHING RULE distinguishedNameMatch
+ ID id-at-distinguishedName
+}
+
+member ATTRIBUTE ::= {SUBTYPE OF distinguishedName
+ ID id-at-member
+}
+
+uniqueMember ATTRIBUTE ::= {
+ WITH SYNTAX NameAndOptionalUID
+ EQUALITY MATCHING RULE uniqueMemberMatch
+ ID id-at-uniqueMember
+}
+
+NameAndOptionalUID ::= SEQUENCE {
+ dn DistinguishedName,
+ uid UniqueIdentifier OPTIONAL
+}
+
+owner ATTRIBUTE ::= {SUBTYPE OF distinguishedName
+ ID id-at-owner
+}
+
+roleOccupant ATTRIBUTE ::= {
+ SUBTYPE OF distinguishedName
+ ID id-at-roleOccupant
+}
+
+seeAlso ATTRIBUTE ::= {SUBTYPE OF distinguishedName
+ ID id-at-seeAlso
+}
+
+dmdName ATTRIBUTE ::= {
+ SUBTYPE OF name
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-dmdName
+}
+
+-- Attributes for tag-based identification
+tagOid ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ SINGLE VALUE TRUE
+ ID id-at-tagOid
+}
+
+uiiFormat ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ SINGLE VALUE TRUE
+ ID id-at-uiiFormat
+}
+
+uiiInUrn ATTRIBUTE ::= {
+ WITH SYNTAX UTF8String
+ EQUALITY MATCHING RULE caseExactMatch
+ SINGLE VALUE TRUE
+ ID id-at-uiiInUrn
+}
+
+contentUri ATTRIBUTE ::= {
+ WITH SYNTAX UnboundedDirectoryString
+ ID id-at-contentUri
+}
+
+-- Notification attributes
+dSAProblem ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-not-dSAProblem
+}
+
+searchServiceProblem ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ SINGLE VALUE TRUE
+ ID id-not-searchServiceProblem
+}
+
+serviceType ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ SINGLE VALUE TRUE
+ ID id-not-serviceType
+}
+
+attributeTypeList ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-not-attributeTypeList
+}
+
+matchingRuleList ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-not-matchingRuleList
+}
+
+filterItem ATTRIBUTE ::= {
+ WITH SYNTAX FilterItem
+ ID id-not-filterItem
+}
+
+attributeCombinations ATTRIBUTE ::= {
+ WITH SYNTAX AttributeCombination
+ ID id-not-attributeCombinations
+}
+
+contextTypeList ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-not-contextTypeList
+}
+
+contextList ATTRIBUTE ::= {
+ WITH SYNTAX ContextAssertion
+ ID id-not-contextList
+}
+
+contextCombinations ATTRIBUTE ::= {
+ WITH SYNTAX ContextCombination
+ ID id-not-contextCombinations
+}
+
+hierarchySelectList ATTRIBUTE ::= {
+ WITH SYNTAX HierarchySelections
+ SINGLE VALUE TRUE
+ ID id-not-hierarchySelectList
+}
+
+searchControlOptionsList ATTRIBUTE ::= {
+ WITH SYNTAX SearchControlOptions
+ SINGLE VALUE TRUE
+ ID id-not-searchControlOptionsList
+}
+
+serviceControlOptionsList ATTRIBUTE ::= {
+ WITH SYNTAX ServiceControlOptions
+ SINGLE VALUE TRUE
+ ID id-not-serviceControlOptionsList
+}
+
+multipleMatchingLocalities ATTRIBUTE ::= {
+ WITH SYNTAX MultipleMatchingLocalities
+ ID id-not-multipleMatchingLocalities
+}
+
+MultipleMatchingLocalities ::= SEQUENCE {
+ matchingRuleUsed MATCHING-RULE.&id OPTIONAL,
+ attributeList SEQUENCE OF AttributeValueAssertion
+}
+
+proposedRelaxation ATTRIBUTE ::= {
+ WITH SYNTAX MRMappings
+ ID id-not-proposedRelaxation
+}
+
+MRMappings ::= SEQUENCE OF MRMapping
+
+appliedRelaxation ATTRIBUTE ::= {
+ WITH SYNTAX OBJECT IDENTIFIER
+ EQUALITY MATCHING RULE objectIdentifierMatch
+ ID id-not-appliedRelaxation
+}
+
+-- Matching rules
+caseExactMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-caseExactMatch
+}
+
+caseIgnoreMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-caseIgnoreMatch
+}
+
+caseExactOrderingMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-caseExactOrderingMatch
+}
+
+caseIgnoreOrderingMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-caseIgnoreOrderingMatch
+}
+
+caseExactSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion -- only the PrintableString choice
+ ID id-mr-caseExactSubstringsMatch
+}
+
+caseIgnoreSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-caseIgnoreSubstringsMatch
+}
+
+SubstringAssertion ::=
+ SEQUENCE OF
+ CHOICE {initial [0] UnboundedDirectoryString,
+ any [1] UnboundedDirectoryString,
+ final [2] UnboundedDirectoryString,
+ control Attribute{{SupportedAttributes}}
+ } -- Used to specify interpretation of the following items
+
+-- at most one initial and one final component
+numericStringMatch MATCHING-RULE ::= {
+ SYNTAX NumericString
+ ID id-mr-numericStringMatch
+}
+
+numericStringOrderingMatch MATCHING-RULE ::= {
+ SYNTAX NumericString
+ ID id-mr-numericStringOrderingMatch
+}
+
+numericStringSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-numericStringSubstringsMatch
+}
+
+caseIgnoreListMatch MATCHING-RULE ::= {
+ SYNTAX CaseIgnoreList
+ ID id-mr-caseIgnoreListMatch
+}
+
+CaseIgnoreList ::= SEQUENCE OF UnboundedDirectoryString
+
+caseIgnoreListSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-caseIgnoreListSubstringsMatch
+}
+
+storedPrefixMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-storedPrefixMatch
+}
+
+booleanMatch MATCHING-RULE ::= {SYNTAX BOOLEAN
+ ID id-mr-booleanMatch
+}
+
+integerMatch MATCHING-RULE ::= {SYNTAX INTEGER
+ ID id-mr-integerMatch
+}
+
+integerOrderingMatch MATCHING-RULE ::= {
+ SYNTAX INTEGER
+ ID id-mr-integerOrderingMatch
+}
+
+bitStringMatch MATCHING-RULE ::= {
+ SYNTAX BIT STRING
+ ID id-mr-bitStringMatch
+}
+
+octetStringMatch MATCHING-RULE ::= {
+ SYNTAX OCTET STRING
+ ID id-mr-octetStringMatch
+}
+
+octetStringOrderingMatch MATCHING-RULE ::= {
+ SYNTAX OCTET STRING
+ ID id-mr-octetStringOrderingMatch
+}
+
+octetStringSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX OctetSubstringAssertion
+ ID id-mr-octetStringSubstringsMatch
+}
+
+OctetSubstringAssertion ::=
+ SEQUENCE OF
+ CHOICE {initial [0] OCTET STRING,
+ any [1] OCTET STRING,
+ final [2] OCTET STRING}
+
+-- at most one initial and one final component
+telephoneNumberMatch MATCHING-RULE ::= {
+ SYNTAX TelephoneNumber
+ ID id-mr-telephoneNumberMatch
+}
+
+telephoneNumberSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-telephoneNumberSubstringsMatch
+}
+
+presentationAddressMatch MATCHING-RULE ::= {
+ SYNTAX PresentationAddress
+ ID id-mr-presentationAddressMatch
+}
+
+uniqueMemberMatch MATCHING-RULE ::= {
+ SYNTAX NameAndOptionalUID
+ ID id-mr-uniqueMemberMatch
+}
+
+protocolInformationMatch MATCHING-RULE ::= {
+ SYNTAX OCTET STRING
+ ID id-mr-protocolInformationMatch
+}
+
+facsimileNumberMatch MATCHING-RULE ::= {
+ SYNTAX TelephoneNumber
+ ID id-mr-facsimileNumberMatch
+}
+
+facsimileNumberSubstringsMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-facsimileNumberSubstringsMatch
+}
+
+uUIDPairMatch MATCHING-RULE ::= {SYNTAX UUIDPair
+ ID id-mr-uuidpairmatch
+}
+
+uTCTimeMatch MATCHING-RULE ::= {SYNTAX UTCTime
+ ID id-mr-uTCTimeMatch
+}
+
+uTCTimeOrderingMatch MATCHING-RULE ::= {
+ SYNTAX UTCTime
+ ID id-mr-uTCTimeOrderingMatch
+}
+
+generalizedTimeMatch MATCHING-RULE ::= {
+ SYNTAX GeneralizedTime
+ -- as per 46.3 b) or c) of ITU-T Rec. X.680 | ISO/IEC 8824-1
+ ID id-mr-generalizedTimeMatch
+}
+
+generalizedTimeOrderingMatch MATCHING-RULE ::= {
+ SYNTAX GeneralizedTime
+ -- as per 46.3 b) or c) of ITU-T Rec. X.680 | ISO/IEC 8824-1
+ ID id-mr-generalizedTimeOrderingMatch
+}
+
+systemProposedMatch MATCHING-RULE ::= {ID id-mr-systemProposedMatch
+}
+
+integerFirstComponentMatch MATCHING-RULE ::= {
+ SYNTAX INTEGER
+ ID id-mr-integerFirstComponentMatch
+}
+
+objectIdentifierFirstComponentMatch MATCHING-RULE ::= {
+ SYNTAX OBJECT IDENTIFIER
+ ID id-mr-objectIdentifierFirstComponentMatch
+}
+
+directoryStringFirstComponentMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-directoryStringFirstComponentMatch
+}
+
+wordMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-wordMatch
+}
+
+keywordMatch MATCHING-RULE ::= {
+ SYNTAX UnboundedDirectoryString
+ ID id-mr-keywordMatch
+}
+
+generalWordMatch MATCHING-RULE ::= {
+ SYNTAX SubstringAssertion
+ ID id-mr-generalWordMatch
+}
+
+sequenceMatchType ATTRIBUTE ::= {
+ WITH SYNTAX SequenceMatchType
+ SINGLE VALUE TRUE
+ ID id-cat-sequenceMatchType
+} -- defaulting to sequenceExact
+
+SequenceMatchType ::= ENUMERATED {
+ sequenceExact(0), sequenceDeletion(1), sequenceRestrictedDeletion(2),
+ sequencePermutation(3), sequencePermutationAndDeletion(4),
+ sequenceProviderDefined(5)}
+
+wordMatchTypes ATTRIBUTE ::= {
+ WITH SYNTAX WordMatchTypes
+ SINGLE VALUE TRUE
+ ID id-cat-wordMatchType
+} -- defaulting to wordExact
+
+WordMatchTypes ::= ENUMERATED {
+ wordExact(0), wordTruncated(1), wordPhonetic(2), wordProviderDefined(3)
+}
+
+characterMatchTypes ATTRIBUTE ::= {
+ WITH SYNTAX CharacterMatchTypes
+ SINGLE VALUE TRUE
+ ID id-cat-characterMatchTypes
+}
+
+CharacterMatchTypes ::= ENUMERATED {
+ characterExact(0), characterCaseIgnore(1), characterMapped(2)}
+
+selectedContexts ATTRIBUTE ::= {
+ WITH SYNTAX ContextAssertion
+ ID id-cat-selectedContexts
+}
+
+approximateStringMatch MATCHING-RULE ::= {ID id-mr-approximateStringMatch
+}
+
+ignoreIfAbsentMatch MATCHING-RULE ::= {ID id-mr-ignoreIfAbsentMatch
+}
+
+nullMatch MATCHING-RULE ::= {ID id-mr-nullMatch
+}
+
+ZONAL-MATCHING ::=
+ MAPPING-BASED-MATCHING{ZonalSelect, TRUE, ZonalResult, zonalMatch.&id}
+
+ZonalSelect ::= SEQUENCE OF AttributeType
+
+ZonalResult ::= ENUMERATED {
+ cannot-select-mapping(0), zero-mappings(2), multiple-mappings(3)}
+
+zonalMatch MATCHING-RULE ::= {
+ UNIQUE-MATCH-INDICATOR multipleMatchingLocalities
+ ID id-mr-zonalMatch
+}
+
+-- Contexts
+languageContext CONTEXT ::= {
+ WITH SYNTAX LanguageContextSyntax
+ ID id-avc-language
+}
+
+LanguageContextSyntax ::= PrintableString(SIZE (2..3)) -- ISO 639-2 codes only
+
+
+temporalContext CONTEXT ::= {
+ WITH SYNTAX TimeSpecification
+ ASSERTED AS TimeAssertion
+ ID id-avc-temporal
+}
+
+TimeSpecification ::= SEQUENCE {
+ time
+ CHOICE {absolute
+ SEQUENCE {startTime [0] GeneralizedTime OPTIONAL,
+ endTime [1] GeneralizedTime OPTIONAL},
+ periodic SET SIZE (1..MAX) OF Period},
+ notThisTime BOOLEAN DEFAULT FALSE,
+ timeZone TimeZone OPTIONAL
+}
+
+Period ::= SEQUENCE {
+ timesOfDay [0] SET SIZE (1..MAX) OF DayTimeBand OPTIONAL,
+ days
+ [1] CHOICE {intDay SET OF INTEGER,
+ bitDay
+ BIT STRING {sunday(0), monday(1), tuesday(2), wednesday(3),
+ thursday(4), friday(5), saturday(6)},
+ dayOf XDayOf} OPTIONAL,
+ weeks
+ [2] CHOICE {allWeeks NULL,
+ intWeek SET OF INTEGER,
+ bitWeek
+ BIT STRING {week1(0), week2(1), week3(2), week4(3), week5(4)}
+ } OPTIONAL,
+ months
+ [3] CHOICE {allMonths NULL,
+ intMonth SET OF INTEGER,
+ bitMonth
+ BIT STRING {january(0), february(1), march(2), april(3),
+ may(4), june(5), july(6), august(7),
+ september(8), october(9), november(10),
+ december(11)}} OPTIONAL,
+ years [4] SET OF INTEGER(1000..MAX) OPTIONAL
+}
+
+XDayOf ::= CHOICE {
+ first [1] NamedDay,
+ second [2] NamedDay,
+ third [3] NamedDay,
+ fourth [4] NamedDay,
+ fifth [5] NamedDay
+}
+
+NamedDay ::= CHOICE {
+ intNamedDays
+ ENUMERATED {sunday(1), monday(2), tuesday(3), wednesday(4), thursday(5),
+ friday(6), saturday(7)},
+ bitNamedDays
+ BIT STRING {sunday(0), monday(1), tuesday(2), wednesday(3), thursday(4),
+ friday(5), saturday(6)}
+}
+
+DayTimeBand ::= SEQUENCE {
+ startDayTime [0] DayTime DEFAULT {hour 0},
+ endDayTime [1] DayTime DEFAULT {hour 23, minute 59, second 59}
+}
+
+DayTime ::= SEQUENCE {
+ hour [0] INTEGER(0..23),
+ minute [1] INTEGER(0..59) DEFAULT 0,
+ second [2] INTEGER(0..59) DEFAULT 0
+}
+
+TimeZone ::= INTEGER(-12..12)
+
+TimeAssertion ::= CHOICE {
+ now NULL,
+ at GeneralizedTime,
+ between
+ SEQUENCE {startTime [0] GeneralizedTime,
+ endTime [1] GeneralizedTime OPTIONAL,
+ entirely BOOLEAN DEFAULT FALSE}
+}
+
+localeContext CONTEXT ::= {
+ WITH SYNTAX LocaleContextSyntax
+ ID id-avc-locale
+}
+
+LocaleContextSyntax ::= CHOICE {
+ localeID1 OBJECT IDENTIFIER,
+ localeID2 UnboundedDirectoryString
+}
+
+ldapAttributeOptionContext CONTEXT ::= {
+ WITH SYNTAX AttributeOptionList
+ ASSERTED AS AttributeOptionList
+ ABSENT-MATCH FALSE
+ ID id-avc-ldapAttributeOption
+}
+
+AttributeOptionList ::= SEQUENCE OF UTF8String
+
+-- Object identifier assignments
+-- object identifiers assigned in other modules are shown in comments
+-- Attributes
+-- id-at-objectClass OBJECT IDENTIFIER ::= {id-at 0}
+-- id-at-aliasedEntryName OBJECT IDENTIFIER ::= {id-at 1}
+-- id-at-encryptedAliasedEntryName OBJECT IDENTIFIER ::= {id-at 1 2}
+id-at-knowledgeInformation OBJECT IDENTIFIER ::=
+ {id-at 2}
+
+id-at-commonName OBJECT IDENTIFIER ::= {id-at 3}
+
+-- id-at-encryptedCommonName OBJECT IDENTIFIER ::= {id-at 3 2}
+id-at-surname OBJECT IDENTIFIER ::=
+ {id-at 4}
+
+-- id-at-encryptedSurname OBJECT IDENTIFIER ::= {id-at 4 2}
+id-at-serialNumber OBJECT IDENTIFIER ::=
+ {id-at 5}
+
+-- id-at-encryptedSerialNumber OBJECT IDENTIFIER ::= {id-at 5 2}
+id-at-countryName OBJECT IDENTIFIER ::=
+ {id-at 6}
+
+-- id-at-encryptedCountryName OBJECT IDENTIFIER ::= {id-at 6 2}
+id-at-localityName OBJECT IDENTIFIER ::=
+ {id-at 7}
+
+-- id-at-encryptedLocalityName OBJECT IDENTIFIER ::= {id-at 7 2}
+id-at-collectiveLocalityName OBJECT IDENTIFIER ::=
+ {id-at 7 1}
+
+-- id-at-encryptedCollectiveLocalityName OBJECT IDENTIFIER ::= {id-at 7 1 2}
+id-at-stateOrProvinceName OBJECT IDENTIFIER ::=
+ {id-at 8}
+
+-- id-at-encryptedStateOrProvinceName OBJECT IDENTIFIER ::= {id-at 8 2}
+id-at-collectiveStateOrProvinceName OBJECT IDENTIFIER ::=
+ {id-at 8 1}
+
+-- id-at-encryptedCollectiveStateOrProvinceName OBJECT IDENTIFIER ::= {id-at 8 1 2}
+id-at-streetAddress OBJECT IDENTIFIER ::=
+ {id-at 9}
+
+-- id-at-encryptedStreetAddress OBJECT IDENTIFIER ::= {id-at 9 2}
+id-at-collectiveStreetAddress OBJECT IDENTIFIER ::=
+ {id-at 9 1}
+
+-- id-at-encryptedCollectiveStreetAddress OBJECT IDENTIFIER ::= {id-at 9 1 2}
+id-at-organizationName OBJECT IDENTIFIER ::=
+ {id-at 10}
+
+-- id-at-encryptedOrganizationName OBJECT IDENTIFIER ::= {id-at 10 2}
+id-at-collectiveOrganizationName OBJECT IDENTIFIER ::=
+ {id-at 10 1}
+
+-- id-at-encryptedCollectiveOrganizationName OBJECT IDENTIFIER ::= {id-at 10 1 2}
+id-at-organizationalUnitName OBJECT IDENTIFIER ::=
+ {id-at 11}
+
+-- id-at-encryptedOrganizationalUnitName OBJECT IDENTIFIER ::= {id-at 11 2}
+id-at-collectiveOrganizationalUnitName OBJECT IDENTIFIER ::=
+ {id-at 11 1}
+
+-- id-at-encryptedCollectiveOrganizationalUnitName OBJECT IDENTIFIER ::= {id-at 11 1 2}
+id-at-title OBJECT IDENTIFIER ::=
+ {id-at 12}
+
+-- id-at-encryptedTitle OBJECT IDENTIFIER ::= {id-at 12 2}
+id-at-description OBJECT IDENTIFIER ::=
+ {id-at 13}
+
+-- id-at-encryptedDescription OBJECT IDENTIFIER ::= {id-at 13 2}
+id-at-searchGuide OBJECT IDENTIFIER ::=
+ {id-at 14}
+
+-- id-at-encryptedSearchGuide OBJECT IDENTIFIER ::= {id-at 14 2}
+id-at-businessCategory OBJECT IDENTIFIER ::=
+ {id-at 15}
+
+-- id-at-encryptedBusinessCategory OBJECT IDENTIFIER ::= {id-at 15 2}
+id-at-postalAddress OBJECT IDENTIFIER ::=
+ {id-at 16}
+
+-- id-at-encryptedPostalAddress OBJECT IDENTIFIER ::= {id-at 16 2}
+id-at-collectivePostalAddress OBJECT IDENTIFIER ::=
+ {id-at 16 1}
+
+-- id-at-encryptedCollectivePostalAddress OBJECT IDENTIFIER ::= {id-at 16 1 2}
+id-at-postalCode OBJECT IDENTIFIER ::=
+ {id-at 17}
+
+-- id-at-encryptedPostalCode OBJECT IDENTIFIER ::= {id-at 17 2}
+id-at-collectivePostalCode OBJECT IDENTIFIER ::=
+ {id-at 17 1}
+
+-- id-at-encryptedCollectivePostalCode OBJECT IDENTIFIER ::= {id-at 17 1 2}
+id-at-postOfficeBox OBJECT IDENTIFIER ::=
+ {id-at 18}
+
+id-at-collectivePostOfficeBox OBJECT IDENTIFIER ::= {id-at 18 1}
+
+-- id-at-encryptedPostOfficeBox OBJECT IDENTIFIER ::= {id-at 18 2}
+-- id-at-encryptedCollectivePostOfficeBox OBJECT IDENTIFIER ::= {id-at 18 1 2}
+id-at-physicalDeliveryOfficeName OBJECT IDENTIFIER ::=
+ {id-at 19}
+
+id-at-collectivePhysicalDeliveryOfficeName OBJECT IDENTIFIER ::= {id-at 19 1}
+
+-- id-at-encryptedPhysicalDeliveryOfficeName OBJECT IDENTIFIER ::= {id-at 19 2}
+-- id-at-encryptedCollectivePhysicalDeliveryOfficeName OBJECT IDENTIFIER ::= {id-at 19 1 2}
+id-at-telephoneNumber OBJECT IDENTIFIER ::=
+ {id-at 20}
+
+-- id-at-encryptedTelephoneNumber OBJECT IDENTIFIER ::= {id-at 20 2}
+id-at-collectiveTelephoneNumber OBJECT IDENTIFIER ::=
+ {id-at 20 1}
+
+-- id-at-encryptedCollectiveTelephoneNumber OBJECT IDENTIFIER ::= {id-at 20 1 2}
+id-at-telexNumber OBJECT IDENTIFIER ::=
+ {id-at 21}
+
+-- id-at-encryptedTelexNumber OBJECT IDENTIFIER ::= {id-at 21 2}
+id-at-collectiveTelexNumber OBJECT IDENTIFIER ::=
+ {id-at 21 1}
+
+-- id-at-encryptedCollectiveTelexNumber OBJECT IDENTIFIER ::= {id-at 21 1 2}
+-- id-at-teletexTerminalIdentifier OBJECT IDENTIFIER ::= {id-at 22}
+-- id-at-encryptedTeletexTerminalIdentifier OBJECT IDENTIFIER ::= {id-at 22 2}
+-- id-at-collectiveTeletexTerminalIdentifier OBJECT IDENTIFIER ::= {id-at 22 1}
+-- id-at-encryptedCollectiveTeletexTerminalIdentifier OBJECT IDENTIFIER ::= {id-at 22 1 2}
+id-at-facsimileTelephoneNumber OBJECT IDENTIFIER ::=
+ {id-at 23}
+
+-- id-at-encryptedFacsimileTelephoneNumber OBJECT IDENTIFIER ::= {id-at 23 2}
+id-at-collectiveFacsimileTelephoneNumber OBJECT IDENTIFIER ::=
+ {id-at 23 1}
+
+-- id-at-encryptedCollectiveFacsimileTelephoneNumber OBJECT IDENTIFIER ::= {id-at 23 1 2}
+id-at-x121Address OBJECT IDENTIFIER ::=
+ {id-at 24}
+
+-- id-at-encryptedX121Address OBJECT IDENTIFIER ::= {id-at 24 2}
+id-at-internationalISDNNumber OBJECT IDENTIFIER ::=
+ {id-at 25}
+
+-- id-at-encryptedInternationalISDNNumber OBJECT IDENTIFIER ::= {id-at 25 2}
+id-at-collectiveInternationalISDNNumber OBJECT IDENTIFIER ::=
+ {id-at 25 1}
+
+-- id-at-encryptedCollectiveInternationalISDNNumber OBJECT IDENTIFIER ::= {id-at 25 1 2}
+id-at-registeredAddress OBJECT IDENTIFIER ::=
+ {id-at 26}
+
+-- id-at-encryptedRegisteredAddress OBJECT IDENTIFIER ::= {id-at 26 2}
+id-at-destinationIndicator OBJECT IDENTIFIER ::=
+ {id-at 27}
+
+-- id-at-encryptedDestinationIndicator OBJECT IDENTIFIER ::= {id-at 27 2}
+id-at-preferredDeliveryMethod OBJECT IDENTIFIER ::=
+ {id-at 28}
+
+-- id-at-encryptedPreferredDeliveryMethod OBJECT IDENTIFIER ::= {id-at 28 2}
+id-at-presentationAddress OBJECT IDENTIFIER ::=
+ {id-at 29}
+
+-- id-at-encryptedPresentationAddress OBJECT IDENTIFIER ::= {id-at 29 2}
+id-at-supportedApplicationContext OBJECT IDENTIFIER ::=
+ {id-at 30}
+
+-- id-at-encryptedSupportedApplicationContext OBJECT IDENTIFIER ::= {id-at 30 2}
+id-at-member OBJECT IDENTIFIER ::=
+ {id-at 31}
+
+-- id-at-encryptedMember OBJECT IDENTIFIER ::= {id-at 31 2}
+id-at-owner OBJECT IDENTIFIER ::=
+ {id-at 32}
+
+-- id-at-encryptedOwner OBJECT IDENTIFIER ::= {id-at 32 2}
+id-at-roleOccupant OBJECT IDENTIFIER ::=
+ {id-at 33}
+
+-- id-at-encryptedRoleOccupant OBJECT IDENTIFIER ::= {id-at 33 2}
+id-at-seeAlso OBJECT IDENTIFIER ::=
+ {id-at 34}
+
+-- id-at-encryptedSeeAlso OBJECT IDENTIFIER ::= {id-at 34 2}
+-- id-at-userPassword OBJECT IDENTIFIER ::= {id-at 35} X.509|Part8
+-- id-at-encryptedUserPassword OBJECT IDENTIFIER ::= {id-at 35 2}
+-- id-at-userCertificate OBJECT IDENTIFIER ::= {id-at 36} X.509|Part8
+-- id-at-encryptedUserCertificate OBJECT IDENTIFIER ::= {id-at 36 2}
+-- id-at-cACertificate OBJECT IDENTIFIER ::= {id-at 37} X.509|Part8
+-- id-at-encryptedCACertificate OBJECT IDENTIFIER ::= {id-at 37 2}
+-- id-at-authorityRevocationList OBJECT IDENTIFIER ::= {id-at 38} X.509|Part8
+-- id-at-encryptedAuthorityRevocationList OBJECT IDENTIFIER ::= {id-at 38 2}
+-- id-at-certificateRevocationList OBJECT IDENTIFIER ::= {id-at 39} X.509|Part8
+-- id-at-encryptedCertificateRevocationList OBJECT IDENTIFIER ::= {id-at 39 2}
+-- id-at-crossCertificatePair OBJECT IDENTIFIER ::= {id-at 40} X.509|Part8
+-- id-at-encryptedCrossCertificatePair OBJECT IDENTIFIER ::= {id-at 40 2}
+id-at-name OBJECT IDENTIFIER ::=
+ {id-at 41}
+
+id-at-givenName OBJECT IDENTIFIER ::= {id-at 42}
+
+-- id-at-encryptedGivenName OBJECT IDENTIFIER ::= {id-at 42 2}
+id-at-initials OBJECT IDENTIFIER ::=
+ {id-at 43}
+
+-- id-at-encryptedInitials OBJECT IDENTIFIER ::= {id-at 43 2}
+id-at-generationQualifier OBJECT IDENTIFIER ::=
+ {id-at 44}
+
+-- id-at-encryptedGenerationQualifier OBJECT IDENTIFIER ::= {id-at 44 2}
+id-at-uniqueIdentifier OBJECT IDENTIFIER ::=
+ {id-at 45}
+
+-- id-at-encryptedUniqueIdentifier OBJECT IDENTIFIER ::= {id-at 45 2}
+id-at-dnQualifier OBJECT IDENTIFIER ::=
+ {id-at 46}
+
+-- id-at-encryptedDnQualifier OBJECT IDENTIFIER ::= {id-at 46 2}
+id-at-enhancedSearchGuide OBJECT IDENTIFIER ::=
+ {id-at 47}
+
+-- id-at-encryptedEnhancedSearchGuide OBJECT IDENTIFIER ::= {id-at 47 2}
+id-at-protocolInformation OBJECT IDENTIFIER ::=
+ {id-at 48}
+
+-- id-at-encryptedProtocolInformation OBJECT IDENTIFIER ::= {id-at 48 2}
+id-at-distinguishedName OBJECT IDENTIFIER ::=
+ {id-at 49}
+
+-- id-at-encryptedDistinguishedName OBJECT IDENTIFIER ::= {id-at 49 2}
+id-at-uniqueMember OBJECT IDENTIFIER ::=
+ {id-at 50}
+
+-- id-at-encryptedUniqueMember OBJECT IDENTIFIER ::= {id-at 50 2}
+id-at-houseIdentifier OBJECT IDENTIFIER ::=
+ {id-at 51}
+
+-- id-at-encryptedHouseIdentifier OBJECT IDENTIFIER ::= {id-at 51 2}
+-- id-at-supportedAlgorithms OBJECT IDENTIFIER ::= {id-at 52} X.509|Part8
+-- id-at-encryptedSupportedAlgorithms OBJECT IDENTIFIER ::= {id-at 52 2}
+-- id-at-deltaRevocationList OBJECT IDENTIFIER ::= {id-at 53} X.509|Part8
+-- id-at-encryptedDeltaRevocationList OBJECT IDENTIFIER ::= {id-at 53 2}
+id-at-dmdName OBJECT IDENTIFIER ::=
+ {id-at 54}
+
+-- id-at-encryptedDmdName OBJECT IDENTIFIER ::= {id-at 54 2}
+-- id-at-clearance OBJECT IDENTIFIER ::= {id-at 55}
+-- id-at-encryptedClearance OBJECT IDENTIFIER ::= {id-at 55 2}
+-- id-at-defaultDirQop OBJECT IDENTIFIER ::= {id-at 56}
+-- id-at-encryptedDefaultDirQop OBJECT IDENTIFIER ::= {id-at 56 2}
+-- id-at-attributeIntegrityInfo OBJECT IDENTIFIER ::= {id-at 57}
+-- id-at-encryptedAttributeIntegrityInfo OBJECT IDENTIFIER ::= {id-at 57 2}
+-- id-at-attributeCertificate OBJECT IDENTIFIER ::= {id-at 58} X.509|Part8
+-- id-at-encryptedAttributeCertificate OBJECT IDENTIFIER ::= {id-at 58 2}
+-- id-at-attributeCertificateRevocationList OBJECT IDENTIFIER ::= {id-at 59} X.509|Part8
+-- id-at-encryptedAttributeCertificateRevocationList OBJECT IDENTIFIER ::= {id-at 59 2}
+-- id-at-confKeyInfo OBJECT IDENTIFIER ::= {id-at 60}
+-- id-at-encryptedConfKeyInfo OBJECT IDENTIFIER ::= {id-at 60 2}
+-- id-at-aACertificate OBJECT IDENTIFIER ::= {id-at 61} X.509|Part8
+-- id-at-attributeDescriptorCertificate OBJECT IDENTIFIER ::= {id-at 62} X.509|Part8
+-- id-at-attributeAuthorityRevocationList OBJECT IDENTIFIER ::= {id-at 63} X.509|Part8
+-- id-at-family-information OBJECT IDENTIFIER ::= {id-at 64}
+id-at-pseudonym OBJECT IDENTIFIER ::=
+ {id-at 65}
+
+id-at-communicationsService OBJECT IDENTIFIER ::= {id-at 66}
+
+id-at-communicationsNetwork OBJECT IDENTIFIER ::= {id-at 67}
+
+-- id-at-certificationPracticeStmt OBJECT IDENTIFIER ::= {id-at 68} X.509|Part8
+-- id-at-certificatePolicy OBJECT IDENTIFIER ::= {id-at 69} X.509|Part8
+-- id-at-pkiPath OBJECT IDENTIFIER ::= {id-at 70} X.509|Part8
+-- id-at-privPolicy OBJECT IDENTIFIER ::= {id-at 71} X.509|Part8
+-- id-at-role OBJECT IDENTIFIER ::= {id-at 72} X.509|Part8
+-- id-at-delegationPath OBJECT IDENTIFIER ::= {id-at 73} X.509|Part8
+-- id-at-protPrivPolicy OBJECT IDENTIFIER ::= {id-at 74} X.509|Part8
+-- id-at-xMLPrivilegeInfo OBJECT IDENTIFIER ::= {id-at 75} X.509|Part8
+-- id-at-xmlPrivPolicy OBJECT IDENTIFIER ::= {id-at 76} X.509|Part8
+id-at-uuidpair OBJECT IDENTIFIER ::=
+ {id-at 77}
+
+id-at-tagOid OBJECT IDENTIFIER ::= {id-at 78}
+
+id-at-uiiFormat OBJECT IDENTIFIER ::= {id-at 79}
+
+id-at-uiiInUrn OBJECT IDENTIFIER ::= {id-at 80}
+
+id-at-contentUri OBJECT IDENTIFIER ::= {id-at 81}
+
+-- id-at-permission OBJECT IDENTIFIER ::= {id-at 82} X.509|Part8
+-- Control attributes
+id-cat-sequenceMatchType OBJECT IDENTIFIER ::=
+ {id-cat 1}
+
+id-cat-wordMatchType OBJECT IDENTIFIER ::= {id-cat 2}
+
+id-cat-characterMatchTypes OBJECT IDENTIFIER ::= {id-cat 3}
+
+id-cat-selectedContexts OBJECT IDENTIFIER ::= {id-cat 4}
+
+-- Notification attributes
+id-not-dSAProblem OBJECT IDENTIFIER ::= {id-not 0}
+
+id-not-searchServiceProblem OBJECT IDENTIFIER ::= {id-not 1}
+
+id-not-serviceType OBJECT IDENTIFIER ::= {id-not 2}
+
+id-not-attributeTypeList OBJECT IDENTIFIER ::= {id-not 3}
+
+id-not-matchingRuleList OBJECT IDENTIFIER ::= {id-not 4}
+
+id-not-filterItem OBJECT IDENTIFIER ::= {id-not 5}
+
+id-not-attributeCombinations OBJECT IDENTIFIER ::= {id-not 6}
+
+id-not-contextTypeList OBJECT IDENTIFIER ::= {id-not 7}
+
+id-not-contextList OBJECT IDENTIFIER ::= {id-not 8}
+
+id-not-contextCombinations OBJECT IDENTIFIER ::= {id-not 9}
+
+id-not-hierarchySelectList OBJECT IDENTIFIER ::= {id-not 10}
+
+id-not-searchControlOptionsList OBJECT IDENTIFIER ::= {id-not 11}
+
+id-not-serviceControlOptionsList OBJECT IDENTIFIER ::= {id-not 12}
+
+id-not-multipleMatchingLocalities OBJECT IDENTIFIER ::= {id-not 13}
+
+id-not-proposedRelaxation OBJECT IDENTIFIER ::= {id-not 14}
+
+id-not-appliedRelaxation OBJECT IDENTIFIER ::= {id-not 15}
+
+-- Problem definitions
+id-pr-targetDsaUnavailable OBJECT IDENTIFIER ::=
+ {id-pr 1}
+
+id-pr-dataSourceUnavailable OBJECT IDENTIFIER ::= {id-pr 2}
+
+id-pr-unidentifiedOperation OBJECT IDENTIFIER ::= {id-pr 3}
+
+id-pr-unavailableOperation OBJECT IDENTIFIER ::= {id-pr 4}
+
+id-pr-searchAttributeViolation OBJECT IDENTIFIER ::= {id-pr 5}
+
+id-pr-searchAttributeCombinationViolation OBJECT IDENTIFIER ::= {id-pr 6}
+
+id-pr-searchValueNotAllowed OBJECT IDENTIFIER ::= {id-pr 7}
+
+id-pr-missingSearchAttribute OBJECT IDENTIFIER ::= {id-pr 8}
+
+id-pr-searchValueViolation OBJECT IDENTIFIER ::= {id-pr 9}
+
+id-pr-attributeNegationViolation OBJECT IDENTIFIER ::= {id-pr 10}
+
+id-pr-searchValueRequired OBJECT IDENTIFIER ::= {id-pr 11}
+
+id-pr-invalidSearchValue OBJECT IDENTIFIER ::= {id-pr 12}
+
+id-pr-searchContextViolation OBJECT IDENTIFIER ::= {id-pr 13}
+
+id-pr-searchContextCombinationViolation OBJECT IDENTIFIER ::= {id-pr 14}
+
+id-pr-missingSearchContext OBJECT IDENTIFIER ::= {id-pr 15}
+
+id-pr-searchContextValueViolation OBJECT IDENTIFIER ::= {id-pr 16}
+
+id-pr-searchContextValueRequired OBJECT IDENTIFIER ::= {id-pr 17}
+
+id-pr-invalidContextSearchValue OBJECT IDENTIFIER ::= {id-pr 18}
+
+id-pr-unsupportedMatchingRule OBJECT IDENTIFIER ::= {id-pr 19}
+
+id-pr-attributeMatchingViolation OBJECT IDENTIFIER ::= {id-pr 20}
+
+id-pr-unsupportedMatchingUse OBJECT IDENTIFIER ::= {id-pr 21}
+
+id-pr-matchingUseViolation OBJECT IDENTIFIER ::= {id-pr 22}
+
+id-pr-hierarchySelectForbidden OBJECT IDENTIFIER ::= {id-pr 23}
+
+id-pr-invalidHierarchySelect OBJECT IDENTIFIER ::= {id-pr 24}
+
+id-pr-unavailableHierarchySelect OBJECT IDENTIFIER ::= {id-pr 25}
+
+id-pr-invalidSearchControlOptions OBJECT IDENTIFIER ::= {id-pr 26}
+
+id-pr-invalidServiceControlOptions OBJECT IDENTIFIER ::= {id-pr 27}
+
+id-pr-searchSubsetViolation OBJECT IDENTIFIER ::= {id-pr 28}
+
+id-pr-unmatchedKeyAttributes OBJECT IDENTIFIER ::= {id-pr 29}
+
+id-pr-ambiguousKeyAttributes OBJECT IDENTIFIER ::= {id-pr 30}
+
+id-pr-unavailableRelaxationLevel OBJECT IDENTIFIER ::= {id-pr 31}
+
+id-pr-emptyHierarchySelection OBJECT IDENTIFIER ::= {id-pr 32}
+
+id-pr-administratorImposedLimit OBJECT IDENTIFIER ::= {id-pr 33}
+
+id-pr-permanentRestriction OBJECT IDENTIFIER ::= {id-pr 34}
+
+id-pr-temporaryRestriction OBJECT IDENTIFIER ::= {id-pr 35}
+
+id-pr-relaxationNotSupported OBJECT IDENTIFIER ::= {id-pr 36}
+
+-- Matching rules
+-- id-mr-objectIdentifierMatch OBJECT IDENTIFIER ::= {id-mr 0} X.501|Part2
+-- id-mr-distinguishedNameMatch OBJECT IDENTIFIER ::= {id-mr 1} X.501|Part2
+id-mr-caseIgnoreMatch OBJECT IDENTIFIER ::=
+ {id-mr 2}
+
+id-mr-caseIgnoreOrderingMatch OBJECT IDENTIFIER ::= {id-mr 3}
+
+id-mr-caseIgnoreSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 4}
+
+id-mr-caseExactMatch OBJECT IDENTIFIER ::= {id-mr 5}
+
+id-mr-caseExactOrderingMatch OBJECT IDENTIFIER ::= {id-mr 6}
+
+id-mr-caseExactSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 7}
+
+id-mr-numericStringMatch OBJECT IDENTIFIER ::= {id-mr 8}
+
+id-mr-numericStringOrderingMatch OBJECT IDENTIFIER ::= {id-mr 9}
+
+id-mr-numericStringSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 10}
+
+id-mr-caseIgnoreListMatch OBJECT IDENTIFIER ::= {id-mr 11}
+
+id-mr-caseIgnoreListSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 12}
+
+id-mr-booleanMatch OBJECT IDENTIFIER ::= {id-mr 13}
+
+id-mr-integerMatch OBJECT IDENTIFIER ::= {id-mr 14}
+
+id-mr-integerOrderingMatch OBJECT IDENTIFIER ::= {id-mr 15}
+
+id-mr-bitStringMatch OBJECT IDENTIFIER ::= {id-mr 16}
+
+id-mr-octetStringMatch OBJECT IDENTIFIER ::= {id-mr 17}
+
+id-mr-octetStringOrderingMatch OBJECT IDENTIFIER ::= {id-mr 18}
+
+id-mr-octetStringSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 19}
+
+id-mr-telephoneNumberMatch OBJECT IDENTIFIER ::= {id-mr 20}
+
+id-mr-telephoneNumberSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 21}
+
+id-mr-presentationAddressMatch OBJECT IDENTIFIER ::= {id-mr 22}
+
+id-mr-uniqueMemberMatch OBJECT IDENTIFIER ::= {id-mr 23}
+
+id-mr-protocolInformationMatch OBJECT IDENTIFIER ::= {id-mr 24}
+
+id-mr-uTCTimeMatch OBJECT IDENTIFIER ::= {id-mr 25}
+
+id-mr-uTCTimeOrderingMatch OBJECT IDENTIFIER ::= {id-mr 26}
+
+id-mr-generalizedTimeMatch OBJECT IDENTIFIER ::= {id-mr 27}
+
+id-mr-generalizedTimeOrderingMatch OBJECT IDENTIFIER ::= {id-mr 28}
+
+id-mr-integerFirstComponentMatch OBJECT IDENTIFIER ::= {id-mr 29}
+
+id-mr-objectIdentifierFirstComponentMatch OBJECT IDENTIFIER ::= {id-mr 30}
+
+id-mr-directoryStringFirstComponentMatch OBJECT IDENTIFIER ::= {id-mr 31}
+
+id-mr-wordMatch OBJECT IDENTIFIER ::= {id-mr 32}
+
+id-mr-keywordMatch OBJECT IDENTIFIER ::= {id-mr 33}
+
+-- id-mr-certificateExactMatch OBJECT IDENTIFIER ::= {id-mr 34} X.509|Part8
+-- id-mr-certificateMatch OBJECT IDENTIFIER ::= {id-mr 35} X.509|Part8
+-- id-mr-certificatePairExactMatch OBJECT IDENTIFIER ::= {id-mr 36} X.509|Part8
+-- id-mr-certificatePairMatch OBJECT IDENTIFIER ::= {id-mr 37} X.509|Part8
+-- id-mr-certificateListExactMatch OBJECT IDENTIFIER ::= {id-mr 38} X.509|Part8
+-- id-mr-certificateListMatch OBJECT IDENTIFIER ::= {id-mr 39} X.509|Part8
+-- id-mr-algorithmIdentifierMatch OBJECT IDENTIFIER ::= {id-mr 40} X.509|Part8
+id-mr-storedPrefixMatch OBJECT IDENTIFIER ::=
+ {id-mr 41}
+
+-- id-mr-attributeCertificateMatch OBJECT IDENTIFIER ::= {id-mr 42} X.509|Part8
+-- id-mr-readerAndKeyIDMatch OBJECT IDENTIFIER ::= {id-mr 43}
+-- id-mr-attributeIntegrityMatch OBJECT IDENTIFIER ::= {id-mr 44}
+-- id-mr-attributeCertificateExactMatch OBJECT IDENTIFIER ::= {id-mr 45} X.509|Part8
+-- id-mr-holderIssuerMatch OBJECT IDENTIFIER ::= {id-mr 46} X.509|Part8
+id-mr-systemProposedMatch OBJECT IDENTIFIER ::=
+ {id-mr 47}
+
+id-mr-generalWordMatch OBJECT IDENTIFIER ::= {id-mr 48}
+
+id-mr-approximateStringMatch OBJECT IDENTIFIER ::= {id-mr 49}
+
+id-mr-ignoreIfAbsentMatch OBJECT IDENTIFIER ::= {id-mr 50}
+
+id-mr-nullMatch OBJECT IDENTIFIER ::= {id-mr 51}
+
+id-mr-zonalMatch OBJECT IDENTIFIER ::= {id-mr 52}
+
+-- id-mr-authAttIdMatch OBJECT IDENTIFIER ::= {id-mr 53} X.509|Part8
+-- id-mr-roleSpecCertIdMatch OBJECT IDENTIFIER ::= {id-mr 54} X.509|Part8
+-- id-mr-basicAttConstraintsMatch OBJECT IDENTIFIER ::= {id-mr 55} X.509|Part8
+-- id-mr-delegatedNameConstraintsMatch OBJECT IDENTIFIER ::= {id-mr 56} X.509|Part8
+-- id-mr-timeSpecMatch OBJECT IDENTIFIER ::= {id-mr 57} X.509|Part8
+-- id-mr-attDescriptorMatch OBJECT IDENTIFIER ::= {id-mr 58} X.509|Part8
+-- id-mr-acceptableCertPoliciesMatch OBJECT IDENTIFIER ::= {id-mr 59} X.509|Part8
+-- id-mr-policyMatch OBJECT IDENTIFIER ::= {id-mr 60} X.509|Part8
+-- id-mr-delegationPathMatch OBJECT IDENTIFIER ::= {id-mr 61} X.509|Part8
+-- id-mr-pkiPathMatch OBJECT IDENTIFIER ::= {id-mr 62} X.509|Part8
+id-mr-facsimileNumberMatch OBJECT IDENTIFIER ::=
+ {id-mr 63}
+
+id-mr-facsimileNumberSubstringsMatch OBJECT IDENTIFIER ::= {id-mr 64}
+
+-- id-mr-enhancedCertificateMatch OBJECT IDENTIFIER ::= {id-mr 65} X.509|Part8
+-- id-mr-sOAIdentifierMatch OBJECT IDENTIFIER ::= {id-mr 66} X.509|Part8
+-- id-mr-extensionPresenceMatch OBJECT IDENTIFIER ::= {id-mr 67} X.509|Part8
+id-mr-uuidpairmatch OBJECT IDENTIFIER ::=
+ {id-mr 68}
+
+-- id-mr-dualStringMatch OBJECT IDENTIFIER ::= {id-mr 69} X.509|Part8
+-- contexts
+id-avc-language OBJECT IDENTIFIER ::=
+ {id-avc 0}
+
+id-avc-temporal OBJECT IDENTIFIER ::= {id-avc 1}
+
+id-avc-locale OBJECT IDENTIFIER ::= {id-avc 2}
+
+-- id-avc-attributeValueSecurityLabelContext OBJECT IDENTIFIER ::= {id-avc 3}
+-- id-avc-attributeValueIntegrityInfoContext OBJECT IDENTIFIER ::= {id-avc 4}
+id-avc-ldapAttributeOption OBJECT IDENTIFIER ::=
+ {id-avc 5}
+
+END -- SelectedAttributeTypes
diff --git a/lib/public_key/asn1/UsefulDefinitions.asn1 b/lib/public_key/asn1/UsefulDefinitions.asn1
new file mode 100644
index 0000000000..a200aac6e2
--- /dev/null
+++ b/lib/public_key/asn1/UsefulDefinitions.asn1
@@ -0,0 +1,234 @@
+UsefulDefinitions {joint-iso-itu-t ds(5) module(1) usefulDefinitions(0) 3}
+DEFINITIONS ::=
+BEGIN
+
+-- EXPORTS All -
+-- The types and values defined in this module are exported for use in the other ASN.1 modules contained
+-- within the Directory Specifications, and for the use of other applications which will use them to access
+-- Directory services. Other applications may use them for their own purposes, but this will not constrain
+-- extensions and modifications needed to maintain or improve the Directory service.
+ID ::= OBJECT IDENTIFIER
+
+ds ID ::= {joint-iso-itu-t ds(5)}
+
+-- categories of information object
+module ID ::= {ds 1}
+
+serviceElement ID ::= {ds 2}
+
+applicationContext ID ::= {ds 3}
+
+attributeType ID ::= {ds 4}
+
+attributeSyntax ID ::= {ds 5}
+
+objectClass ID ::= {ds 6}
+
+-- attributeSet ID ::= {ds 7}
+algorithm ID ::= {ds 8}
+
+abstractSyntax ID ::= {ds 9}
+
+-- object ID ::= {ds 10}
+-- port ID ::= {ds 11}
+dsaOperationalAttribute ID ::=
+ {ds 12}
+
+matchingRule ID ::= {ds 13}
+
+knowledgeMatchingRule ID ::= {ds 14}
+
+nameForm ID ::= {ds 15}
+
+group ID ::= {ds 16}
+
+subentry ID ::= {ds 17}
+
+operationalAttributeType ID ::= {ds 18}
+
+operationalBinding ID ::= {ds 19}
+
+schemaObjectClass ID ::= {ds 20}
+
+schemaOperationalAttribute ID ::= {ds 21}
+
+administrativeRoles ID ::= {ds 23}
+
+accessControlAttribute ID ::= {ds 24}
+
+rosObject ID ::= {ds 25}
+
+contract ID ::= {ds 26}
+
+package ID ::= {ds 27}
+
+accessControlSchemes ID ::= {ds 28}
+
+certificateExtension ID ::= {ds 29}
+
+managementObject ID ::= {ds 30}
+
+attributeValueContext ID ::= {ds 31}
+
+-- securityExchange ID ::= {ds 32}
+idmProtocol ID ::= {ds 33}
+
+problem ID ::= {ds 34}
+
+notification ID ::= {ds 35}
+
+matchingRestriction ID ::=
+ {ds 36} -- None are currently defined by this specification
+
+controlAttributeType ID ::= {ds 37}
+
+-- modules
+usefulDefinitions ID ::= {module usefulDefinitions(0) 3}
+
+informationFramework ID ::= {module informationFramework(1) 3}
+
+directoryAbstractService ID ::= {module directoryAbstractService(2) 3}
+
+distributedOperations ID ::= {module distributedOperations(3) 3}
+
+protocolObjectIdentifiers ID ::= {module protocolObjectIdentifiers(4) 3}
+
+selectedAttributeTypes ID ::= {module selectedAttributeTypes(5) 3}
+
+selectedObjectClasses ID ::= {module selectedObjectClasses(6) 3}
+
+authenticationFramework ID ::= {module authenticationFramework(7) 3}
+
+algorithmObjectIdentifiers ID ::= {module algorithmObjectIdentifiers(8) 3}
+
+directoryObjectIdentifiers ID ::= {module directoryObjectIdentifiers(9) 3}
+
+upperBounds ID ::= {module upperBounds(10) 3}
+
+dap ID ::= {module dap(11) 3}
+
+dsp ID ::= {module dsp(12) 3}
+
+distributedDirectoryOIDs ID ::= {module distributedDirectoryOIDs(13) 3}
+
+directoryShadowOIDs ID ::= {module directoryShadowOIDs(14) 3}
+
+directoryShadowAbstractService ID ::=
+ {module directoryShadowAbstractService(15) 3}
+
+disp ID ::= {module disp(16) 3}
+
+dop ID ::= {module dop(17) 3}
+
+opBindingManagement ID ::= {module opBindingManagement(18) 3}
+
+opBindingOIDs ID ::= {module opBindingOIDs(19) 3}
+
+hierarchicalOperationalBindings ID ::=
+ {module hierarchicalOperationalBindings(20) 3}
+
+dsaOperationalAttributeTypes ID ::= {module dsaOperationalAttributeTypes(22) 3}
+
+schemaAdministration ID ::= {module schemaAdministration(23) 3}
+
+basicAccessControl ID ::= {module basicAccessControl(24) 3}
+
+directoryOperationalBindingTypes ID ::=
+ {module directoryOperationalBindingTypes(25) 3}
+
+certificateExtensions ID ::= {module certificateExtensions(26) 0}
+
+directoryManagement ID ::= {module directoryManagement(27) 1}
+
+enhancedSecurity ID ::= {module enhancedSecurity(28) 1}
+
+iDMProtocolSpecification ID ::= {module iDMProtocolSpecification(30) 4}
+
+directoryIDMProtocols ID ::= {module directoryIDMProtocols(31) 4}
+
+-- directorySecurityExchanges ID ::= {module directorySecurityExchanges (29) 1}
+-- synonyms
+id-oc ID ::=
+ objectClass
+
+id-at ID ::= attributeType
+
+id-as ID ::= abstractSyntax
+
+id-mr ID ::= matchingRule
+
+id-nf ID ::= nameForm
+
+id-sc ID ::= subentry
+
+id-oa ID ::= operationalAttributeType
+
+id-ob ID ::= operationalBinding
+
+id-doa ID ::= dsaOperationalAttribute
+
+id-kmr ID ::= knowledgeMatchingRule
+
+id-soc ID ::= schemaObjectClass
+
+id-soa ID ::= schemaOperationalAttribute
+
+id-ar ID ::= administrativeRoles
+
+id-aca ID ::= accessControlAttribute
+
+id-ac ID ::= applicationContext
+
+id-rosObject ID ::= rosObject
+
+id-contract ID ::= contract
+
+id-package ID ::= package
+
+id-acScheme ID ::= accessControlSchemes
+
+id-ce ID ::= certificateExtension
+
+id-mgt ID ::= managementObject
+
+id-idm ID ::= idmProtocol
+
+id-avc ID ::= attributeValueContext
+
+-- id-se ID ::= securityExchange
+id-pr ID ::= problem
+
+id-not ID ::= notification
+
+id-mre ID ::= matchingRestriction
+
+id-cat ID ::= controlAttributeType
+
+-- obsolete module identifiers
+-- usefulDefinition ID ::= {module 0}
+-- informationFramework ID ::= {module 1}
+-- directoryAbstractService ID ::= {module 2}
+-- distributedOperations ID ::= {module 3}
+-- protocolObjectIdentifiers ID ::= {module 4}
+-- selectedAttributeTypes ID ::= {module 5}
+-- selectedObjectClasses ID ::= {module 6}
+-- authenticationFramework ID ::= {module 7}
+-- algorithmObjectIdentifiers ID ::= {module 8}
+-- directoryObjectIdentifiers ID ::= {module 9}
+-- upperBounds ID ::= {module 10}
+-- dap ID ::= {module 11}
+-- dsp ID ::= {module 12}
+-- distributedDirectoryObjectIdentifiers ID ::= {module 13}
+-- unused module identifiers
+-- directoryShadowOIDs ID ::= {module 14}
+-- directoryShadowAbstractService ID ::= {module 15}
+-- disp ID ::= {module 16}
+-- dop ID ::= {module 17}
+-- opBindingManagement ID ::= {module 18}
+-- opBindingOIDs ID ::= {module 19}
+-- hierarchicalOperationalBindings ID ::= {module 20}
+-- dsaOperationalAttributeTypes ID ::= {module 22}
+-- schemaAdministration ID ::= {module 23}
+-- basicAccessControl ID ::= {module 24}
+-- operationalBindingOIDs ID ::= {module 25}
+END -- UsefulDefinitions
diff --git a/lib/public_key/doc/src/cert_records.xml b/lib/public_key/doc/src/cert_records.xml
index ad4f5812cb..edef664245 100644
--- a/lib/public_key/doc/src/cert_records.xml
+++ b/lib/public_key/doc/src/cert_records.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>2008</year>
- <year>2011</year>
+ <year>2012</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -37,7 +37,10 @@
<p>This chapter briefly describes erlang records derived from asn1
specifications used to handle X509 certificates. The intent is to
describe the data types and not to specify the meaning of each
- component for this we refer you to RFC 5280.
+ component; for this we refer you to <url
+ href="http://www.ietf.org/rfc/rfc5280.txt">RFC 5280</url>. Also
+ described is <c>CertificationRequest</c>, which is defined by <url
+ href="http://www.rsa.com/rsalabs/node.asp?id=2124">PKCS-10</url>.
</p>
<p>Use the following include directive to get access to the
@@ -630,6 +633,40 @@ oid names see table below. Ex: ?'id-dsa-with-sha1'</p>
aACompromise
</c></p>
</section>
-
+
+ <section>
+ <marker id="PKCS10"></marker>
+ <title>PKCS#10 Certification Request</title>
+ <code>
+#'CertificationRequest'{
+ certificationRequestInfo #'CertificationRequestInfo'{},
+ signatureAlgorithm #'CertificationRequest_signatureAlgorithm'{},
+ signature {0, binary()} - asn1 compact bitstring
+ }
+
+#'CertificationRequestInfo'{
+ version atom(),
+ subject {rdnSequence, [#'AttributeTypeAndValue'{}]},
+ subjectPKInfo #'CertificationRequestInfo_subjectPKInfo'{},
+ attributes [#'AttributeTypeAndValue'{}]
+ }
+
+#'CertificationRequestInfo_subjectPKInfo'{
+ algorithm #'CertificationRequestInfo_subjectPKInfo_algorithm'{},
+ subjectPublicKey {0, binary()} - asn1 compact bitstring
+ }
+
+#'CertificationRequestInfo_subjectPKInfo_algorithm'{
+ algorithm = oid(),
+ parameters = asn1_der_encoded()
+}
+
+#'CertificationRequest_signatureAlgorithm'{
+ algorithm = oid(),
+ parameters = asn1_der_encoded()
+ }
+ </code>
+ </section>
+
</section>
</chapter>
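The PKCS#10 additions above can be exercised directly from the public_key API. A minimal sketch follows; the file name and shell bindings are hypothetical, and the PEM file is assumed to hold a single certification request, as in the test data added later in this patch:

    %% Sketch: decode a PKCS#10 certification request from a PEM file.
    {ok, PemBin} = file:read_file("req.pem"),
    [{'CertificationRequest', _Der, not_encrypted} = Entry] =
        public_key:pem_decode(PemBin),
    PKCS10 = public_key:pem_entry_decode(Entry).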
diff --git a/lib/public_key/doc/src/introduction.xml b/lib/public_key/doc/src/introduction.xml
index a21fcf3576..b1d1114a6c 100644
--- a/lib/public_key/doc/src/introduction.xml
+++ b/lib/public_key/doc/src/introduction.xml
@@ -1,11 +1,11 @@
-<?xml version="1.0" encoding="latin1" ?>
+<?xml version="1.0" encoding="iso-8859-1" ?>
<!DOCTYPE chapter SYSTEM "chapter.dtd">
<chapter>
<header>
<copyright>
<year>2008</year>
- <year>2011</year>
+ <year>2012</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -37,15 +37,15 @@
<section>
<title>Purpose</title>
<p> This application provides an API to public key infrastructure
- from RFC 3280 (X.509 certificates) and public key formats defined
- by the PKCS-standard.</p>
+ from <url href="http://www.ietf.org/rfc/rfc5280.txt">RFC
+ 5280</url> (X.509 certificates) and public key formats defined by
+    the <url href="http://www.rsa.com/rsalabs/node.asp?id=2124">PKCS-standard</url>.</p>
</section>
<section>
<title>Prerequisites</title>
- <p>It is assumed that the reader is familiar with the Erlang
- programming language, concepts of OTP and has a basic understanding
- of the concepts of using public keys.</p>
+ <p>It is assumed that the reader has a basic understanding
+ of the concepts of using public keys and digital certificates.</p>
</section>
<section>
diff --git a/lib/public_key/doc/src/public_key.xml b/lib/public_key/doc/src/public_key.xml
index 5c227557f2..2ec1fcff9d 100644
--- a/lib/public_key/doc/src/public_key.xml
+++ b/lib/public_key/doc/src/public_key.xml
@@ -34,7 +34,7 @@
<modulesummary> API module for public key infrastructure.</modulesummary>
<description>
<p>This module provides functions to handle public key infrastructure
- from RFC 5280 - X.509 certificates and some parts of the PKCS-standard.
+      from <url href="http://www.ietf.org/rfc/rfc5280.txt">RFC 5280</url> - X.509 certificates and some parts of the PKCS-standard.
</p>
</description>
@@ -61,7 +61,7 @@
<p><code>string = [bytes()]</code></p>
<p><code>pki_asn1_type() = 'Certificate' | 'RSAPrivateKey'| 'RSAPublicKey'
- 'DSAPrivateKey' | 'DSAPublicKey' | 'DHParameter' | 'SubjectPublicKeyInfo'| 'PrivateKeyInfo'</code></p>
+ 'DSAPrivateKey' | 'DSAPublicKey' | 'DHParameter' | 'SubjectPublicKeyInfo'| 'PrivateKeyInfo' | 'CertificationRequest'</code></p>
<p><code>pem_entry () = {pki_asn1_type(), binary(), %% DER or encrypted DER
not_encrypted | cipher_info()} </code></p>
diff --git a/lib/public_key/include/public_key.hrl b/lib/public_key/include/public_key.hrl
index 2475295974..2dfdbbb8f3 100644
--- a/lib/public_key/include/public_key.hrl
+++ b/lib/public_key/include/public_key.hrl
@@ -78,7 +78,7 @@
-type dsa_public_key() :: {integer(), #'Dss-Parms'{}}.
-type pki_asn1_type() :: 'Certificate' | 'RSAPrivateKey' | 'RSAPublicKey'
| 'DSAPrivateKey' | 'DSAPublicKey' | 'DHParameter'
- | 'SubjectPublicKeyInfo'.
+ | 'SubjectPublicKeyInfo' | 'CertificationRequest'.
-type pem_entry() :: {pki_asn1_type(), binary(), %% DER or Encrypted DER
not_encrypted | {Cipher :: string(), Salt :: binary()}}.
-type asn1_type() :: atom(). %% see "OTP-PUB-KEY.hrl
diff --git a/lib/public_key/src/pubkey_pem.erl b/lib/public_key/src/pubkey_pem.erl
index 910473d629..4012825f20 100644
--- a/lib/public_key/src/pubkey_pem.erl
+++ b/lib/public_key/src/pubkey_pem.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -194,7 +194,12 @@ pem_start('SubjectPublicKeyInfo') ->
pem_start('DSAPrivateKey') ->
<<"-----BEGIN DSA PRIVATE KEY-----">>;
pem_start('DHParameter') ->
- <<"-----BEGIN DH PARAMETERS-----">>.
+ <<"-----BEGIN DH PARAMETERS-----">>;
+pem_start('CertificationRequest') ->
+ <<"-----BEGIN CERTIFICATE REQUEST-----">>;
+pem_start('ContentInfo') ->
+ <<"-----BEGIN PKCS7-----">>.
+
pem_end(<<"-----BEGIN CERTIFICATE-----">>) ->
<<"-----END CERTIFICATE-----">>;
pem_end(<<"-----BEGIN RSA PRIVATE KEY-----">>) ->
@@ -211,6 +216,10 @@ pem_end(<<"-----BEGIN PRIVATE KEY-----">>) ->
<<"-----END PRIVATE KEY-----">>;
pem_end(<<"-----BEGIN ENCRYPTED PRIVATE KEY-----">>) ->
<<"-----END ENCRYPTED PRIVATE KEY-----">>;
+pem_end(<<"-----BEGIN CERTIFICATE REQUEST-----">>) ->
+ <<"-----END CERTIFICATE REQUEST-----">>;
+pem_end(<<"-----BEGIN PKCS7-----">>) ->
+ <<"-----END PKCS7-----">>;
pem_end(_) ->
undefined.
@@ -229,7 +238,11 @@ asn1_type(<<"-----BEGIN DH PARAMETERS-----">>) ->
asn1_type(<<"-----BEGIN PRIVATE KEY-----">>) ->
'PrivateKeyInfo';
asn1_type(<<"-----BEGIN ENCRYPTED PRIVATE KEY-----">>) ->
- 'EncryptedPrivateKeyInfo'.
+ 'EncryptedPrivateKeyInfo';
+asn1_type(<<"-----BEGIN CERTIFICATE REQUEST-----">>) ->
+ 'CertificationRequest';
+asn1_type(<<"-----BEGIN PKCS7-----">>) ->
+ 'ContentInfo'.
pem_decrypt() ->
<<"Proc-Type: 4,ENCRYPTED">>.
diff --git a/lib/public_key/test/public_key_SUITE.erl b/lib/public_key/test/public_key_SUITE.erl
index f2f30dad6e..2b83bc0a5c 100644
--- a/lib/public_key/test/public_key_SUITE.erl
+++ b/lib/public_key/test/public_key_SUITE.erl
@@ -111,7 +111,7 @@ all() ->
groups() ->
[{pem_decode_encode, [], [dsa_pem, rsa_pem, encrypted_pem,
- dh_pem, cert_pem]},
+ dh_pem, cert_pem, pkcs10_pem]},
{ssh_public_key_decode_encode, [],
[ssh_rsa_public_key, ssh_dsa_public_key, ssh_rfc4716_rsa_comment,
ssh_rfc4716_dsa_comment, ssh_rfc4716_rsa_subject, ssh_known_hosts,
@@ -249,7 +249,42 @@ dh_pem(Config) when is_list(Config) ->
DHParameter = public_key:pem_entry_decode(Entry),
Entry = public_key:pem_entry_encode('DHParameter', DHParameter).
-
+
+%%--------------------------------------------------------------------
+
+pkcs10_pem(doc) ->
+ [""];
+pkcs10_pem(suite) ->
+ [];
+pkcs10_pem(Config) when is_list(Config) ->
+ Datadir = ?config(data_dir, Config),
+ [{'CertificationRequest', DerPKCS10, not_encrypted} = Entry] =
+ erl_make_certs:pem_to_der(filename:join(Datadir, "req.pem")),
+
+ erl_make_certs:der_to_pem(filename:join(Datadir, "new_req.pem"), [Entry]),
+
+ PKCS10 = public_key:der_decode('CertificationRequest', DerPKCS10),
+ PKCS10 = public_key:pem_entry_decode(Entry),
+
+ Entry = public_key:pem_entry_encode('CertificationRequest', PKCS10).
+
+%%--------------------------------------------------------------------
+pkcs7_pem(doc) ->
+ [""];
+pkcs7_pem(suite) ->
+ [];
+pkcs7_pem(Config) when is_list(Config) ->
+ Datadir = ?config(data_dir, Config),
+ [{'ContentInfo', DerPKCS7, not_encrypted} = Entry] =
+ erl_make_certs:pem_to_der(filename:join(Datadir, "pkcs7_cert.pem")),
+
+ erl_make_certs:der_to_pem(filename:join(Datadir, "new_pkcs7_cert.pem"), [Entry]),
+
+ PKCS7 = public_key:der_decode('ContentInfo', DerPKCS7),
+ PKCS7 = public_key:pem_entry_decode(Entry),
+
+ Entry = public_key:pem_entry_encode('ContentInfo', PKCS7).
+
%%--------------------------------------------------------------------
cert_pem(doc) ->
[""];
diff --git a/lib/public_key/test/public_key_SUITE_data/pkcs7_cert.pem b/lib/public_key/test/public_key_SUITE_data/pkcs7_cert.pem
new file mode 100644
index 0000000000..9b450a22c5
--- /dev/null
+++ b/lib/public_key/test/public_key_SUITE_data/pkcs7_cert.pem
@@ -0,0 +1,23 @@
+-----BEGIN PKCS7-----
+MIID6QYJKoZIhvcNAQcCoIID2jCCA9YCAQExADALBgkqhkiG9w0BBwGgggO8MIID
+uDCCAyGgAwIBAgIBAjANBgkqhkiG9w0BAQUFADCBgzEOMAwGA1UEAxMFb3RwQ0Ex
+EzARBgNVBAsTCkVybGFuZyBPVFAxFDASBgNVBAoTC0VyaWNzc29uIEFCMQswCQYD
+VQQGEwJTRTESMBAGA1UEBxMJU3RvY2tob2xtMSUwIwYJKoZIhvcNAQkBFhZwZXRl
+ckBlcml4LmVyaWNzc29uLnNlMB4XDTA4MDEwOTA4MjkzMFoXDTE3MTExNzA4Mjkz
+MFowgYQxDzANBgNVBAMTBnNlcnZlcjETMBEGA1UECxMKRXJsYW5nIE9UUDEUMBIG
+A1UEChMLRXJpY3Nzb24gQUIxCzAJBgNVBAYTAlNFMRIwEAYDVQQHEwlTdG9ja2hv
+bG0xJTAjBgkqhkiG9w0BCQEWFnBldGVyQGVyaXguZXJpY3Nzb24uc2UwgZ8wDQYJ
+KoZIhvcNAQEBBQADgY0AMIGJAoGBAKR20HPrkDGdiavHUyWwFEQwta2dmtF2eQZZ
+i9Xk68UJYbuU7CikHs2srkrwzj0OPIqbp/xOBNzJ7Kch0o4yO6vcEAiSCJ6AB4uS
+M742hrYW4qXgc18K6PqTwSuKr94sn3qQuo4hF/ymCxLrnSicrNpzGOz9A0Lf2+Vk
+6hV0BtdHAgMBAAGjggE3MIIBMzAJBgNVHRMEAjAAMAsGA1UdDwQEAwIF4DAdBgNV
+HQ4EFgQUi19l/qhEwHP/CUeaEjWy4GhOBRIwgbMGA1UdIwSBqzCBqIAUBquANDqk
+uHayvZ0uKOVtkd59AZuhgYykgYkwgYYxETAPBgNVBAMTCGVybGFuZ0NBMRMwEQYD
+VQQLEwpFcmxhbmcgT1RQMRQwEgYDVQQKEwtFcmljc3NvbiBBQjESMBAGA1UEBxMJ
+U3RvY2tob2xtMQswCQYDVQQGEwJTRTElMCMGCSqGSIb3DQEJARYWcGV0ZXJAZXJp
+eC5lcmljc3Nvbi5zZYIBATAhBgNVHREEGjAYgRZwZXRlckBlcml4LmVyaWNzc29u
+LnNlMCEGA1UdEgQaMBiBFnBldGVyQGVyaXguZXJpY3Nzb24uc2UwDQYJKoZIhvcN
+AQEFBQADgYEAzHGutrGMSeC3Di7Z8d65SM7jZLrkkusmL+D2oPVIOGrfZbVuyfDK
+U/nImm99z+lhC/N3JEEpB6PgAYSskfVdBL3LoxbUTaCn/+G3A/G8NfRVIYyANTBe
+NW6ueNpjnauLzcwpyXpu3vp1VBg8wBePtGTBIbRHRgtwwHRXAddE/WuhADEA
+-----END PKCS7-----
diff --git a/lib/public_key/vsn.mk b/lib/public_key/vsn.mk
index c8165fa247..b8af89d040 100644
--- a/lib/public_key/vsn.mk
+++ b/lib/public_key/vsn.mk
@@ -1 +1 @@
-PUBLIC_KEY_VSN = 0.16
+PUBLIC_KEY_VSN = 0.17
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index 30d4f45a32..aac4b462a2 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -142,6 +142,11 @@
some reason, the other algorithm is tried. The default is
to try <c><![CDATA[ssh_rsa]]></c> first.</p>
</item>
+ <tag><c><![CDATA[{pref_public_key_algs, list()}]]></c></tag>
+ <item>
+	<p>List of public key algorithms to try to use; ssh_rsa and ssh_dsa are available.
+	This option overrides <c><![CDATA[{public_key_alg, ssh_rsa | ssh_dsa}]]></c>.</p>
+ </item>
<tag><c><![CDATA[{connect_timeout, timeout()}]]></c></tag>
<item>
<p>Sets a timeout on the transport layer connection. Defaults to infinity.</p>
@@ -178,6 +183,10 @@
<c><![CDATA[add_host_key/3]]></c>. This is considered
somewhat experimental and will be better documented later on.</p>
</item>
+      <tag><c><![CDATA[{quiet_mode, boolean()}]]></c></tag>
+ <item>
+	<p>If <c>true</c>, the client will not print anything during authorization.</p>
+ </item>
<tag><c><![CDATA[{fd, file_descriptor()}]]></c></tag>
<item>
<p>Allow an existing file-descriptor to be used
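As an illustration only (host, port and user name are hypothetical), the two new client options documented above could be combined in a connect call such as:

    %% Sketch: try ssh_dsa before ssh_rsa and keep the client quiet
    %% during authorization.
    {ok, ConnRef} =
        ssh:connect("host.example.com", 22,
                    [{pref_public_key_algs, [ssh_dsa, ssh_rsa]},
                     {quiet_mode, true},
                     {user, "alice"},
                     {silently_accept_hosts, true}]).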
diff --git a/lib/ssh/src/ssh.appup.src b/lib/ssh/src/ssh.appup.src
index d08dbafc32..6ba32e018f 100644
--- a/lib/ssh/src/ssh.appup.src
+++ b/lib/ssh/src/ssh.appup.src
@@ -19,10 +19,12 @@
{"%VSN%",
[
+ {<<"2.1.1">>, [{restart_application, ssh}]},
{<<"2.1">>, [{load_module, ssh_sftpd_file_api, soft_purge, soft_purge, []},
{load_module, ssh_connection, soft_purge, soft_purge, []},
{load_module, ssh_connection_manager, soft_purge, soft_purge, []},
{load_module, ssh_auth, soft_purge, soft_purge, []},
+ {load_module, ssh_connection_handler, soft_purge, soft_purge, []},
{load_module, ssh_channel, soft_purge, soft_purge, []},
{load_module, ssh_file, soft_purge, soft_purge, []}]},
{load_module, ssh, soft_purge, soft_purge, []}]},
@@ -30,14 +32,16 @@
{<<"1\\.*">>, [{restart_application, ssh}]}
],
[
+ {<<"2.1.1">>, [{restart_application, ssh}]},
{<<"2.1">>,[{load_module, ssh_sftpd_file_api, soft_purge, soft_purge, []},
{load_module, ssh_connection, soft_purge, soft_purge, []},
{load_module, ssh_connection_manager, soft_purge, soft_purge, []},
{load_module, ssh_auth, soft_purge, soft_purge, []},
+ {load_module, ssh_connection_handler, soft_purge, soft_purge, []},
{load_module, ssh_channel, soft_purge, soft_purge, []},
{load_module, ssh_file, soft_purge, soft_purge, []}]},
{load_module, ssh, soft_purge, soft_purge, []}]},
{<<"2.0\\.*">>, [{restart_application, ssh}]},
{<<"1\\.*">>, [{restart_application, ssh}]}
]
-}.
+}.
\ No newline at end of file
diff --git a/lib/ssh/src/ssh.erl b/lib/ssh/src/ssh.erl
index db753e0e4e..a569298056 100644
--- a/lib/ssh/src/ssh.erl
+++ b/lib/ssh/src/ssh.erl
@@ -79,7 +79,7 @@ connect(Host, Port, Options, Timeout) ->
DisableIpv6 = proplists:get_value(ip_v6_disabled, SshOptions, false),
Inet = inetopt(DisableIpv6),
do_connect(Host, Port, [Inet | SocketOptions],
- [{host, Host} | fix_idle_time(SshOptions)], Timeout, DisableIpv6)
+ [{user_pid, self()}, {host, Host} | fix_idle_time(SshOptions)], Timeout, DisableIpv6)
end.
do_connect(Host, Port, SocketOptions, SshOptions, Timeout, DisableIpv6) ->
@@ -91,30 +91,39 @@ do_connect(Host, Port, SocketOptions, SshOptions, Timeout, DisableIpv6) ->
{ok, ConnectionSup} ->
{ok, Manager} =
ssh_connection_sup:connection_manager(ConnectionSup),
- receive
- {Manager, is_connected} ->
- {ok, Manager};
- %% When the connection fails
- %% ssh_connection_sup:connection_manager
- %% might return undefined as the connection manager
- %% could allready have terminated, so we will not
- %% match the Manager in this case
- {_, not_connected, {error, econnrefused}} when DisableIpv6 == false ->
- do_connect(Host, Port, proplists:delete(inet6, SocketOptions),
- SshOptions, Timeout, true);
- {_, not_connected, {error, Reason}} ->
- {error, Reason};
- {_, not_connected, Other} ->
- {error, Other}
- after Timeout ->
- ssh_connection_manager:stop(Manager),
- {error, timeout}
- end
+ msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout)
catch
exit:{noproc, _} ->
{error, ssh_not_started}
end.
-
+msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout) ->
+ receive
+ {Manager, is_connected} ->
+ {ok, Manager};
+ %% When the connection fails
+ %% ssh_connection_sup:connection_manager
+ %% might return undefined as the connection manager
+	    %% could already have terminated, so we will not
+ %% match the Manager in this case
+ {_, not_connected, {error, econnrefused}} when DisableIpv6 == false ->
+ do_connect(Host, Port, proplists:delete(inet6, SocketOptions),
+ SshOptions, Timeout, true);
+ {_, not_connected, {error, Reason}} ->
+ {error, Reason};
+ {_, not_connected, Other} ->
+ {error, Other};
+ {From, user_password} ->
+ Pass = io:get_password(),
+ From ! Pass,
+ msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout);
+ {From, question} ->
+ Answer = io:get_line(""),
+ From ! Answer,
+ msg_loop(Manager, DisableIpv6, Host, Port, SocketOptions, SshOptions, Timeout)
+ after Timeout ->
+ ssh_connection_manager:stop(Manager),
+ {error, timeout}
+ end.
%%--------------------------------------------------------------------
%% Function: close(ConnectionRef) -> ok
%%
@@ -349,6 +358,10 @@ handle_option([{exec, _} = Opt | Rest], SocketOptions, SshOptions) ->
handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
handle_option([{auth_methods, _} = Opt | Rest], SocketOptions, SshOptions) ->
handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
+handle_option([{pref_public_key_algs, _} = Opt | Rest], SocketOptions, SshOptions) ->
+ handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
+handle_option([{quiet_mode, _} = Opt|Rest], SocketOptions, SshOptions) ->
+ handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
handle_option([{idle_time, _} = Opt | Rest], SocketOptions, SshOptions) ->
handle_option(Rest, SocketOptions, [handle_ssh_option(Opt) | SshOptions]);
handle_option([Opt | Rest], SocketOptions, SshOptions) ->
@@ -366,6 +379,13 @@ handle_ssh_option({user_interaction, Value} = Opt) when Value == true; Value ==
Opt;
handle_ssh_option({public_key_alg, Value} = Opt) when Value == ssh_rsa; Value == ssh_dsa ->
Opt;
+handle_ssh_option({pref_public_key_algs, Value} = Opt) when is_list(Value), length(Value) >= 1 ->
+ case check_pref_algs(Value) of
+ true ->
+ Opt;
+ _ ->
+ throw({error, {eoptions, Opt}})
+ end;
handle_ssh_option({connect_timeout, Value} = Opt) when is_integer(Value); Value == infinity ->
Opt;
handle_ssh_option({user, Value} = Opt) when is_list(Value) ->
@@ -416,6 +436,9 @@ handle_ssh_option({shell, {Module, Function, _}} = Opt) when is_atom(Module),
Opt;
handle_ssh_option({shell, Value} = Opt) when is_function(Value) ->
Opt;
+handle_ssh_option({quiet_mode, Value} = Opt) when Value == true;
+ Value == false ->
+ Opt;
handle_ssh_option({idle_time, Value} = Opt) when is_integer(Value), Value > 0 ->
Opt;
handle_ssh_option(Opt) ->
@@ -435,7 +458,18 @@ handle_inet_option({reuseaddr, _} = Opt) ->
%% Option verified by inet
handle_inet_option(Opt) ->
Opt.
-
+%% Check preferred algs
+check_pref_algs([]) ->
+ true;
+check_pref_algs([H|T]) ->
+ case H of
+ ssh_dsa ->
+ check_pref_algs(T);
+ ssh_rsa ->
+ check_pref_algs(T);
+ _ ->
+ false
+ end.
%% Has IPv6 been disabled?
inetopt(true) ->
inet;
diff --git a/lib/ssh/src/ssh_auth.erl b/lib/ssh/src/ssh_auth.erl
index aa452a8e09..c436793dc4 100644
--- a/lib/ssh/src/ssh_auth.erl
+++ b/lib/ssh/src/ssh_auth.erl
@@ -71,7 +71,7 @@ password_msg([#ssh{opts = Opts, io_cb = IoCb,
ssh_bits:install_messages(userauth_passwd_messages()),
Password = case proplists:get_value(password, Opts) of
undefined ->
- user_interaction(IoCb);
+ user_interaction(IoCb, Ssh);
PW ->
PW
end,
@@ -89,10 +89,10 @@ password_msg([#ssh{opts = Opts, io_cb = IoCb,
Ssh)
end.
-user_interaction(ssh_no_io) ->
+user_interaction(ssh_no_io, _) ->
not_ok;
-user_interaction(IoCb) ->
- IoCb:read_password("ssh password: ").
+user_interaction(IoCb, Ssh) ->
+ IoCb:read_password("ssh password: ", Ssh).
%% See RFC 4256 for info on keyboard-interactive
@@ -118,15 +118,37 @@ init_userauth_request_msg(#ssh{opts = Opts} = Ssh) ->
service = "ssh-connection",
method = "none",
data = <<>>},
- FirstAlg = algorithm(proplists:get_value(public_key_alg, Opts,
- ?PREFERRED_PK_ALG)),
- SecondAlg = other_alg(FirstAlg),
- AllowUserInt = proplists:get_value(user_interaction, Opts, true),
- Prefs = method_preference(FirstAlg, SecondAlg, AllowUserInt),
- ssh_transport:ssh_packet(Msg, Ssh#ssh{user = User,
- userauth_preference = Prefs,
- userauth_methods = none,
- service = "ssh-connection"});
+ case proplists:get_value(pref_public_key_algs, Opts, false) of
+ false ->
+ FirstAlg = algorithm(proplists:get_value(public_key_alg, Opts,
+ ?PREFERRED_PK_ALG)),
+ SecondAlg = other_alg(FirstAlg),
+ AllowUserInt = proplists:get_value(user_interaction, Opts, true),
+ Prefs = method_preference(FirstAlg, SecondAlg, AllowUserInt),
+ ssh_transport:ssh_packet(Msg, Ssh#ssh{user = User,
+ userauth_preference = Prefs,
+ userauth_methods = none,
+ service = "ssh-connection"});
+ Algs ->
+ FirstAlg = algorithm(lists:nth(1, Algs)),
+ case length(Algs) =:= 2 of
+ true ->
+ SecondAlg = other_alg(FirstAlg),
+ AllowUserInt = proplists:get_value(user_interaction, Opts, true),
+ Prefs = method_preference(FirstAlg, SecondAlg, AllowUserInt),
+ ssh_transport:ssh_packet(Msg, Ssh#ssh{user = User,
+ userauth_preference = Prefs,
+ userauth_methods = none,
+ service = "ssh-connection"});
+ _ ->
+ AllowUserInt = proplists:get_value(user_interaction, Opts, true),
+ Prefs = method_preference(FirstAlg, AllowUserInt),
+ ssh_transport:ssh_packet(Msg, Ssh#ssh{user = User,
+ userauth_preference = Prefs,
+ userauth_methods = none,
+ service = "ssh-connection"})
+ end
+ end;
{error, no_user} ->
ErrStr = "Could not determine the users name",
throw(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_ILLEGAL_USER_NAME,
@@ -287,6 +309,15 @@ method_preference(Alg1, Alg2, false) ->
{"publickey", ?MODULE, publickey_msg,[Alg2]},
{"password", ?MODULE, password_msg, []}
].
+method_preference(Alg1, true) ->
+ [{"publickey", ?MODULE, publickey_msg, [Alg1]},
+ {"password", ?MODULE, password_msg, []},
+ {"keyboard-interactive", ?MODULE, keyboard_interactive_msg, []}
+ ];
+method_preference(Alg1, false) ->
+ [{"publickey", ?MODULE, publickey_msg, [Alg1]},
+ {"password", ?MODULE, password_msg, []}
+ ].
user_name(Opts) ->
Env = case os:type() of
@@ -370,11 +401,11 @@ keyboard_interact_get_responses(IoCb, Opts, Name, Instr, PromptInfos) ->
%% Special case/fallback for just one prompt
%% (assumed to be the password prompt)
case proplists:get_value(password, Opts) of
- undefined -> keyboard_interact(IoCb, Name, Instr, PromptInfos);
+ undefined -> keyboard_interact(IoCb, Name, Instr, PromptInfos, Opts);
PW -> [PW]
end;
undefined ->
- keyboard_interact(IoCb, Name, Instr, PromptInfos);
+ keyboard_interact(IoCb, Name, Instr, PromptInfos, Opts);
KbdInteractFun ->
Prompts = lists:map(fun({Prompt, _Echo}) -> Prompt end,
PromptInfos),
@@ -388,15 +419,15 @@ keyboard_interact_get_responses(IoCb, Opts, Name, Instr, PromptInfos) ->
end
end.
-keyboard_interact(IoCb, Name, Instr, Prompts) ->
+keyboard_interact(IoCb, Name, Instr, Prompts, Opts) ->
if Name /= "" -> IoCb:format("~s", [Name]);
true -> ok
end,
if Instr /= "" -> IoCb:format("~s", [Instr]);
true -> ok
end,
- lists:map(fun({Prompt, true}) -> IoCb:read_line(Prompt);
- ({Prompt, false}) -> IoCb:read_password(Prompt)
+ lists:map(fun({Prompt, true}) -> IoCb:read_line(Prompt, Opts);
+ ({Prompt, false}) -> IoCb:read_password(Prompt, Opts)
end,
Prompts).
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 5b3d1b8a1b..d8950a7b67 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -718,8 +718,18 @@ init_ssh(server = Role, Vsn, Version, Options, Socket) ->
available_host_keys = supported_host_keys(Role, KeyCb, Options)
}.
-supported_host_keys(client, _, _) ->
- ["ssh-rsa", "ssh-dss"];
+supported_host_keys(client, _, Options) ->
+ try
+ case extract_algs(proplists:get_value(pref_public_key_algs, Options, false), []) of
+ false ->
+ ["ssh-rsa", "ssh-dss"];
+ Algs ->
+ Algs
+ end
+ catch
+ exit:Reason ->
+ {stop, {shutdown, Reason}}
+ end;
supported_host_keys(server, KeyCb, Options) ->
lists:foldl(fun(Type, Acc) ->
case available_host_key(KeyCb, Type, Options) of
@@ -731,7 +741,19 @@ supported_host_keys(server, KeyCb, Options) ->
end, [],
%% Prefered alg last so no need to reverse
["ssh-dss", "ssh-rsa"]).
-
+extract_algs(false, _) ->
+ false;
+extract_algs([],[]) ->
+ false;
+extract_algs([], NewList) ->
+ lists:reverse(NewList);
+extract_algs([H|T], NewList) ->
+ case H of
+ ssh_dsa ->
+ extract_algs(T, ["ssh-dss"|NewList]);
+ ssh_rsa ->
+ extract_algs(T, ["ssh-rsa"|NewList])
+ end.
available_host_key(KeyCb, "ssh-dss"= Alg, Opts) ->
case KeyCb:host_key('ssh-dss', Opts) of
{ok, _} ->
diff --git a/lib/ssh/src/ssh_connection_manager.erl b/lib/ssh/src/ssh_connection_manager.erl
index 1384740bfa..0c1eee5186 100644
--- a/lib/ssh/src/ssh_connection_manager.erl
+++ b/lib/ssh/src/ssh_connection_manager.erl
@@ -557,7 +557,7 @@ handle_info({same_user, _}, State) ->
handle_info(ssh_connected, #state{role = client, client = Pid}
= State) ->
Pid ! {self(), is_connected},
- {noreply, State#state{connected = true}};
+ {noreply, State#state{connected = true, opts = handle_password(State#state.opts)}};
handle_info(ssh_connected, #state{role = server} = State) ->
{noreply, State#state{connected = true}};
@@ -570,6 +570,47 @@ handle_info({'DOWN', _Ref, process, ChannelPid, _Reason}, State) ->
handle_info({'EXIT', _Sup, Reason}, State) ->
{stop, Reason, State}.
+handle_password(Opts) ->
+ handle_rsa_password(handle_dsa_password(handle_normal_password(Opts))).
+handle_normal_password(Opts) ->
+ case proplists:get_value(ssh_opts, Opts, false) of
+ false ->
+ Opts;
+ SshOpts ->
+ case proplists:get_value(password, SshOpts, false) of
+ false ->
+ Opts;
+ _Password ->
+ NewOpts = [{password, undefined}|lists:keydelete(password, 1, SshOpts)],
+ [{ssh_opts, NewOpts}|lists:keydelete(ssh_opts, 1, Opts)]
+ end
+ end.
+handle_dsa_password(Opts) ->
+ case proplists:get_value(ssh_opts, Opts, false) of
+ false ->
+ Opts;
+ SshOpts ->
+ case proplists:get_value(dsa_pass_phrase, SshOpts, false) of
+ false ->
+ Opts;
+ _Password ->
+ NewOpts = [{dsa_pass_phrase, undefined}|lists:keydelete(dsa_pass_phrase, 1, SshOpts)],
+ [{ssh_opts, NewOpts}|lists:keydelete(ssh_opts, 1, Opts)]
+ end
+ end.
+handle_rsa_password(Opts) ->
+ case proplists:get_value(ssh_opts, Opts, false) of
+ false ->
+ Opts;
+ SshOpts ->
+ case proplists:get_value(rsa_pass_phrase, SshOpts, false) of
+ false ->
+ Opts;
+ _Password ->
+ NewOpts = [{rsa_pass_phrase, undefined}|lists:keydelete(rsa_pass_phrase, 1, SshOpts)],
+ [{ssh_opts, NewOpts}|lists:keydelete(ssh_opts, 1, Opts)]
+ end
+ end.
%%--------------------------------------------------------------------
%% Function: terminate(Reason, State) -> void()
%% Description: This function is called by a gen_server when it is about to
diff --git a/lib/ssh/src/ssh_io.erl b/lib/ssh/src/ssh_io.erl
index 1dbd097423..17a7cebb4a 100644
--- a/lib/ssh/src/ssh_io.erl
+++ b/lib/ssh/src/ssh_io.erl
@@ -23,37 +23,52 @@
-module(ssh_io).
--export([yes_no/1, read_password/1, read_line/1, format/2]).
+-export([yes_no/2, read_password/2, read_line/2, format/2]).
-import(lists, [reverse/1]).
+-include("ssh.hrl").
+read_line(Prompt, Ssh) ->
+ format("~s", [listify(Prompt)]),
+ proplists:get_value(user_pid, Ssh) ! {self(), question},
+ receive
+ Answer ->
+ Answer
+ end.
-read_line(Prompt) when is_list(Prompt) ->
- io:get_line(list_to_atom(Prompt));
-read_line(Prompt) when is_atom(Prompt) ->
- io:get_line(Prompt).
-
-read_ln(Prompt) ->
- trim(read_line(Prompt)).
-
-yes_no(Prompt) ->
+yes_no(Prompt, Ssh) ->
io:format("~s [y/n]?", [Prompt]),
- case read_ln('') of
- "y" -> yes;
- "n" -> no;
- "Y" -> yes;
- "N" -> no;
- _ ->
- io:format("please answer y or n\n"),
- yes_no(Prompt)
+ proplists:get_value(user_pid, Ssh#ssh.opts) ! {self(), question},
+ receive
+ Answer ->
+ case trim(Answer) of
+ "y" -> yes;
+ "n" -> no;
+ "Y" -> yes;
+ "N" -> no;
+ y -> yes;
+ n -> no;
+ _ ->
+ io:format("please answer y or n\n"),
+ yes_no(Prompt, Ssh)
+ end
end.
-read_password(Prompt) ->
+read_password(Prompt, Ssh) ->
format("~s", [listify(Prompt)]),
- case io:get_password() of
- "" ->
- read_password(Prompt);
- Pass -> Pass
+ case is_list(Ssh) of
+ false ->
+ proplists:get_value(user_pid, Ssh#ssh.opts) ! {self(), user_password};
+ _ ->
+ proplists:get_value(user_pid, Ssh) ! {self(), user_password}
+ end,
+ receive
+ Answer ->
+ case Answer of
+ "" ->
+ read_password(Prompt, Ssh);
+ Pass -> Pass
+ end
end.
listify(A) when is_atom(A) ->
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index 1f912c9bdf..7f6e7d9946 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -133,7 +133,7 @@ kex_dh_gex_messages() ->
].
yes_no(Ssh, Prompt) ->
- (Ssh#ssh.io_cb):yes_no(Prompt).
+ (Ssh#ssh.io_cb):yes_no(Prompt, Ssh).
connect(ConnectionSup, Address, Port, SocketOpts, Opts) ->
Timeout = proplists:get_value(connect_timeout, Opts, infinity),
diff --git a/lib/ssh/vsn.mk b/lib/ssh/vsn.mk
index defa47f824..921ec2206a 100644
--- a/lib/ssh/vsn.mk
+++ b/lib/ssh/vsn.mk
@@ -1,5 +1,5 @@
#-*-makefile-*- ; force emacs to enter makefile-mode
-SSH_VSN = 2.1.1
+SSH_VSN = 2.1.2
APP_VSN = "ssh-$(SSH_VSN)"
diff --git a/lib/ssl/src/ssl.appup.src b/lib/ssl/src/ssl.appup.src
index 76550fa04b..c118c129e8 100644
--- a/lib/ssl/src/ssl.appup.src
+++ b/lib/ssl/src/ssl.appup.src
@@ -1,14 +1,21 @@
%% -*- erlang -*-
{"%VSN%",
[
- {"5.0.1", [{restart_application, ssl}]},
- {"5.0", [{restart_application, ssl}]},
+ {"5.1", [
+ {load_module, ssl_connection, soft_purge, soft_purge, []}
+ ]
+ },
+ {<<"5.0\\*">>, [{restart_application, ssl}]},
{<<"4\\.*">>, [{restart_application, ssl}]},
{<<"3\\.*">>, [{restart_application, ssl}]}
],
[
- {"5.0.1", [{restart_application, ssl}]},
- {"5.0", [{restart_application, ssl}]},
+ {"5.1", [
+ {load_module, ssl_connection, soft_purge, soft_purge, []}
+ ]
+ },
+ {"5.1", [{restart_application, ssl}]},
+ {<<"5.0\\*">>, [{restart_application, ssl}]},
{<<"4\\.*">>, [{restart_application, ssl}]},
{<<"3\\.*">>, [{restart_application, ssl}]}
]}.
diff --git a/lib/ssl/src/ssl.erl b/lib/ssl/src/ssl.erl
index 7788f758ac..771bfa5739 100644
--- a/lib/ssl/src/ssl.erl
+++ b/lib/ssl/src/ssl.erl
@@ -869,10 +869,10 @@ internal_inet_values() ->
socket_options(InetValues) ->
#socket_options{
- mode = proplists:get_value(mode, InetValues),
- header = proplists:get_value(header, InetValues),
- active = proplists:get_value(active, InetValues),
- packet = proplists:get_value(packet, InetValues),
+ mode = proplists:get_value(mode, InetValues, lists),
+ header = proplists:get_value(header, InetValues, 0),
+ active = proplists:get_value(active, InetValues, active),
+ packet = proplists:get_value(packet, InetValues, 0),
packet_size = proplists:get_value(packet_size, InetValues)
}.
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index 1319b54d6b..407ec14b0a 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -119,7 +119,7 @@ send(Pid, Data) ->
sync_send_all_state_event(Pid, {application_data,
%% iolist_to_binary should really
%% be called iodata_to_binary()
- erlang:iolist_to_binary(Data)}, infinity).
+ erlang:iolist_to_binary(Data)}).
%%--------------------------------------------------------------------
-spec recv(pid(), integer(), timeout()) ->
@@ -128,7 +128,7 @@ send(Pid, Data) ->
%% Description: Receives data when active = false
%%--------------------------------------------------------------------
recv(Pid, Length, Timeout) ->
- sync_send_all_state_event(Pid, {recv, Length}, Timeout).
+ sync_send_all_state_event(Pid, {recv, Length, Timeout}).
%%--------------------------------------------------------------------
-spec connect(host(), inet:port_number(), port(), {#ssl_options{}, #socket_options{}},
pid(), tuple(), timeout()) ->
@@ -165,7 +165,7 @@ ssl_accept(Port, Socket, Opts, User, CbInfo, Timeout) ->
%% Description: Starts ssl handshake.
%%--------------------------------------------------------------------
handshake(#sslsocket{pid = Pid}, Timeout) ->
- case sync_send_all_state_event(Pid, start, Timeout) of
+ case sync_send_all_state_event(Pid, {start, Timeout}) of
connected ->
ok;
Error ->
@@ -331,15 +331,15 @@ init([Role, Host, Port, Socket, {SSLOpts0, _} = Options, User, CbInfo]) ->
#state{}) -> gen_fsm_state_return().
%%--------------------------------------------------------------------
hello(start, #state{host = Host, port = Port, role = client,
- ssl_options = SslOpts,
- session = #session{own_certificate = Cert} = Session0,
- session_cache = Cache, session_cache_cb = CacheCb,
- transport_cb = Transport, socket = Socket,
- connection_states = ConnectionStates0,
- renegotiation = {Renegotiation, _}} = State0) ->
+ ssl_options = SslOpts,
+ session = #session{own_certificate = Cert} = Session0,
+ session_cache = Cache, session_cache_cb = CacheCb,
+ transport_cb = Transport, socket = Socket,
+ connection_states = ConnectionStates0,
+ renegotiation = {Renegotiation, _}} = State0) ->
Hello = ssl_handshake:client_hello(Host, Port, ConnectionStates0, SslOpts,
Cache, CacheCb, Renegotiation, Cert),
-
+
Version = Hello#client_hello.client_version,
Handshake0 = ssl_handshake:init_handshake_history(),
{BinMsg, ConnectionStates, Handshake} =
@@ -370,23 +370,22 @@ hello(#server_hello{cipher_suite = CipherSuite,
renegotiation = {Renegotiation, _},
ssl_options = SslOptions} = State0) ->
case ssl_handshake:hello(Hello, SslOptions, ConnectionStates0, Renegotiation) of
- #alert{} = Alert ->
- handle_own_alert(Alert, ReqVersion, hello, State0),
- {stop, normal, State0};
-
+ #alert{} = Alert ->
+ handle_own_alert(Alert, ReqVersion, hello, State0),
+ {stop, {shutdown, own_alert}, State0};
{Version, NewId, ConnectionStates, NextProtocol} ->
{KeyAlgorithm, _, _, _} =
ssl_cipher:suite_definition(CipherSuite),
PremasterSecret = make_premaster_secret(ReqVersion, KeyAlgorithm),
-
- NewNextProtocol = case NextProtocol of
- undefined ->
- State0#state.next_protocol;
- _ ->
- NextProtocol
- end,
-
+
+ NewNextProtocol = case NextProtocol of
+ undefined ->
+ State0#state.next_protocol;
+ _ ->
+ NextProtocol
+ end,
+
State = State0#state{key_algorithm = KeyAlgorithm,
hashsign_algorithm = default_hashsign(Version, KeyAlgorithm),
negotiated_version = Version,
@@ -394,13 +393,13 @@ hello(#server_hello{cipher_suite = CipherSuite,
premaster_secret = PremasterSecret,
expecting_next_protocol_negotiation = NextProtocol =/= undefined,
next_protocol = NewNextProtocol},
-
+
case ssl_session:is_new(OldId, NewId) of
true ->
handle_new_session(NewId, CipherSuite, Compression,
State#state{connection_states = ConnectionStates});
false ->
- handle_resumed_session(NewId, State#state{connection_states = ConnectionStates})
+ handle_resumed_session(NewId, State#state{connection_states = ConnectionStates})
end
end;
@@ -419,8 +418,7 @@ hello(Hello = #client_hello{client_version = ClientVersion},
negotiated_version = Version,
session = Session});
#alert{} = Alert ->
- handle_own_alert(Alert, ClientVersion, hello, State),
- {stop, normal, State}
+ handle_own_alert(Alert, ClientVersion, hello, State)
end;
hello(timeout, State) ->
@@ -451,8 +449,7 @@ abbreviated(#finished{verify_data = Data} = Finished,
next_state_connection(abbreviated,
ack_connection(State#state{connection_states = ConnectionStates}));
#alert{} = Alert ->
- handle_own_alert(Alert, Version, abbreviated, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, abbreviated, State)
end;
abbreviated(#finished{verify_data = Data} = Finished,
@@ -472,8 +469,7 @@ abbreviated(#finished{verify_data = Data} = Finished,
connection_states =
ConnectionStates}));
#alert{} = Alert ->
- handle_own_alert(Alert, Version, abbreviated, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, abbreviated, State)
end;
abbreviated(timeout, State) ->
@@ -497,8 +493,7 @@ certify(#certificate{asn1_certificates = []},
fail_if_no_peer_cert = true}} =
State) ->
Alert = ?ALERT_REC(?FATAL,?HANDSHAKE_FAILURE),
- handle_own_alert(Alert, Version, certify, State),
- {stop, normal, State};
+ handle_own_alert(Alert, Version, certify, State);
certify(#certificate{asn1_certificates = []},
#state{role = server,
@@ -521,8 +516,7 @@ certify(#certificate{} = Cert,
handle_peer_cert(PeerCert, PublicKeyInfo,
State#state{client_certificate_requested = false});
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, certify, State)
end;
certify(#server_key_exchange{} = KeyExchangeMsg,
@@ -534,8 +528,7 @@ certify(#server_key_exchange{} = KeyExchangeMsg,
{Record, State} = next_record(State1),
next_state(certify, certify, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end;
certify(#server_key_exchange{} = Msg,
@@ -559,8 +552,7 @@ certify(#server_hello_done{},
State = State0#state{connection_states = ConnectionStates},
client_certify_and_key_exchange(State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end;
%% Master secret is calculated from premaster_secret
@@ -578,8 +570,7 @@ certify(#server_hello_done{},
session = Session},
client_certify_and_key_exchange(State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end;
certify(#client_key_exchange{} = Msg,
@@ -595,8 +586,7 @@ certify(#client_key_exchange{exchange_keys = Keys},
certify_client_key_exchange(ssl_handshake:decode_client_key(Keys, KeyAlg, Version), State)
catch
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, certify, State)
end;
@@ -621,8 +611,7 @@ certify_client_key_exchange(#encrypted_premaster_secret{premaster_secret= EncPMS
{Record, State} = next_record(State1),
next_state(certify, cipher, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end;
certify_client_key_exchange(#client_diffie_hellman_public{dh_public = ClientPublicDhKey},
@@ -635,8 +624,7 @@ certify_client_key_exchange(#client_diffie_hellman_public{dh_public = ClientPubl
{Record, State} = next_record(State1),
next_state(certify, cipher, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end.
%%--------------------------------------------------------------------
@@ -665,8 +653,7 @@ cipher(#certificate_verify{signature = Signature, hashsign_algorithm = CertHashS
{Record, State} = next_record(State0),
next_state(cipher, cipher, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, cipher, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, cipher, State0)
end;
% client must send a next protocol message if we are expecting it
@@ -692,8 +679,7 @@ cipher(#finished{verify_data = Data} = Finished,
Session = register_session(Role, Host, Port, Session0),
cipher_role(Role, Data, Session, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, cipher, State),
- {stop, normal, State}
+ handle_own_alert(Alert, Version, cipher, State)
end;
% only allowed to send next_protocol message after change cipher spec
@@ -788,7 +774,8 @@ handle_sync_event({application_data, Data}, From, StateName,
State#state{send_queue = queue:in({From, Data}, Queue)},
get_timeout(State)};
-handle_sync_event(start, StartFrom, hello, State) ->
+handle_sync_event({start, Timeout}, StartFrom, hello, State) ->
+ start_or_recv_cancel_timer(Timeout, StartFrom),
hello(start, State#state{start_or_recv_from = StartFrom});
%% The two clauses below could happen if a server upgrades a socket in
@@ -798,12 +785,14 @@ handle_sync_event(start, StartFrom, hello, State) ->
%% mode before telling the client that it is willing to upgrade
%% and before calling ssl:ssl_accept/2. These clauses are
%% here to make sure it is the user's problem and not ours if
-%% they upgrade a active socket.
-handle_sync_event(start, _, connection, State) ->
+%% they upgrade an active socket.
+handle_sync_event({start,_}, _, connection, State) ->
{reply, connected, connection, State, get_timeout(State)};
-handle_sync_event(start, _From, error, {Error, State = #state{}}) ->
+handle_sync_event({start,_}, _From, error, {Error, State = #state{}}) ->
{stop, {shutdown, Error}, {error, Error}, State};
-handle_sync_event(start, StartFrom, StateName, State) ->
+
+handle_sync_event({start, Timeout}, StartFrom, StateName, State) ->
+ start_or_recv_cancel_timer(Timeout, StartFrom),
{next_state, StateName, State#state{start_or_recv_from = StartFrom}, get_timeout(State)};
handle_sync_event(close, _, StateName, State) ->
@@ -835,12 +824,14 @@ handle_sync_event({shutdown, How0}, _, StateName,
{stop, normal, Error, State}
end;
-handle_sync_event({recv, N}, RecvFrom, connection = StateName, State0) ->
+handle_sync_event({recv, N, Timeout}, RecvFrom, connection = StateName, State0) ->
+ start_or_recv_cancel_timer(Timeout, RecvFrom),
passive_receive(State0#state{bytes_to_read = N, start_or_recv_from = RecvFrom}, StateName);
%% Doing renegotiate wait with handling request until renegotiate is
%% finished. Will be handled by next_state_is_connection/2.
-handle_sync_event({recv, N}, RecvFrom, StateName, State) ->
+handle_sync_event({recv, N, Timeout}, RecvFrom, StateName, State) ->
+ start_or_recv_cancel_timer(Timeout, RecvFrom),
{next_state, StateName, State#state{bytes_to_read = N, start_or_recv_from = RecvFrom},
get_timeout(State)};
@@ -963,7 +954,7 @@ handle_info({Protocol, _, Data}, StateName,
next_state(StateName, StateName, Record, State);
#alert{} = Alert ->
handle_normal_shutdown(Alert, StateName, State0),
- {stop, normal, State0}
+ {stop, {shutdown, own_alert}, State0}
end;
handle_info({CloseTag, Socket}, StateName,
@@ -984,7 +975,7 @@ handle_info({CloseTag, Socket}, StateName,
ok
end,
handle_normal_shutdown(?ALERT_REC(?FATAL, ?CLOSE_NOTIFY), StateName, State),
- {stop, normal, State};
+ {stop, {shutdown, transport_closed}, State};
handle_info({ErrorTag, Socket, econnaborted}, StateName,
#state{socket = Socket, start_or_recv_from = StartFrom, role = Role,
@@ -1005,7 +996,14 @@ handle_info({'DOWN', MonitorRef, _, _, _}, _,
handle_info(allow_renegotiate, StateName, State) ->
{next_state, StateName, State#state{allow_renegotiate = true}, get_timeout(State)};
-
+
+handle_info({cancel_start_or_recv, RecvFrom}, connection = StateName, #state{start_or_recv_from = RecvFrom} = State) ->
+ gen_fsm:reply(RecvFrom, {error, timeout}),
+ {next_state, StateName, State#state{start_or_recv_from = undefined}, get_timeout(State)};
+
+handle_info({cancel_start_or_recv, _RecvFrom}, StateName, State) ->
+ {next_state, StateName, State, get_timeout(State)};
+
handle_info(Msg, StateName, State) ->
Report = io_lib:format("SSL: Got unexpected info: ~p ~n", [Msg]),
error_logger:info_report(Report),
@@ -1022,6 +1020,20 @@ terminate(_, _, #state{terminated = true}) ->
%% we want to guarantee that Transport:close has been called
%% when ssl:close/1 returns.
ok;
+
+terminate({shutdown, transport_closed}, StateName, #state{send_queue = SendQueue,
+ renegotiation = Renegotiate} = State) ->
+ handle_unrecv_data(StateName, State),
+ handle_trusted_certs_db(State),
+ notify_senders(SendQueue),
+ notify_renegotiater(Renegotiate);
+
+terminate({shutdown, own_alert}, _StateName, #state{send_queue = SendQueue,
+ renegotiation = Renegotiate} = State) ->
+ handle_trusted_certs_db(State),
+ notify_senders(SendQueue),
+ notify_renegotiater(Renegotiate);
+
terminate(Reason, connection, #state{negotiated_version = Version,
connection_states = ConnectionStates,
transport_cb = Transport,
@@ -1032,16 +1044,14 @@ terminate(Reason, connection, #state{negotiated_version = Version,
notify_renegotiater(Renegotiate),
BinAlert = terminate_alert(Reason, Version, ConnectionStates),
Transport:send(Socket, BinAlert),
- workaround_transport_delivery_problems(Socket, Transport, Reason),
- Transport:close(Socket);
+ workaround_transport_delivery_problems(Socket, Transport);
-terminate(Reason, _StateName, #state{transport_cb = Transport,
+terminate(_Reason, _StateName, #state{transport_cb = Transport,
socket = Socket, send_queue = SendQueue,
renegotiation = Renegotiate} = State) ->
handle_trusted_certs_db(State),
notify_senders(SendQueue),
notify_renegotiater(Renegotiate),
- workaround_transport_delivery_problems(Socket, Transport, Reason),
Transport:close(Socket).
%%--------------------------------------------------------------------
@@ -1216,18 +1226,13 @@ init_diffie_hellman(DbHandle,_, DHParamFile, server) ->
end.
sync_send_all_state_event(FsmPid, Event) ->
- sync_send_all_state_event(FsmPid, Event, infinity).
-
-sync_send_all_state_event(FsmPid, Event, Timeout) ->
- try gen_fsm:sync_send_all_state_event(FsmPid, Event, Timeout)
+ try gen_fsm:sync_send_all_state_event(FsmPid, Event, infinity)
catch
exit:{noproc, _} ->
{error, closed};
- exit:{timeout, _} ->
- {error, timeout};
exit:{normal, _} ->
{error, closed};
- exit:{shutdown, _} ->
+ exit:{{shutdown, _},_} ->
{error, closed}
end.
@@ -1324,8 +1329,7 @@ new_server_hello(#server_hello{cipher_suite = CipherSuite,
next_state(hello, certify, Record, State)
catch
#alert{} = Alert ->
- handle_own_alert(Alert, Version, hello, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, hello, State0)
end.
resumed_server_hello(#state{session = Session,
@@ -1345,8 +1349,7 @@ resumed_server_hello(#state{session = Session,
{Record, State} = next_record(State2),
next_state(hello, abbreviated, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, hello, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, hello, State0)
end.
handle_new_session(NewId, CipherSuite, Compression, #state{session = Session0} = State0) ->
@@ -1371,8 +1374,7 @@ handle_resumed_session(SessId, #state{connection_states = ConnectionStates0,
session = Session}),
next_state(hello, abbreviated, Record, State);
#alert{} = Alert ->
- handle_own_alert(Alert, Version, hello, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, hello, State0)
end.
@@ -1389,8 +1391,7 @@ client_certify_and_key_exchange(#state{negotiated_version = Version} =
next_state(certify, cipher, Record, State)
catch
throw:#alert{} = Alert ->
- handle_own_alert(Alert, Version, certify, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, certify, State0)
end.
do_client_certify_and_key_exchange(State0) ->
@@ -1962,8 +1963,7 @@ handle_tls_handshake(Handle, StateName, #state{tls_packets = [Packet | Packets]}
end.
next_state(Current,_, #alert{} = Alert, #state{negotiated_version = Version} = State) ->
- handle_own_alert(Alert, Version, Current, State),
- {stop, normal, State};
+ handle_own_alert(Alert, Version, Current, State);
next_state(_,Next, no_record, State) ->
{next_state, Next, State, get_timeout(State)};
@@ -2001,8 +2001,7 @@ next_state(Current, Next, #ssl_tls{type = ?HANDSHAKE, fragment = Data},
State = State0#state{tls_packets = Packets, tls_handshake_buffer = Buf},
handle_tls_handshake(Handle, Next, State)
catch throw:#alert{} = Alert ->
- handle_own_alert(Alert, Version, Current, State0),
- {stop, normal, State0}
+ handle_own_alert(Alert, Version, Current, State0)
end;
next_state(_, StateName, #ssl_tls{type = ?APPLICATION_DATA, fragment = Data}, State0) ->
@@ -2258,13 +2257,13 @@ handle_alert(#alert{level = ?FATAL} = Alert, StateName,
handle_alert(#alert{level = ?WARNING, description = ?CLOSE_NOTIFY} = Alert,
StateName, State) ->
handle_normal_shutdown(Alert, StateName, State),
- {stop, normal, State};
+ {stop, {shutdown, peer_close}, State};
handle_alert(#alert{level = ?WARNING, description = ?NO_RENEGOTIATION} = Alert, StateName,
#state{log_alert = Log, renegotiation = {true, internal}} = State) ->
log_alert(Log, StateName, Alert),
handle_normal_shutdown(Alert, StateName, State),
- {stop, normal, State};
+ {stop, {shutdown, peer_close}, State};
handle_alert(#alert{level = ?WARNING, description = ?NO_RENEGOTIATION} = Alert, StateName,
#state{log_alert = Log, renegotiation = {true, From}} = State0) ->
@@ -2317,8 +2316,8 @@ handle_own_alert(Alert, Version, StateName,
try %% Try to tell the other side
{BinMsg, _} =
encode_alert(Alert, Version, ConnectionStates),
- linux_workaround_transport_delivery_problems(Alert, Socket),
- Transport:send(Socket, BinMsg)
+ Transport:send(Socket, BinMsg),
+ workaround_transport_delivery_problems(Socket, Transport)
catch _:_ -> %% Can crash if we are in an uninitialized state
ignore
end,
@@ -2327,7 +2326,8 @@ handle_own_alert(Alert, Version, StateName,
handle_normal_shutdown(Alert,StateName, State)
catch _:_ ->
ok
- end.
+ end,
+ {stop, {shutdown, own_alert}, State}.
handle_normal_shutdown(Alert, _, #state{socket = Socket,
start_or_recv_from = StartFrom,
@@ -2342,8 +2342,7 @@ handle_normal_shutdown(Alert, StateName, #state{socket = Socket,
handle_unexpected_message(Msg, Info, #state{negotiated_version = Version} = State) ->
Alert = ?ALERT_REC(?FATAL,?UNEXPECTED_MESSAGE),
- handle_own_alert(Alert, Version, {Info, Msg}, State),
- {stop, normal, State}.
+ handle_own_alert(Alert, Version, {Info, Msg}, State).
make_premaster_secret({MajVer, MinVer}, rsa) ->
Rand = ssl:random_bytes(?NUM_OF_PREMASTERSECRET_BYTES-2),
@@ -2401,36 +2400,35 @@ notify_renegotiater({true, From}) when not is_atom(From) ->
notify_renegotiater(_) ->
ok.
-terminate_alert(Reason, Version, ConnectionStates) when Reason == normal; Reason == shutdown;
+terminate_alert(Reason, Version, ConnectionStates) when Reason == normal;
Reason == user_close ->
{BinAlert, _} = encode_alert(?ALERT_REC(?WARNING, ?CLOSE_NOTIFY),
Version, ConnectionStates),
BinAlert;
+terminate_alert({shutdown, _}, Version, ConnectionStates) ->
+ {BinAlert, _} = encode_alert(?ALERT_REC(?WARNING, ?CLOSE_NOTIFY),
+ Version, ConnectionStates),
+ BinAlert;
+
terminate_alert(_, Version, ConnectionStates) ->
{BinAlert, _} = encode_alert(?ALERT_REC(?FATAL, ?INTERNAL_ERROR),
Version, ConnectionStates),
BinAlert.
-workaround_transport_delivery_problems(_,_, user_close) ->
- ok;
-workaround_transport_delivery_problems(Socket, Transport, _) ->
+workaround_transport_delivery_problems(Socket, gen_tcp = Transport) ->
%% Standard trick to try to make sure all
- %% data sent to to tcp port is really sent
- %% before tcp port is closed so that the peer will
- %% get a correct error message.
+ %% data sent to the tcp port is really delivered to the
+ %% peer application before tcp port is closed so that the peer will
+ %% get the correct TLS alert message and not only a transport close.
inet:setopts(Socket, [{active, false}]),
Transport:shutdown(Socket, write),
- Transport:recv(Socket, 0).
-
-linux_workaround_transport_delivery_problems(#alert{level = ?FATAL}, Socket) ->
- case os:type() of
- {unix, linux} ->
- inet:setopts(Socket, [{nodelay, true}]);
- _ ->
- ok
- end;
-linux_workaround_transport_delivery_problems(_, _) ->
- ok.
+    %% Will return when the other side has closed or after 30 s,
+    %% i.e. we do not want to hang if something goes wrong
+    %% with the network, but we want to maximise the odds that the
+    %% peer application gets all data sent on the tcp connection.
+ Transport:recv(Socket, 0, 30000);
+workaround_transport_delivery_problems(Socket, Transport) ->
+ Transport:close(Socket).
get_timeout(#state{ssl_options=#ssl_options{hibernate_after = undefined}}) ->
infinity;
@@ -2503,3 +2501,26 @@ default_hashsign(_Version, KeyExchange)
default_hashsign(_Version, KeyExchange)
when KeyExchange == dh_anon ->
{null, anon}.
+
+start_or_recv_cancel_timer(infinity, _RecvFrom) ->
+ ok;
+start_or_recv_cancel_timer(Timeout, RecvFrom) ->
+ erlang:send_after(Timeout, self(), {cancel_start_or_recv, RecvFrom}).
+
+handle_unrecv_data(StateName, #state{socket = Socket, transport_cb = Transport} = State) ->
+ inet:setopts(Socket, [{active, false}]),
+ case Transport:recv(Socket, 0, 0) of
+ {error, closed} ->
+ ok;
+ {ok, Data} ->
+ handle_close_alert(Data, StateName, State)
+ end.
+
+handle_close_alert(Data, StateName, State) ->
+ case next_tls_record(Data, State) of
+ #ssl_tls{type = ?ALERT, fragment = EncAlerts} ->
+ [Alert|_] = decode_alerts(EncAlerts),
+ handle_normal_shutdown(Alert, StateName, State);
+ _ ->
+ ok
+ end.
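As an aside, the timeout handling added to ssl_connection.erl above follows one pattern throughout: arm a one-shot timer with erlang:send_after/3 when a {start, Timeout} or {recv, N, Timeout} request arrives, and reply {error, timeout} if the cancel message fires while that caller is still pending. A minimal stand-alone sketch of the same pattern, in a hypothetical module that is not part of the patch:

-module(recv_timeout_demo).
-export([recv/2]).

%% Wait for a {data, Ref, Bin} message for at most Timeout milliseconds.
recv(Ref, Timeout) when is_integer(Timeout), Timeout >= 0 ->
    TRef = erlang:send_after(Timeout, self(), {cancel, Ref}),
    receive
        {data, Ref, Bin} ->
            %% A stale {cancel, Ref} may still arrive after this point;
            %% the real code above avoids acting on it by matching the
            %% pending caller in start_or_recv_from before replying.
            erlang:cancel_timer(TRef),
            {ok, Bin};
        {cancel, Ref} ->
            {error, timeout}
    end.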
diff --git a/lib/ssl/vsn.mk b/lib/ssl/vsn.mk
index e381b73c27..bc8b8fd039 100644
--- a/lib/ssl/vsn.mk
+++ b/lib/ssl/vsn.mk
@@ -1 +1 @@
-SSL_VSN = 5.1
+SSL_VSN = 5.1.1
diff --git a/lib/stdlib/test/dict_SUITE.erl b/lib/stdlib/test/dict_SUITE.erl
index c46fc47b34..df9c769c67 100644
--- a/lib/stdlib/test/dict_SUITE.erl
+++ b/lib/stdlib/test/dict_SUITE.erl
@@ -53,7 +53,7 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(_Case, Config) ->
- ?line Dog = ?t:timetrap(?t:minutes(5)),
+ Dog = ?t:timetrap(?t:minutes(5)),
[{watchdog,Dog}|Config].
end_per_testcase(_Case, Config) ->
@@ -65,22 +65,22 @@ create(Config) when is_list(Config) ->
test_all(fun create_1/1).
create_1(M) ->
- ?line D0 = M:empty(),
- ?line [] = M:to_list(D0),
- ?line 0 = M:size(D0),
+ D0 = M(empty, []),
+ [] = M(to_list, D0),
+ 0 = M(size, D0),
D0.
store(Config) when is_list(Config) ->
test_all([{0,132},{253,258},{510,514}], fun store_1/2).
store_1(List, M) ->
- ?line D0 = M:from_list(List),
+ D0 = M(from_list, List),
%% Make sure that we get the same result by inserting
%% elements one at the time.
- ?line D1 = foldl(fun({K,V}, Dict) -> M:enter(K, V, Dict) end,
- M:empty(), List),
- ?line true = M:equal(D0, D1),
+ D1 = foldl(fun({K,V}, Dict) -> M(enter, {K,V,Dict}) end,
+ M(empty, []), List),
+ true = M(equal, {D0,D1}),
D0.
%%%
@@ -98,7 +98,7 @@ dict_mods() ->
[Orddict,Dict,Gb].
test_all(Tester) ->
- ?line Pids = [spawn_tester(M, Tester) || M <- dict_mods()],
+ Pids = [spawn_tester(M, Tester) || M <- dict_mods()],
collect_all(Pids, []).
spawn_tester(M, Tester) ->
@@ -106,7 +106,7 @@ spawn_tester(M, Tester) ->
spawn_link(fun() ->
random:seed(1, 2, 42),
S = Tester(M),
- Res = {M:size(S),lists:sort(M:to_list(S))},
+ Res = {M(size, S),lists:sort(M(to_list, S))},
Parent ! {result,self(),Res}
end).
diff --git a/lib/stdlib/test/dict_test_lib.erl b/lib/stdlib/test/dict_test_lib.erl
index 92a75dad89..7167014310 100644
--- a/lib/stdlib/test/dict_test_lib.erl
+++ b/lib/stdlib/test/dict_test_lib.erl
@@ -17,67 +17,48 @@
%% %CopyrightEnd%
%%
--module(dict_test_lib, [Mod,Equal]).
+-module(dict_test_lib).
--export([module/0,equal/2,empty/0,size/1,to_list/1,from_list/1,
- enter/3,delete/2,lookup/2]).
+-export([new/2]).
-module() ->
- Mod.
-
-equal(X, Y) ->
- Equal(X, Y).
+new(Mod, Eq) ->
+ fun (enter, {K,V,D}) -> enter(Mod, K, V, D);
+ (empty, []) -> empty(Mod);
+ (equal, {D1,D2}) -> Eq(D1, D2);
+ (from_list, L) -> from_list(Mod, L);
+ (module, []) -> Mod;
+ (size, D) -> Mod:size(D);
+ (to_list, D) -> to_list(Mod, D)
+ end.
-empty() ->
+empty(Mod) ->
case erlang:function_exported(Mod, new, 0) of
false -> Mod:empty();
true -> Mod:new()
end.
-size(S) ->
- Mod:size(S).
-
-to_list(S) ->
- Mod:to_list(S).
+to_list(Mod, D) ->
+ Mod:to_list(D).
-from_list(S) ->
+from_list(Mod, L) ->
case erlang:function_exported(Mod, from_orddict, 1) of
false ->
- Mod:from_list(S);
+ Mod:from_list(L);
true ->
%% The gb_trees module has no from_list/1 function.
%%
%% The keys in S are not unique. To make sure
%% that we pick the same key/value pairs as
%% dict/orddict, first convert the list to an orddict.
- Orddict = orddict:from_list(S),
+ Orddict = orddict:from_list(L),
Mod:from_orddict(Orddict)
end.
%% Store new value into dictionary or update previous value in dictionary.
-enter(Key, Val, Dict) ->
+enter(Mod, Key, Val, Dict) ->
case erlang:function_exported(Mod, store, 3) of
false ->
Mod:enter(Key, Val, Dict);
true ->
Mod:store(Key, Val, Dict)
end.
-
-%% Delete an EXISTING key.
-delete(Key, Dict) ->
- case erlang:function_exported(Mod, delete, 2) of
- true -> Mod:delete(Key, Dict);
- false -> Mod:erase(Key, Dict)
- end.
-
-%% -> none | {value,Value}
-lookup(Key, Dict) ->
- case erlang:function_exported(Mod, lookup, 2) of
- false ->
- case Mod:find(Key, Dict) of
- error -> none;
- {ok,Value} -> {value,Value}
- end;
- true ->
- Mod:lookup(Key, Dict)
- end.
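For orientation, a short usage sketch of the new fun-based wrapper; this is an assumed example mirroring how the rewritten suites drive it, with an illustrative Eq fun and test values:

dict_wrapper_demo() ->
    %% Equality for dict terms: compare the sorted key/value lists.
    Eq = fun(D1, D2) ->
                 lists:sort(dict:to_list(D1)) =:= lists:sort(dict:to_list(D2))
         end,
    M = dict_test_lib:new(dict, Eq),
    D0 = M(empty, []),
    D1 = M(enter, {key, 1, D0}),
    true = M(equal, {D1, M(from_list, [{key, 1}])}),
    1 = M(size, D1),
    dict = M(module, []).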
diff --git a/lib/stdlib/test/epp_SUITE.erl b/lib/stdlib/test/epp_SUITE.erl
index f79414db49..77c615d6d9 100644
--- a/lib/stdlib/test/epp_SUITE.erl
+++ b/lib/stdlib/test/epp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -1236,6 +1236,13 @@ otp_8911(doc) ->
otp_8911(suite) ->
[];
otp_8911(Config) when is_list(Config) ->
+ case test_server:is_cover() of
+ true ->
+ {skip, "Testing cover, so can not run when cover is already running"};
+ false ->
+ do_otp_8911(Config)
+ end.
+do_otp_8911(Config) ->
?line {ok, CWD} = file:get_cwd(),
?line ok = file:set_cwd(?config(priv_dir, Config)),
diff --git a/lib/stdlib/test/io_proto_SUITE.erl b/lib/stdlib/test/io_proto_SUITE.erl
index 661d57c85b..17e69f7c1c 100644
--- a/lib/stdlib/test/io_proto_SUITE.erl
+++ b/lib/stdlib/test/io_proto_SUITE.erl
@@ -96,7 +96,8 @@ groups() ->
[].
init_per_suite(Config) ->
- Config.
+ DefShell = get_default_shell(),
+ [{default_shell,DefShell}|Config].
end_per_suite(_Config) ->
ok.
@@ -124,20 +125,25 @@ unicode_prompt(doc) ->
["Test that an Unicode prompt does not crash the shell"];
unicode_prompt(Config) when is_list(Config) ->
?line PA = filename:dirname(code:which(?MODULE)),
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline, "shell:prompt_func({io_proto_SUITE,uprompt})."},
- {getline, "default"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "\"hej\\n\""},
- {putline, "io:setopts([{binary,true}])."},
- {getline, "ok"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "<<\"hej\\n\">>"}
- ],[],[],"-pa \""++ PA++"\""),
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ ok;
+ new ->
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline, "shell:prompt_func({io_proto_SUITE,uprompt})."},
+ {getline, "default"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "\"hej\\n\""},
+ {putline, "io:setopts([{binary,true}])."},
+ {getline, "ok"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "<<\"hej\\n\">>"}
+ ],[],[],"-pa \""++ PA++"\"")
+ end,
%% And one with oldshell
?line rtnode([{putline,""},
{putline, "2."},
@@ -234,21 +240,26 @@ setopts_getopts(Config) when is_list(Config) ->
lists:sort(io:getopts(RFile)),
?line eof = io:get_line(RFile,''),
?line file:close(RFile),
- %% So, lets test another node with new interactive shell
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline, "lists:keyfind(binary,1,io:getopts())."},
- {getline, "{binary,false}"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "\"hej\\n\""},
- {putline, "io:setopts([{binary,true}])."},
- {getline, "ok"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "<<\"hej\\n\">>"}
- ],[]),
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ ok;
+ new ->
+ %% So, lets test another node with new interactive shell
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline, "lists:keyfind(binary,1,io:getopts())."},
+ {getline, "{binary,false}"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "\"hej\\n\""},
+ {putline, "io:setopts([{binary,true}])."},
+ {getline, "ok"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "<<\"hej\\n\">>"}
+ ],[])
+ end,
%% And one with oldshell
?line rtnode([{putline,""},
{putline, "2."},
@@ -433,21 +444,27 @@ unicode_options(Config) when is_list(Config) ->
end,
?line [ ok = CannotWriteFile(F,FailDir) || F <- AllNoBom ],
- %% OK, time for the group_leaders...
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline, "lists:keyfind(encoding,1,io:getopts())."},
- {getline, "{encoding,latin1}"},
- {putline, "io:format(\"~ts~n\",[[1024]])."},
- {getline, "\\x{400}"},
- {putline, "io:setopts([unicode])."},
- {getline, "ok"},
- {putline, "io:format(\"~ts~n\",[[1024]])."},
- {getline,
- binary_to_list(unicode:characters_to_binary(
- [1024],unicode,utf8))}
- ],[],"LC_CTYPE=\""++get_lc_ctype()++"\"; export LC_CTYPE; "),
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ ok;
+ new ->
+ %% OK, time for the group_leaders...
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline, "lists:keyfind(encoding,1,io:getopts())."},
+ {getline, "{encoding,latin1}"},
+ {putline, "io:format(\"~ts~n\",[[1024]])."},
+ {getline, "\\x{400}"},
+ {putline, "io:setopts([unicode])."},
+ {getline, "ok"},
+ {putline, "io:format(\"~ts~n\",[[1024]])."},
+ {getline,
+ binary_to_list(unicode:characters_to_binary(
+ [1024],unicode,utf8))}
+ ],[],"LC_CTYPE=\""++get_lc_ctype()++"\"; "
+ "export LC_CTYPE; ")
+ end,
?line rtnode([{putline,""},
{putline, "2."},
{getline_re, ".*2."},
@@ -680,23 +697,28 @@ binary_options(Config) when is_list(Config) ->
?line file:close(F3),
%% OK, time for the group_leaders...
%% io:format(standard_error,"Hmmm:~w~n",["<<\""++binary_to_list(<<"\345\344\366"/utf8>>)++"\\n\">>"]),
- ?line rtnode([{putline,""},
- {putline, "2."},
- {getline, "2"},
- {putline, "lists:keyfind(binary,1,io:getopts())."},
- {getline, "{binary,false}"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "\"hej\\n\""},
- {putline, "io:setopts([{binary,true},unicode])."},
- {getline, "ok"},
- {putline, "io:get_line('')."},
- {putline, "hej"},
- {getline, "<<\"hej\\n\">>"},
- {putline, "io:get_line('')."},
- {putline, binary_to_list(<<"\345\344\366"/utf8>>)},
- {getline, "<<\""++binary_to_list(unicode:characters_to_binary(<<"\345\344\366"/utf8>>,latin1,utf8))++"\\n\">>"}
- ],[]),
+ case proplists:get_value(default_shell,Config) of
+ old ->
+ ok;
+ new ->
+ ?line rtnode([{putline,""},
+ {putline, "2."},
+ {getline, "2"},
+ {putline, "lists:keyfind(binary,1,io:getopts())."},
+ {getline, "{binary,false}"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "\"hej\\n\""},
+ {putline, "io:setopts([{binary,true},unicode])."},
+ {getline, "ok"},
+ {putline, "io:get_line('')."},
+ {putline, "hej"},
+ {getline, "<<\"hej\\n\">>"},
+ {putline, "io:get_line('')."},
+ {putline, binary_to_list(<<"\345\344\366"/utf8>>)},
+ {getline, "<<\""++binary_to_list(unicode:characters_to_binary(<<"\345\344\366"/utf8>>,latin1,utf8))++"\\n\">>"}
+ ],[])
+ end,
%% And one with oldshell
?line rtnode([{putline,""},
{putline, "2."},
@@ -1146,9 +1168,11 @@ read_modes_gl(suite) ->
read_modes_gl(doc) ->
["Test various modes when reading from the group leader from another machine"];
read_modes_gl(Config) when is_list(Config) ->
- case get_progs() of
- {error,Reason} ->
+ case {get_progs(),proplists:get_value(default_shell,Config)} of
+ {{error,Reason},_} ->
{skipped,Reason};
+ {_,old} ->
+	    {skipped,"No new shell"};
_ ->
read_modes_gl_1(Config,answering_machine1)
end.
@@ -1754,6 +1778,17 @@ get_data_within(Port, Timeout, Acc) ->
timeout
end.
+get_default_shell() ->
+ try
+ rtnode([{putline,""},
+ {putline, "whereis(user_drv)."},
+ {getline, "undefined"}],[]),
+ old
+ catch E:R ->
+ ?dbg({E,R}),
+ new
+ end.
+
%%
%% Test I/O-server
%%
diff --git a/lib/stdlib/test/sets_SUITE.erl b/lib/stdlib/test/sets_SUITE.erl
index f284276bd7..e2bcdd18ce 100644
--- a/lib/stdlib/test/sets_SUITE.erl
+++ b/lib/stdlib/test/sets_SUITE.erl
@@ -35,7 +35,7 @@
-import(lists, [foldl/3,reverse/1]).
init_per_testcase(_Case, Config) ->
- ?line Dog = ?t:timetrap(?t:minutes(5)),
+ Dog = ?t:timetrap(?t:minutes(5)),
[{watchdog,Dog}|Config].
end_per_testcase(_Case, Config) ->
@@ -70,65 +70,65 @@ create(Config) when is_list(Config) ->
test_all(fun create_1/1).
create_1(M) ->
- ?line S0 = M:empty(),
- ?line [] = M:to_list(S0),
- ?line 0 = M:size(S0),
- ?line true = M:is_empty(S0),
+ S0 = M(empty, []),
+ [] = M(to_list, S0),
+ 0 = M(size, S0),
+ true = M(is_empty, S0),
E = make_ref(),
- ?line One = M:singleton(E),
- ?line 1 = M:size(One),
- ?line false = M:is_empty(One),
- [E] = M:to_list(One),
+ One = M(singleton, E),
+ 1 = M(size, One),
+ false = M(is_empty, One),
+ [E] = M(to_list, One),
S0.
add_element(Config) when is_list(Config) ->
test_all([{0,132},{253,258},{510,514}], fun add_element_1/2).
add_element_1(List, M) ->
- ?line S = M:from_list(List),
- ?line SortedSet = lists:usort(List),
- ?line SortedSet = lists:sort(M:to_list(S)),
+ S = M(from_list, List),
+ SortedSet = lists:usort(List),
+ SortedSet = lists:sort(M(to_list, S)),
%% Make sure that we get the same result by inserting
%% elements one at the time.
- ?line S2 = foldl(fun(El, Set) -> M:add_element(El, Set) end,
- M:empty(), List),
- ?line true = M:equal(S, S2),
+ S2 = foldl(fun(El, Set) -> M(add_element, {El,Set}) end,
+ M(empty, []), List),
+ true = M(equal, {S,S2}),
%% Insert elements, randomly delete inserted elements,
%% and re-inserted all deleted elements at the end.
- ?line S3 = add_element_del(List, M, M:empty(), [], []),
- ?line true = M:equal(S2, S3),
- ?line true = M:equal(S, S3),
+ S3 = add_element_del(List, M, M(empty, []), [], []),
+ true = M(equal, {S2,S3}),
+ true = M(equal, {S,S3}),
S.
add_element_del([H|T], M, S, Del, []) ->
- add_element_del(T, M, M:add_element(H, S), Del, [H]);
+ add_element_del(T, M, M(add_element, {H,S}), Del, [H]);
add_element_del([H|T], M, S0, Del, Inserted) ->
- S1 = M:add_element(H, S0),
+ S1 = M(add_element, {H,S0}),
case random:uniform(3) of
1 ->
OldEl = lists:nth(random:uniform(length(Inserted)), Inserted),
- S = M:del_element(OldEl, S1),
+ S = M(del_element, {OldEl,S1}),
add_element_del(T, M, S, [OldEl|Del], [H|Inserted]);
_ ->
add_element_del(T, M, S1, Del, [H|Inserted])
end;
add_element_del([], M, S, Del, _) ->
- M:union(S, M:from_list(Del)).
+ M(union, {S,M(from_list, Del)}).
del_element(Config) when is_list(Config) ->
test_all([{0,132},{253,258},{510,514},{1022,1026}], fun del_element_1/2).
del_element_1(List, M) ->
- ?line S0 = M:from_list(List),
- ?line Empty = foldl(fun(El, Set) -> M:del_element(El, Set) end, S0, List),
- ?line Empty = M:empty(),
- ?line M:is_empty(Empty),
- ?line S1 = foldl(fun(El, Set) ->
- M:add_element(El, Set)
- end, S0, reverse(List)),
- ?line true = M:equal(S0, S1),
+ S0 = M(from_list, List),
+ Empty = foldl(fun(El, Set) -> M(del_element, {El,Set}) end, S0, List),
+ Empty = M(empty, []),
+ true = M(is_empty, Empty),
+ S1 = foldl(fun(El, Set) ->
+ M(add_element, {El,Set})
+ end, S0, reverse(List)),
+ true = M(equal, {S0,S1}),
S1.
subtract(Config) when is_list(Config) ->
@@ -138,23 +138,23 @@ subtract(Config) when is_list(Config) ->
test_all([{2,69},{126,130},{253,258},511,512,{1023,1030}], fun subtract_1/2).
subtract_empty(M) ->
- ?line Empty = M:empty(),
- ?line true = M:is_empty(M:subtract(Empty, Empty)),
- M:subtract(Empty, Empty).
+ Empty = M(empty, []),
+ true = M(is_empty, M(subtract, {Empty,Empty})),
+ M(subtract, {Empty,Empty}).
subtract_1(List, M) ->
- ?line S0 = M:from_list(List),
- ?line Empty = M:empty(),
+ S0 = M(from_list, List),
+ Empty = M(empty, []),
%% Trivial cases.
- ?line true = M:is_empty(M:subtract(Empty, S0)),
- ?line true = M:equal(S0, M:subtract(S0, Empty)),
+ true = M(is_empty, M(subtract, {Empty,S0})),
+ true = M(equal, {S0,M(subtract, {S0,Empty})}),
%% Not so trivial.
- ?line subtract_check(List, mutate_some(remove_some(List, 0.4)), M),
- ?line subtract_check(List, rnd_list(length(List) div 2 + 5), M),
- ?line subtract_check(List, rnd_list(length(List) div 7 + 9), M),
- ?line subtract_check(List, mutate_some(List), M).
+ subtract_check(List, mutate_some(remove_some(List, 0.4)), M),
+ subtract_check(List, rnd_list(length(List) div 2 + 5), M),
+ subtract_check(List, rnd_list(length(List) div 7 + 9), M),
+ subtract_check(List, mutate_some(List), M).
subtract_check(A, B, M) ->
one_subtract_check(B, A, M),
@@ -163,12 +163,12 @@ subtract_check(A, B, M) ->
one_subtract_check(A, B, M) ->
ASorted = lists:usort(A),
BSorted = lists:usort(B),
- ASet = M:from_list(A),
- BSet = M:from_list(B),
- DiffSet = M:subtract(ASet, BSet),
+ ASet = M(from_list, A),
+ BSet = M(from_list, B),
+ DiffSet = M(subtract, {ASet,BSet}),
Diff = ASorted -- BSorted,
- true = M:equal(DiffSet, M:from_list(Diff)),
- Diff = lists:sort(M:to_list(DiffSet)),
+ true = M(equal, {DiffSet,M(from_list, Diff)}),
+ Diff = lists:sort(M(to_list, DiffSet)),
DiffSet.
intersection(Config) when is_list(Config) ->
@@ -176,60 +176,60 @@ intersection(Config) when is_list(Config) ->
test_all([{1,65},{126,130},{253,259},{499,513},{1023,1025}], fun intersection_1/2).
intersection_1(List, M) ->
- ?line S0 = M:from_list(List),
+ S0 = M(from_list, List),
%% Intersection with self.
- ?line true = M:equal(S0, M:intersection(S0, S0)),
- ?line true = M:equal(S0, M:intersection([S0,S0])),
- ?line true = M:equal(S0, M:intersection([S0,S0,S0])),
- ?line true = M:equal(S0, M:intersection([S0])),
+ true = M(equal, {S0,M(intersection, {S0,S0})}),
+ true = M(equal, {S0,M(intersection, [S0,S0])}),
+ true = M(equal, {S0,M(intersection, [S0,S0,S0])}),
+ true = M(equal, {S0,M(intersection, [S0])}),
%% Intersection with empty.
- ?line Empty = M:empty(),
- ?line true = M:equal(Empty, M:intersection(S0, Empty)),
- ?line true = M:equal(Empty, M:intersection([S0,Empty,S0,Empty])),
+ Empty = M(empty, []),
+ true = M(equal, {Empty,M(intersection, {S0,Empty})}),
+ true = M(equal, {Empty,M(intersection, [S0,Empty,S0,Empty])}),
%% The intersection of no sets is undefined.
- ?line {'EXIT',_} = (catch M:intersection([])),
+ {'EXIT',_} = (catch M(intersection, [])),
%% Disjoint sets.
- ?line Disjoint = [{El} || El <- List],
- ?line DisjointSet = M:from_list(Disjoint),
- ?line M:is_empty(M:intersection(S0, DisjointSet)),
+ Disjoint = [{El} || El <- List],
+ DisjointSet = M(from_list, Disjoint),
+ true = M(is_empty, M(intersection, {S0,DisjointSet})),
%% Disjoint, different sizes.
- ?line M:is_empty(M:intersection(S0, M:from_list(remove_some(Disjoint, 0.3)))),
- ?line M:is_empty(M:intersection(S0, M:from_list(remove_some(Disjoint, 0.7)))),
- ?line M:is_empty(M:intersection(S0, M:from_list(remove_some(Disjoint, 0.9)))),
- ?line M:is_empty(M:intersection(M:from_list(remove_some(List, 0.3)), DisjointSet)),
- ?line M:is_empty(M:intersection(M:from_list(remove_some(List, 0.5)), DisjointSet)),
- ?line M:is_empty(M:intersection(M:from_list(remove_some(List, 0.9)), DisjointSet)),
+ [begin
+ SomeRemoved = M(from_list, remove_some(Disjoint, HowMuch)),
+ true = M(is_empty, M(intersection, {S0,SomeRemoved})),
+ MoreRemoved = M(from_list, remove_some(List, HowMuch)),
+ true = M(is_empty, M(intersection, {MoreRemoved,DisjointSet}))
+ end || HowMuch <- [0.3,0.5,0.7,0.9]],
%% Partial overlap (one or more elements in result set).
%% The sets have almost the same size. (Almost because a duplicated
%% element in the original list could be mutated and not mutated
%% at the same time.)
- ?line PartialOverlap = mutate_some(List, []),
- ?line IntersectionSet = check_intersection(List, PartialOverlap, M),
- ?line false = M:is_empty(IntersectionSet),
+ PartialOverlap = mutate_some(List, []),
+ IntersectionSet = check_intersection(List, PartialOverlap, M),
+ false = M(is_empty, IntersectionSet),
%% Partial overlap, different set sizes. (Intersection possibly empty.)
- ?line check_intersection(List, remove_some(PartialOverlap, 0.1), M),
- ?line check_intersection(List, remove_some(PartialOverlap, 0.3), M),
- ?line check_intersection(List, remove_some(PartialOverlap, 0.5), M),
- ?line check_intersection(List, remove_some(PartialOverlap, 0.7), M),
- ?line check_intersection(List, remove_some(PartialOverlap, 0.9), M),
+ check_intersection(List, remove_some(PartialOverlap, 0.1), M),
+ check_intersection(List, remove_some(PartialOverlap, 0.3), M),
+ check_intersection(List, remove_some(PartialOverlap, 0.5), M),
+ check_intersection(List, remove_some(PartialOverlap, 0.7), M),
+ check_intersection(List, remove_some(PartialOverlap, 0.9), M),
IntersectionSet.
check_intersection(Orig, Mutated, M) ->
- OrigSet = M:from_list(Orig),
- MutatedSet = M:from_list(Mutated),
+ OrigSet = M(from_list, Orig),
+ MutatedSet = M(from_list, Mutated),
Intersection = [El || El <- Mutated, not is_tuple(El)],
SortedIntersection = lists:usort(Intersection),
- IntersectionSet = M:intersection(OrigSet, MutatedSet),
- true = M:equal(IntersectionSet, M:from_list(SortedIntersection)),
- SortedIntersection = lists:sort(M:to_list(IntersectionSet)),
+ IntersectionSet = M(intersection, {OrigSet,MutatedSet}),
+ true = M(equal, {IntersectionSet,M(from_list, SortedIntersection)}),
+ SortedIntersection = lists:sort(M(to_list, IntersectionSet)),
IntersectionSet.
@@ -239,63 +239,63 @@ union(Config) when is_list(Config) ->
test_all([{1,71},{125,129},{254,259},{510,513},{1023,1025}], fun union_1/2).
union_1(List, M) ->
- ?line S = M:from_list(List),
+ S = M(from_list, List),
%% Union with self and empty.
- ?line Empty = M:empty(),
- ?line true = M:equal(S, M:union(S, S)),
- ?line true = M:equal(S, M:union([S,S])),
- ?line true = M:equal(S, M:union([S,S,Empty])),
- ?line true = M:equal(S, M:union([S,Empty,S])),
- ?line true = M:equal(S, M:union(S, Empty)),
- ?line true = M:equal(S, M:union([S])),
- ?line true = M:is_empty(M:union([])),
+ Empty = M(empty, []),
+ true = M(equal, {S,M(union, {S,S})}),
+ true = M(equal, {S,M(union, [S,S])}),
+ true = M(equal, {S,M(union, [S,S,Empty])}),
+ true = M(equal, {S,M(union, [S,Empty,S])}),
+ true = M(equal, {S,M(union, {S,Empty})}),
+ true = M(equal, {S,M(union, [S])}),
+ true = M(is_empty, M(union, [])),
%% Partial overlap.
- ?line check_union(List, remove_some(mutate_some(List), 0.9), M),
- ?line check_union(List, remove_some(mutate_some(List), 0.7), M),
- ?line check_union(List, remove_some(mutate_some(List), 0.5), M),
- ?line check_union(List, remove_some(mutate_some(List), 0.3), M),
- ?line check_union(List, remove_some(mutate_some(List), 0.1), M),
-
- ?line check_union(List, mutate_some(remove_some(List, 0.9)), M),
- ?line check_union(List, mutate_some(remove_some(List, 0.7)), M),
- ?line check_union(List, mutate_some(remove_some(List, 0.5)), M),
- ?line check_union(List, mutate_some(remove_some(List, 0.3)), M),
- ?line check_union(List, mutate_some(remove_some(List, 0.1)), M).
+ check_union(List, remove_some(mutate_some(List), 0.9), M),
+ check_union(List, remove_some(mutate_some(List), 0.7), M),
+ check_union(List, remove_some(mutate_some(List), 0.5), M),
+ check_union(List, remove_some(mutate_some(List), 0.3), M),
+ check_union(List, remove_some(mutate_some(List), 0.1), M),
+
+ check_union(List, mutate_some(remove_some(List, 0.9)), M),
+ check_union(List, mutate_some(remove_some(List, 0.7)), M),
+ check_union(List, mutate_some(remove_some(List, 0.5)), M),
+ check_union(List, mutate_some(remove_some(List, 0.3)), M),
+ check_union(List, mutate_some(remove_some(List, 0.1)), M).
check_union(Orig, Other, M) ->
- OrigSet = M:from_list(Orig),
- OtherSet = M:from_list(Other),
+ OrigSet = M(from_list, Orig),
+ OtherSet = M(from_list, Other),
Union = Orig++Other,
SortedUnion = lists:usort(Union),
- UnionSet = M:union(OrigSet, OtherSet),
- SortedUnion = lists:sort(M:to_list(UnionSet)),
- M:equal(UnionSet, M:from_list(Union)),
+ UnionSet = M(union, {OrigSet,OtherSet}),
+ SortedUnion = lists:sort(M(to_list, UnionSet)),
+ M(equal, {UnionSet,M(from_list, Union)}),
UnionSet.
is_subset(Config) when is_list(Config) ->
test_all([{1,132},{253,270},{299,311}], fun is_subset_1/2).
is_subset_1(List, M) ->
- ?line S = M:from_list(List),
- ?line Empty = M:empty(),
+ S = M(from_list, List),
+ Empty = M(empty, []),
%% Subset of empty and self.
- ?line true = M:is_subset(Empty, Empty),
- ?line true = M:is_subset(Empty, S),
- ?line false = M:is_subset(S, Empty),
- ?line true = M:is_subset(S, S),
+ true = M(is_subset, {Empty,Empty}),
+ true = M(is_subset, {Empty,S}),
+ false = M(is_subset, {S,Empty}),
+ true = M(is_subset, {S,S}),
%% Other cases.
- Res = [?line false = M:is_subset(M:singleton(make_ref()), S),
- ?line true = M:is_subset(M:singleton(hd(List)), S),
- ?line true = check_subset(remove_some(List, 0.1), List, M),
- ?line true = check_subset(remove_some(List, 0.5), List, M),
- ?line true = check_subset(remove_some(List, 0.9), List, M),
- ?line check_subset(mutate_some(List), List, M),
- ?line check_subset(rnd_list(length(List) div 2 + 5), List, M),
- ?line subtract_check(List, rnd_list(length(List) div 7 + 9), M)
+ Res = [false = M(is_subset, {M(singleton, make_ref()),S}),
+ true = M(is_subset, {M(singleton, hd(List)),S}),
+ true = check_subset(remove_some(List, 0.1), List, M),
+ true = check_subset(remove_some(List, 0.5), List, M),
+ true = check_subset(remove_some(List, 0.9), List, M),
+ check_subset(mutate_some(List), List, M),
+ check_subset(rnd_list(length(List) div 2 + 5), List, M),
+ subtract_check(List, rnd_list(length(List) div 7 + 9), M)
],
res_to_set(Res, M, 0, []).
@@ -304,12 +304,12 @@ check_subset(X, Y, M) ->
check_one_subset(X, Y, M).
check_one_subset(X, Y, M) ->
- XSet = M:from_list(X),
- YSet = M:from_list(Y),
+ XSet = M(from_list, X),
+ YSet = M(from_list, Y),
SortedX = lists:usort(X),
SortedY = lists:usort(Y),
IsSubSet = length(SortedY--SortedX) =:= length(SortedY) - length(SortedX),
- IsSubSet = M:is_subset(XSet, YSet),
+ IsSubSet = M(is_subset, {XSet,YSet}),
IsSubSet.
%% Encode all test results as a set to return.
@@ -317,54 +317,54 @@ res_to_set([true|T], M, I, Acc) ->
res_to_set(T, M, I+1, [I|Acc]);
res_to_set([_|T], M, I, Acc) ->
res_to_set(T, M, I+1, Acc);
-res_to_set([], M, _, Acc) -> M:from_list(Acc).
+res_to_set([], M, _, Acc) -> M(from_list, Acc).
is_set(Config) when is_list(Config) ->
%% is_set/1 is tested in the other test cases when its argument
%% is a set. Here test some arguments that makes it return false.
- ?line false = gb_sets:is_set([a,b]),
- ?line false = gb_sets:is_set({a,very,bad,tuple}),
+ false = gb_sets:is_set([a,b]),
+ false = gb_sets:is_set({a,very,bad,tuple}),
- ?line false = sets:is_set([a,b]),
- ?line false = sets:is_set({a,very,bad,tuple}),
+ false = sets:is_set([a,b]),
+ false = sets:is_set({a,very,bad,tuple}),
- ?line false = ordsets:is_set([b,a]),
- ?line false = ordsets:is_set({bad,tuple}),
+ false = ordsets:is_set([b,a]),
+ false = ordsets:is_set({bad,tuple}),
%% Now test values that are known to be bad for all set representations.
test_all(fun is_set_1/1).
is_set_1(M) ->
- ?line false = M:is_set(self()),
- ?line false = M:is_set(blurf),
- ?line false = M:is_set(make_ref()),
- ?line false = M:is_set(<<1,2,3>>),
- ?line false = M:is_set(42),
- ?line false = M:is_set(math:pi()),
- ?line false = M:is_set({}),
- M:empty().
+ false = M(is_set, self()),
+ false = M(is_set, blurf),
+ false = M(is_set, make_ref()),
+ false = M(is_set, <<1,2,3>>),
+ false = M(is_set, 42),
+ false = M(is_set, math:pi()),
+ false = M(is_set, {}),
+ M(empty, []).
fold(Config) when is_list(Config) ->
test_all([{0,71},{125,129},{254,259},{510,513},{1023,1025},{9999,10001}],
fun fold_1/2).
fold_1(List, M) ->
- ?line S = M:from_list(List),
- ?line L = M:fold(fun(E, A) -> [E|A] end, [], S),
- ?line true = lists:sort(L) =:= lists:usort(List),
- M:empty().
+ S = M(from_list, List),
+ L = M(fold, {fun(E, A) -> [E|A] end,[],S}),
+ true = lists:sort(L) =:= lists:usort(List),
+ M(empty, []).
filter(Config) when is_list(Config) ->
test_all([{0,69},{126,130},{254,259},{510,513},{1023,1025},{7999,8000}],
fun filter_1/2).
filter_1(List, M) ->
- ?line S = M:from_list(List),
+ S = M(from_list, List),
IsNumber = fun(X) -> is_number(X) end,
- ?line M:equal(M:from_list(lists:filter(IsNumber, List)),
- M:filter(IsNumber, S)),
- ?line M:filter(fun(X) -> is_atom(X) end, S).
+ M(equal, {M(from_list, lists:filter(IsNumber, List)),
+ M(filter, {IsNumber,S})}),
+ M(filter, {fun(X) -> is_atom(X) end,S}).
%%%
%%% Test specifics for gb_sets.
@@ -375,26 +375,26 @@ take_smallest(Config) when is_list(Config) ->
fun take_smallest_1/2).
take_smallest_1(List, M) ->
- case M:module() of
+ case M(module, []) of
gb_sets -> take_smallest_2(List, M);
_ -> ok
end,
- M:empty().
+ M(empty, []).
take_smallest_2(List0, M) ->
- ?line List = lists:usort(List0),
- ?line S = M:from_list(List0),
+ List = lists:usort(List0),
+ S = M(from_list, List0),
take_smallest_3(S, List, M).
take_smallest_3(S0, List0, M) ->
- case M:is_empty(S0) of
+ case M(is_empty, S0) of
true -> ok;
false ->
- ?line Smallest = hd(List0),
- ?line Smallest = gb_sets:smallest(S0),
- ?line {Smallest,S} = gb_sets:take_smallest(S0),
- ?line List = tl(List0),
- ?line true = gb_sets:to_list(S) =:= List,
+ Smallest = hd(List0),
+ Smallest = gb_sets:smallest(S0),
+ {Smallest,S} = gb_sets:take_smallest(S0),
+ List = tl(List0),
+ true = gb_sets:to_list(S) =:= List,
take_smallest_3(S, List, M)
end.
@@ -403,26 +403,26 @@ take_largest(Config) when is_list(Config) ->
fun take_largest_1/2).
take_largest_1(List, M) ->
- case M:module() of
+ case M(module, []) of
gb_sets -> take_largest_2(List, M);
_ -> ok
end,
- M:empty().
+ M(empty, []).
take_largest_2(List0, M) ->
- ?line List = reverse(lists:usort(List0)),
- ?line S = M:from_list(List0),
+ List = reverse(lists:usort(List0)),
+ S = M(from_list, List0),
take_largest_3(S, List, M).
take_largest_3(S0, List0, M) ->
- case M:is_empty(S0) of
+ case M(is_empty, S0) of
true -> ok;
false ->
- ?line Largest = hd(List0),
- ?line Largest = gb_sets:largest(S0),
- ?line {Largest,S} = gb_sets:take_largest(S0),
- ?line List = tl(List0),
- ?line true = gb_sets:to_list(S) =:= reverse(List),
+ Largest = hd(List0),
+ Largest = gb_sets:largest(S0),
+ {Largest,S} = gb_sets:take_largest(S0),
+ List = tl(List0),
+ true = gb_sets:to_list(S) =:= reverse(List),
take_largest_3(S, List, M)
end.
@@ -441,23 +441,23 @@ sets_mods() ->
[Ordsets,Sets,Gb].
test_all(Tester) ->
- ?line Res = [begin
- random:seed(1, 2, 42),
- S = Tester(M),
- {M:size(S),lists:sort(M:to_list(S))}
- end || M <- sets_mods()],
- ?line all_same(Res).
+ Res = [begin
+ random:seed(1, 2, 42),
+ S = Tester(M),
+ {M(size, S),lists:sort(M(to_list, S))}
+ end || M <- sets_mods()],
+ all_same(Res).
test_all([{Low,High}|T], Tester) ->
test_all(lists:seq(Low, High)++T, Tester);
test_all([Sz|T], Tester) when is_integer(Sz) ->
List = rnd_list(Sz),
- ?line Res = [begin
+ Res = [begin
random:seed(19, 2, Sz),
S = Tester(List, M),
- {M:size(S),lists:sort(M:to_list(S))}
+ {M(size, S),lists:sort(M(to_list, S))}
end || M <- sets_mods()],
- ?line all_same(Res),
+ all_same(Res),
test_all(T, Tester);
test_all([], _) -> ok.
diff --git a/lib/stdlib/test/sets_test_lib.erl b/lib/stdlib/test/sets_test_lib.erl
index bdfb0d59d2..fd4ec2bac3 100644
--- a/lib/stdlib/test/sets_test_lib.erl
+++ b/lib/stdlib/test/sets_test_lib.erl
@@ -17,91 +17,89 @@
%% %CopyrightEnd%
%%
--module(sets_test_lib, [Mod,Equal]).
-
--export([module/0,equal/2,empty/0,from_list/1,to_list/1,singleton/1,
- add_element/2,del_element/2,size/1,is_empty/1,is_set/1,
- intersection/1,intersection/2,subtract/2,
- union/1,union/2,is_subset/2,fold/3,filter/2]).
-
-module() ->
- Mod.
-
-equal(X, Y) ->
- Equal(X, Y).
-
-empty() ->
- Mod:new().
-
-from_list(L) ->
- Mod:from_list(L).
-
-to_list(S) ->
- Mod:to_list(S).
+-module(sets_test_lib).
+
+-export([new/2]).
+
+new(Mod, Eq) ->
+ fun (add_element, {El,S}) -> add_element(Mod, El, S);
+ (del_element, {El,S}) -> del_element(Mod, El, S);
+ (empty, []) -> Mod:new();
+ (equal, {S1,S2}) -> Eq(S1, S2);
+ (filter, {F,S}) -> filter(Mod, F, S);
+ (fold, {F,A,S}) -> fold(Mod, F, A, S);
+ (from_list, L) -> Mod:from_list(L);
+ (intersection, {S1,S2}) -> intersection(Mod, Eq, S1, S2);
+ (intersection, Ss) -> intersection(Mod, Eq, Ss);
+ (is_empty, S) -> is_empty(Mod, S);
+ (is_set, S) -> Mod:is_set(S);
+ (is_subset, {S,Set}) -> is_subset(Mod, Eq, S, Set);
+ (module, []) -> Mod;
+ (singleton, E) -> singleton(Mod, E);
+ (size, S) -> Mod:size(S);
+ (subtract, {S1,S2}) -> subtract(Mod, S1, S2);
+ (to_list, S) -> Mod:to_list(S);
+ (union, {S1,S2}) -> union(Mod, Eq, S1, S2);
+ (union, Ss) -> union(Mod, Eq, Ss)
+ end.
-singleton(E) ->
+singleton(Mod, E) ->
case erlang:function_exported(Mod, singleton, 1) of
true -> Mod:singleton(E);
- false -> from_list([E])
+ false -> Mod:from_list([E])
end.
-add_element(El, S0) ->
+add_element(Mod, El, S0) ->
S = Mod:add_element(El, S0),
true = Mod:is_element(El, S),
- false = is_empty(S),
+ false = is_empty(Mod, S),
true = Mod:is_set(S),
S.
-del_element(El, S0) ->
+del_element(Mod, El, S0) ->
S = Mod:del_element(El, S0),
false = Mod:is_element(El, S),
true = Mod:is_set(S),
S.
-size(S) ->
- Mod:size(S).
-
-is_empty(S) ->
+is_empty(Mod, S) ->
true = Mod:is_set(S),
case erlang:function_exported(Mod, is_empty, 1) of
true -> Mod:is_empty(S);
false -> Mod:size(S) == 0
end.
-is_set(S) ->
- Mod:is_set(S).
-
-intersection(S1, S2) ->
+intersection(Mod, Equal, S1, S2) ->
S = Mod:intersection(S1, S2),
true = Equal(S, Mod:intersection(S2, S1)),
- Disjoint = is_empty(S),
+ Disjoint = is_empty(Mod, S),
Disjoint = Mod:is_disjoint(S1, S2),
Disjoint = Mod:is_disjoint(S2, S1),
S.
-intersection(Ss) ->
+intersection(Mod, Equal, Ss) ->
S = Mod:intersection(Ss),
true = Equal(S, Mod:intersection(lists:reverse(Ss))),
S.
-subtract(S1, S2) ->
+subtract(Mod, S1, S2) ->
S = Mod:subtract(S1, S2),
true = Mod:is_set(S),
true = Mod:size(S) =< Mod:size(S1),
S.
-union(S1, S2) ->
+union(Mod, Equal, S1, S2) ->
S = Mod:union(S1, S2),
true = Equal(S, Mod:union(S2, S1)),
true = Mod:is_set(S),
S.
-union(Ss) ->
+union(Mod, Equal, Ss) ->
S = Mod:union(Ss),
true = Equal(S, Mod:union(lists:reverse(Ss))),
S.
-is_subset(S, Set) ->
+is_subset(Mod, Equal, S, Set) ->
case Mod:is_subset(S, Set) of
false -> false;
true ->
@@ -115,10 +113,10 @@ is_subset(S, Set) ->
true
end.
-fold(F, A, S) ->
+fold(Mod, F, A, S) ->
true = Mod:is_set(S),
Mod:fold(F, A, S).
-filter(F, S) ->
+filter(Mod, F, S) ->
true = Mod:is_set(S),
Mod:filter(F, S).
diff --git a/lib/stdlib/test/stdlib.cover b/lib/stdlib/test/stdlib.cover
index 61f4f064b9..e71be880cb 100644
--- a/lib/stdlib/test/stdlib.cover
+++ b/lib/stdlib/test/stdlib.cover
@@ -1,17 +1,2 @@
%% -*- erlang -*-
{incl_app,stdlib,details}.
-
-{excl_mods,stdlib,
- [erl_parse,
- erl_eval,
- ets,
- filename,
- gen_event,
- gen_server,
- gen,
- lists,
- io,
- io_lib,
- io_lib_format,
- io_lib_pretty,
- proc_lib]}.
diff --git a/lib/test_server/doc/src/test_server.xml b/lib/test_server/doc/src/test_server.xml
index 5bfa42c36f..841cbfbe91 100644
--- a/lib/test_server/doc/src/test_server.xml
+++ b/lib/test_server/doc/src/test_server.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>2007</year>
- <year>2011</year>
+ <year>2012</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -529,6 +529,18 @@ Only valid for peer nodes. Note that slave nodes always
analogy with <c>os:getenv/1</c>), which removes the
environment variable. Only valid for peer nodes. Not
available on VxWorks.</item>
+ <tag><c>{start_cover, false}</c></tag>
+ <item>By default the test server will start cover on all nodes
+ when the test is run with code coverage analysis. To make
+ sure cover is not started on a new node, set this option to
+      <c>false</c>. This can be necessary if the connection to
+      the node will be broken at some point but the node is
+      expected to stay alive. The reason is that a remote cover
+      node cannot continue to run without its main node. Another
+ solution would be to explicitly stop cover on the node
+ before breaking the connection, but in some situations (if
+ old code resides in one or more processes) this is not
+ possible.</item>
</taglist>
</desc>
</func>
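A hedged usage sketch, not taken from the patch: a suite starting a peer node that must outlive a broken connection to the controlling node could pass the new option when starting it. The helper name and node name below are illustrative only:

start_resilient_peer(Config) ->
    %% Keep cover off the peer so it can survive losing contact with
    %% the main (cover) node.
    {ok, Node} = test_server:start_node(my_peer, peer, [{start_cover, false}]),
    [{peer_node, Node} | Config].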
diff --git a/lib/test_server/src/test_server.erl b/lib/test_server/src/test_server.erl
index bcffe896c4..1d3394e0ca 100644
--- a/lib/test_server/src/test_server.erl
+++ b/lib/test_server/src/test_server.erl
@@ -20,15 +20,12 @@
-define(DEFAULT_TIMETRAP_SECS, 60).
-%%% START %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
--export([start/1,start/2]).
-
%%% TEST_SERVER_CTRL INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-export([run_test_case_apply/1,init_target_info/0,init_purify/0]).
--export([cover_compile/1,cover_analyse/2]).
+-export([cover_compile/1,cover_analyse/3]).
%%% TEST_SERVER_SUP INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
--export([get_loc/1]).
+-export([get_loc/1,set_tc_state/1]).
%%% TEST SUITE INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-export([lookup_config/2]).
@@ -60,49 +57,11 @@
-export([]).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
--record(state,{controller,jobs=[]}).
-
-include("test_server_internal.hrl").
-include_lib("kernel/include/file.hrl").
-define(pl2a(M), test_server_sup:package_atom(M)).
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%
-%% **** START *** CODE FOR REMOTE TARGET ONLY ***
-%%
-%% test_server
-%% This process is started only if the test is to be run on a remote target
-%% The process is then started on target
-%% A socket connection is established with the test_server_ctrl process
-%% on host, and information about target is sent to host.
-start([ControllerHost]) when is_atom(ControllerHost) ->
- start(atom_to_list(ControllerHost));
-start(ControllerHost) when is_list(ControllerHost) ->
- start(ControllerHost,?MAIN_PORT).
-start(ControllerHost,ControllerPort) ->
- S = self(),
- Pid = spawn(fun() -> init(ControllerHost,ControllerPort,S) end),
- receive {Pid,started} -> {ok,Pid};
- {Pid,Error} -> Error
- end.
-
-init(Host,Port,Starter) ->
- global:register_name(?MODULE,self()),
- process_flag(trap_exit,true),
- test_server_sup:cleanup_crash_dumps(),
- case gen_tcp:connect(Host,Port, [binary,
- {reuseaddr,true},
- {packet,2}]) of
- {ok,MainSock} ->
- Starter ! {self(),started},
- request(MainSock,{target_info,init_target_info()}),
- loop(#state{controller={Host,MainSock}});
- Error ->
- Starter ! {self(),{error,
- {could_not_contact_controller,Error}}}
- end.
-
init_target_info() ->
[$.|Emu] = code:objfile_extension(),
{_, OTPRel} = init:script_id(),
@@ -118,171 +77,10 @@ init_target_info() ->
username=test_server_sup:get_username(),
cookie=atom_to_list(erlang:get_cookie())}.
-
-loop(#state{controller={_,MainSock}} = State) ->
- receive
- {tcp, MainSock, <<1,Request/binary>>} ->
- State1 = decode_main(binary_to_term(Request),State),
- loop(State1);
- {tcp_closed, MainSock} ->
- gen_tcp:close(MainSock),
- halt();
- {'EXIT',Pid,Reason} ->
- case lists:keysearch(Pid,1,State#state.jobs) of
- {value,{Pid,Name}} ->
- case Reason of
- normal -> ignore;
- _other -> request(MainSock,{job_proc_killed,Name,Reason})
- end,
- NewJobs = lists:keydelete(Pid,1,State#state.jobs),
- loop(State#state{jobs = NewJobs});
- false ->
- loop(State)
- end
- end.
-
-%% Decode request on main socket
-decode_main({job,Port,Name},#state{controller={Host,_},jobs=Jobs}=State) ->
- S = self(),
- NewJob = spawn_link(fun() -> job(Host,Port,S) end),
- receive {NewJob,started} -> State#state{jobs=[{NewJob,Name}|Jobs]};
- {NewJob,_Error} -> State
- end.
-
init_purify() ->
purify_new_leaks().
-%% Temporary job process on target
-%% This process will live while all test cases in the job are executed.
-%% A socket connection is established with the job process on host.
-job(Host,Port,Starter) ->
- process_flag(trap_exit,true),
- init_purify(),
- case gen_tcp:connect(Host,Port, [binary,
- {reuseaddr,true},
- {packet,4},
- {active,false}]) of
- {ok,JobSock} ->
- Starter ! {self(),started},
- job(JobSock);
- Error ->
- Starter ! {self(),{error,
- {could_not_contact_controller,Error}}}
- end.
-
-job(JobSock) ->
- JobDir = get_jobdir(),
- ok = file:make_dir(JobDir),
- ok = file:make_dir(filename:join(JobDir,?priv_dir)),
- put(test_server_job_sock,JobSock),
- put(test_server_job_dir,JobDir),
- {ok,Cwd} = file:get_cwd(),
- job_loop(JobSock),
- ok = file:set_cwd(Cwd),
- send_privdir(JobDir,JobSock), % also recursively removes jobdir
- ok.
-
-
-get_jobdir() ->
- Now = now(),
- {{Y,M,D},{H,Mi,S}} = calendar:now_to_local_time(Now),
- Basename = io_lib:format("~w-~2.2.0w-~2.2.0w_~2.2.0w.~2.2.0w.~2.2.0w_~w",
- [Y,M,D,H,Mi,S,element(3,Now)]),
- %% if target has a file master, don't use prim_file to look up cwd
- case lists:keymember(master,1,init:get_arguments()) of
- true ->
- {ok,Cwd} = file:get_cwd(),
- Cwd ++ "/" ++ Basename;
- false ->
- filename:absname(Basename)
- end.
-
-send_privdir(JobDir,JobSock) ->
- LocalPrivDir = filename:join(JobDir,?priv_dir),
- case file:list_dir(LocalPrivDir) of
- {ok,List} when List/=[] ->
- Tarfile0 = ?priv_dir ++ ".tar.gz",
- Tarfile = filename:join(JobDir,Tarfile0),
- {ok,Tar} = erl_tar:open(Tarfile,[write,compressed,cooked]),
- ok = erl_tar:add(Tar,LocalPrivDir,?priv_dir,[]),
- ok = erl_tar:close(Tar),
- {ok,TarBin} = file:read_file(Tarfile),
- file:delete(Tarfile),
- ok = del_dir(JobDir),
- request(JobSock,{{privdir,Tarfile0},TarBin});
- _ ->
- ok = del_dir(JobDir),
- request(JobSock,{privdir,empty_priv_dir})
- end.
-
-del_dir(Dir) ->
- case file:read_file_info(Dir) of
- {ok,#file_info{type=directory}} ->
- {ok,Cont} = file:list_dir(Dir),
- lists:foreach(fun(F) -> del_dir(filename:join(Dir,F)) end, Cont),
- ok = file:del_dir(Dir);
- {ok,#file_info{}} ->
- ok = file:delete(Dir);
- _r ->
- %% This might be a symlink - let's try to delete it!
- catch file:delete(Dir),
- ok
- end.
-
-%%
-%% Receive and decode request on job socket
-%%
-job_loop(JobSock) ->
- Request = recv(JobSock),
- case decode_job(Request) of
- ok -> job_loop(JobSock);
- {stop,R} -> R
- end.
-
-decode_job({{beam,Mod,Which},Beam}) ->
- % FIXME, shared directory structure on host and target required,
- % "Library beams" are not loaded from HOST... /Patrik
- code:add_patha(filename:dirname(Which)),
- % End of Patriks uglyness...
- {module,Mod} = code:load_binary(Mod,Which,Beam),
- ok;
-decode_job({{datadir,Tarfile0},Archive}) ->
- JobDir = get(test_server_job_dir),
- Tarfile = filename:join(JobDir,Tarfile0),
- ok = file:write_file(Tarfile,Archive),
- % Cooked is temporary removed/broken
- % ok = erl_tar:extract(Tarfile,[compressed,{cwd,JobDir},cooked]),
- ok = erl_tar:extract(Tarfile,[compressed,{cwd,JobDir}]),
- ok = file:delete(Tarfile),
- ok;
-decode_job({test_case,Case}) ->
- Result = run_test_case_apply(Case),
- JobSock = get(test_server_job_sock),
- request(JobSock,{test_case_result,Result}),
- case test_server_sup:tar_crash_dumps() of
- {error,no_crash_dumps} -> request(JobSock,{crash_dumps,no_crash_dumps});
- {ok,TarFile} ->
- {ok,TarBin} = file:read_file(TarFile),
- file:delete(TarFile),
- request(JobSock,{{crash_dumps,filename:basename(TarFile)},TarBin})
- end,
- ok;
-decode_job({sync_apply,{M,F,A}}) ->
- R = apply(M,F,A),
- request(get(test_server_job_sock),{sync_result,R}),
- ok;
-decode_job(job_done) ->
- {stop,stopped}.
-
-%%
-%% **** STOP *** CODE FOR REMOTE TARGET ONLY ***
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-
-
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% cover_compile({App,Include,Exclude,Cross}) ->
%% {ok,AnalyseModules} | {error,Reason}
@@ -377,9 +175,7 @@ module_names(Beams) ->
do_cover_compile(Modules) ->
do_cover_compile1(lists:usort(Modules)). % remove duplicates
-do_cover_compile1([Dont|Rest]) when Dont=:=cover;
- Dont=:=test_server;
- Dont=:=test_server_ctrl ->
+do_cover_compile1([Dont|Rest]) when Dont=:=cover ->
do_cover_compile1(Rest);
do_cover_compile1([M|Rest]) ->
case {code:is_sticky(M),code:is_loaded(M)} of
@@ -416,7 +212,7 @@ do_cover_compile1([]) ->
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% cover_analyse(Analyse,Modules) -> [{M,{Cov,NotCov,Details}}]
+%% cover_analyse(Analyse,Modules,Stop) -> [{M,{Cov,NotCov,Details}}]
%%
%% Analyse = {details,Dir} | details | {overview,void()} | overview
%% Modules = [atom()], the modules to analyse
@@ -432,8 +228,19 @@ do_cover_compile1([]) ->
%%
%% Also, if a Dir exists, cover data will be exported to a file called
%% all.coverdata in that directory.
-cover_analyse(Analyse,Modules) ->
- io:fwrite("Cover analysing...\n",[]),
+%%
+%% Finally, if Stop==true, then cover will be stopped after the
+%% analysis is completed. Stopping cover causes the original (non
+%% cover compiled) modules to be loaded back in. If a process at this
+%% point is still running old code of any of the cover compiled
+%% modules, meaning that it has not done any fully qualified function
+%% call after the cover compilation, the process will now be
+%% killed. To avoid this scenario, it is possible to set Stop=false,
+%% which means that the modules will stay cover compiled. Note that
+%% this is only recommended if the erlang node is being terminated
+%% after the test is completed.
+cover_analyse(Analyse,Modules,Stop) ->
+ print(stdout, "Cover analysing...\n", []),
DetailsFun =
case Analyse of
{details,Dir} ->
@@ -483,9 +290,15 @@ cover_analyse(Analyse,Modules) ->
{M,Err}
end
end, Modules),
- Sticky = unstick_all_sticky(node()),
- cover:stop(),
- stick_all_sticky(node(),Sticky),
+
+ case Stop of
+ true ->
+ Sticky = unstick_all_sticky(node()),
+ cover:stop(),
+ stick_all_sticky(node(),Sticky);
+ false ->
+ ok
+ end,
R.
pmap(Fun,List) ->
@@ -502,7 +315,20 @@ pmap(Fun,List) ->
end
end, Pids).
+
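+%% do_cover_for_node(Node, start) is called when a slave node is
+%% started with cover enabled, do_cover_for_node(Node, flush) when it
+%% is stopped; see start_node/3, wait_for_node/1 and stop_node/1.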
+do_cover_for_node(Node,CoverFunc) ->
+    %% A slave node may itself start another slave node, i.e. this
+    %% function can be executed on a slave node; in that case the cover
+    %% function must be executed on the master node. This is for instance the
+ %% case in test_server's own tests.
+ MainCoverNode = cover:get_main_node(),
+ Sticky = unstick_all_sticky(MainCoverNode,Node),
+ rpc:call(MainCoverNode,cover,CoverFunc,[Node]),
+ stick_all_sticky(Node,Sticky).
+
unstick_all_sticky(Node) ->
+ unstick_all_sticky(node(),Node).
+unstick_all_sticky(MainCoverNode,Node) ->
lists:filter(
fun(M) ->
case code:is_sticky(M) of
@@ -513,7 +339,7 @@ unstick_all_sticky(Node) ->
false
end
end,
- cover:modules()).
+ rpc:call(MainCoverNode,cover,modules,[])).
stick_all_sticky(Node,Sticky) ->
lists:foreach(
@@ -578,34 +404,23 @@ run_test_case_apply({CaseNum,Mod,Func,Args,Name,
DetFail = get(test_server_detected_fail),
{Result,DetFail,ProcBef,ProcAft}.
-run_test_case_apply(Mod, Func, Args, Name, RunInit, TimetrapData) ->
- case get(test_server_job_dir) of
- undefined ->
- %% i'm a local target
- do_run_test_case_apply(Mod, Func, Args, Name, RunInit,
- TimetrapData);
- JobDir ->
- %% i'm a remote target
- case Args of
- [Config] when is_list(Config) ->
- {value,{data_dir,HostDataDir}} =
- lists:keysearch(data_dir, 1, Config),
- DataBase = filename:basename(HostDataDir),
- TargetDataDir = filename:join(JobDir, DataBase),
- Config1 = lists:keyreplace(data_dir, 1, Config,
- {data_dir,TargetDataDir}),
- TargetPrivDir = filename:join(JobDir, ?priv_dir),
- Config2 = lists:keyreplace(priv_dir, 1, Config1,
- {priv_dir,TargetPrivDir}),
- do_run_test_case_apply(Mod, Func, [Config2], Name, RunInit,
- TimetrapData);
- _other ->
- do_run_test_case_apply(Mod, Func, Args, Name, RunInit,
- TimetrapData)
- end
- end.
+-type tc_status() :: 'starting' | 'running' | 'init_per_testcase' |
+ 'end_per_testcase' | {'framework',atom(),atom()} |
+ 'tc'.
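+%% State kept by the test case supervisor process (the group leader
+%% of the test case) while the case is executing.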
+-record(st,
+ {
+ ref :: reference(),
+ pid :: pid(),
+ mf :: {atom(),atom()},
+ status :: tc_status() | 'undefined',
+ ret_val :: term(),
+ comment :: list(char()),
+ timeout :: non_neg_integer() | 'infinity',
+ config :: list() | 'undefined',
+ end_conf_pid :: pid() | 'undefined'
+ }).
-do_run_test_case_apply(Mod, Func, Args, Name, RunInit, TimetrapData) ->
+run_test_case_apply(Mod, Func, Args, Name, RunInit, TimetrapData) ->
{ok,Cwd} = file:get_cwd(),
Args2Print = case Args of
[Args1] when is_list(Args1) ->
@@ -628,8 +443,9 @@ do_run_test_case_apply(Mod, Func, Args, Name, RunInit, TimetrapData) ->
LogOpts, TCCallback)
end),
put(test_server_detected_fail, []),
- run_test_case_msgloop(Ref, Pid, false, "",
- undefined, starting).
+ St = #st{ref=Ref,pid=Pid,mf={Mod,Func},status=starting,ret_val=[],
+ comment="",timeout=infinity,config=hd(Args)},
+ run_test_case_msgloop(St).
%% Ugly bug (pre R5A):
%% If this process (group leader of the test case) terminates before
@@ -640,31 +456,23 @@ do_run_test_case_apply(Mod, Func, Args, Name, RunInit, TimetrapData) ->
%% A test case is known to have failed if it returns {'EXIT', _} tuple,
%% or sends a message {failed, File, Line} to it's group_leader
%%
-run_test_case_msgloop(Ref, Pid, Terminate, Comment, CurrConf, Status) ->
- %% NOTE: Keep job_proxy_msgloop/0 up to date when changes
- %% are made in this function!
- {Timeout,ReturnValue} =
- case Terminate of
- {true, ReturnVal} ->
- %% stop any timetrap timers for the test case
- %% that have been started by this process
- timetrap_cancel_all(Pid, false),
- {20, ReturnVal};
- false ->
- {infinity, should_never_appear}
- end,
+run_test_case_msgloop(#st{ref=Ref,pid=Pid,end_conf_pid=EndConfPid0}=St0) ->
receive
- {test_case_initialized,Pid} ->
- run_test_case_msgloop(Ref,Pid,Terminate,
- Comment,CurrConf,running);
- Abort = {abort_current_testcase,_,_} when Status == starting ->
+ {set_tc_state=Tag,From,{Status,Config0}} ->
+ Config = case Config0 of
+ unknown -> St0#st.config;
+ _ -> Config0
+ end,
+ St = St0#st{status=Status,config=Config},
+ From ! {self(),Tag,ok},
+ run_test_case_msgloop(St);
+ {abort_current_testcase,_,_}=Abort when St0#st.status =:= starting ->
	    %% we're in init phase, must postpone this operation
%% until test case execution is in progress (or FW:init_tc
%% gets killed)
self() ! Abort,
erlang:yield(),
- run_test_case_msgloop(Ref,Pid,Terminate,
- Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{abort_current_testcase,Reason,From} ->
Line = case is_process_alive(Pid) of
true -> get_loc(Pid);
@@ -674,65 +482,49 @@ run_test_case_msgloop(Ref, Pid, Terminate, Comment, CurrConf, Status) ->
exit(Pid,{testcase_aborted,Reason,Line}),
erlang:yield(),
From ! {self(),abort_current_testcase,ok},
- NewComment =
- receive
- {'DOWN', Mon, process, Pid, _} ->
- Comment
- after 10000 ->
- %% Pid is probably trapping exits, hit it harder...
- exit(Pid, kill),
- %% here's the only place we know Reason, so we save
- %% it as a comment, potentially replacing user data
- Error = lists:flatten(io_lib:format("Aborted: ~p",
- [Reason])),
- Error1 = lists:flatten([string:strip(S,left) ||
+ St = receive
+ {'DOWN', Mon, process, Pid, _} ->
+ St0
+ after 10000 ->
+ %% Pid is probably trapping exits, hit it harder...
+ exit(Pid, kill),
+ %% here's the only place we know Reason, so we save
+ %% it as a comment, potentially replacing user data
+ Error = lists:flatten(io_lib:format("Aborted: ~p",
+ [Reason])),
+ Error1 = lists:flatten([string:strip(S,left) ||
S <- string:tokens(Error,
[$\n])]),
- if length(Error1) > 63 ->
- string:substr(Error1,1,60) ++ "...";
- true ->
- Error1
- end
- end,
- run_test_case_msgloop(Ref,Pid,Terminate,
- NewComment,CurrConf,Status);
+ Comment = if length(Error1) > 63 ->
+ string:substr(Error1,1,60) ++ "...";
+ true ->
+ Error1
+ end,
+ St0#st{comment=Comment}
+ end,
+ run_test_case_msgloop(St);
{sync_apply,From,MFA} ->
sync_local_or_remote_apply(false,From,MFA),
- run_test_case_msgloop(Ref,Pid,Terminate,
- Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{sync_apply_proxy,Proxy,From,MFA} ->
sync_local_or_remote_apply(Proxy,From,MFA),
- run_test_case_msgloop(Ref,Pid,Terminate,
- Comment,CurrConf,Status);
- {comment,NewComment} ->
- NewComment1 = test_server_ctrl:to_string(NewComment),
- NewComment2 = test_server_sup:framework_call(format_comment,
- [NewComment1],
- NewComment1),
- Terminate1 =
- case Terminate of
- {true,{Time,Value,Loc,Opts,_OldComment}} ->
- {true,{Time,Value,mod_loc(Loc),Opts,NewComment2}};
- Other ->
- Other
- end,
- run_test_case_msgloop(Ref,Pid,Terminate1,
- NewComment2,CurrConf,Status);
+ run_test_case_msgloop(St0);
+ {comment,NewComment0} ->
+ NewComment1 = test_server_ctrl:to_string(NewComment0),
+ NewComment = test_server_sup:framework_call(format_comment,
+ [NewComment1],
+ NewComment1),
+ run_test_case_msgloop(St0#st{comment=NewComment});
{read_comment,From} ->
- From ! {self(),read_comment,Comment},
- run_test_case_msgloop(Ref,Pid,Terminate,
- Comment,CurrConf,Status);
- {set_curr_conf,From,NewCurrConf} ->
- From ! {self(),set_curr_conf,ok},
- run_test_case_msgloop(Ref,Pid,Terminate,
- Comment,NewCurrConf,Status);
- {make_priv_dir,From} when CurrConf == undefined ->
- From ! {self(),make_priv_dir,{error,no_priv_dir_in_config}},
- run_test_case_msgloop(Ref,Pid,Terminate,
- Comment,CurrConf,Status);
+ From ! {self(),read_comment,St0#st.comment},
+ run_test_case_msgloop(St0);
{make_priv_dir,From} ->
+ Config = case St0#st.config of
+ undefined -> [];
+ Config0 -> Config0
+ end,
Result =
- case proplists:get_value(priv_dir, element(2, CurrConf)) of
+ case proplists:get_value(priv_dir, Config) of
undefined ->
{error,no_priv_dir_in_config};
PrivDir ->
@@ -746,207 +538,63 @@ run_test_case_msgloop(Ref, Pid, Terminate, Comment, CurrConf, Status) ->
end
end,
From ! {self(),make_priv_dir,Result},
- run_test_case_msgloop(Ref,Pid,Terminate,
- Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{'EXIT',Pid,{Ref,Time,Value,Loc,Opts}} ->
- RetVal = {Time/1000000,Value,mod_loc(Loc),Opts,Comment},
- run_test_case_msgloop(Ref,Pid,
- {true,RetVal},Comment,undefined,Status);
+ RetVal = {Time/1000000,Value,Loc,Opts},
+ St = setup_termination(RetVal, St0#st{config=undefined}),
+ run_test_case_msgloop(St);
{'EXIT',Pid,Reason} ->
- case Reason of
- {timetrap_timeout,TVal,Loc} ->
- %% convert Loc to form that can be formatted
- case mod_loc(Loc) of
- {FwMod,FwFunc,framework} ->
- %% timout during framework call
- spawn_fw_call(FwMod,FwFunc,CurrConf,Pid,
- {framework_error,{timetrap,TVal}},
- unknown,self()),
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,
- undefined,Status);
- Loc1 ->
- %% call end_per_testcase on a separate process,
- %% only so that the user has a chance to
- %% clean up after init_per_testcase, even after
- %% a timetrap timeout
- NewCurrConf =
- case CurrConf of
- {{Mod,Func},Conf} ->
- EndConfPid =
- call_end_conf(
- Mod,Func,Pid,
- {timetrap_timeout,TVal},
- Loc1,[{tc_status,
- {failed,
- timetrap_timeout}}|Conf],
- TVal),
- {EndConfPid,{Mod,Func},Conf};
- _ ->
- {Mod,Func} = get_mf(Loc1),
- %% The framework functions mustn't
- %% execute on this group leader process
- %% or io will cause deadlock, so we
- %% spawn a dedicated process for the
- %% operation and let the group leader
- %% go back to handle io.
- spawn_fw_call(Mod,Func,CurrConf,Pid,
- {timetrap_timeout,TVal},
- Loc1,self()),
- undefined
- end,
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,
- NewCurrConf,Status)
- end;
- {timetrap_timeout,TVal,Loc,InitOrEnd} ->
- case mod_loc(Loc) of
- {FwMod,FwFunc,framework} ->
- %% timout during framework call
- spawn_fw_call(FwMod,FwFunc,CurrConf,Pid,
- {framework_error,{timetrap,TVal}},
- unknown,self());
- Loc1 ->
- {Mod,_Func} = get_mf(Loc1),
- spawn_fw_call(Mod,InitOrEnd,CurrConf,Pid,
- {timetrap_timeout,TVal},
- Loc1,self())
- end,
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status);
- {testcase_aborted,ErrorMsg={user_timetrap_error,_},AbortLoc} ->
- %% user timetrap function caused exit
- %% during start of test case
- {Mod,Func} = get_mf(mod_loc(AbortLoc)),
- spawn_fw_call(Mod,Func,CurrConf,Pid,
- ErrorMsg,unknown,self()),
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,
- undefined,Status);
- {testcase_aborted,AbortReason,AbortLoc} ->
- ErrorMsg = {testcase_aborted,AbortReason},
- case mod_loc(AbortLoc) of
- {FwMod,FwFunc,framework} ->
- %% abort during framework call
- spawn_fw_call(FwMod,FwFunc,CurrConf,Pid,
- {framework_error,ErrorMsg},
- unknown,self()),
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,
- undefined,Status);
- Loc1 ->
- %% call end_per_testcase on a separate process,
- %% only so that the user has a chance to clean up
- %% after init_per_testcase, even after abortion
- NewCurrConf =
- case CurrConf of
- {{Mod,Func},Conf} ->
- TVal =
- case lists:keysearch(default_timeout,
- 1,
- Conf) of
- {value,{default_timeout,Tmo}} ->
- Tmo;
- _ ->
- ?DEFAULT_TIMETRAP_SECS*1000
- end,
- EndConfPid =
- call_end_conf(
- Mod,Func,Pid,
- ErrorMsg,Loc1,
- [{tc_status,
- {failed,ErrorMsg}}|Conf],TVal),
- {EndConfPid,{Mod,Func},Conf};
- _ ->
- {Mod,Func} = get_mf(Loc1),
- spawn_fw_call(Mod,Func,CurrConf,Pid,
- ErrorMsg,Loc1,self()),
- undefined
- end,
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,
- NewCurrConf,Status)
- end;
- killed ->
- %% result of an exit(TestCase,kill) call, which is the
- %% only way to abort a testcase process that traps exits
- %% (see abort_current_testcase)
- {Mod,Func} = case CurrConf of
- {MF,_} -> MF;
- _ -> {undefined,undefined}
- end,
- spawn_fw_call(Mod,Func,CurrConf,Pid,
- testcase_aborted_or_killed,
- unknown,self()),
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status);
- {fw_error,{FwMod,FwFunc,FwError}} ->
- spawn_fw_call(FwMod,FwFunc,CurrConf,Pid,
- {framework_error,FwError},
- unknown,self()),
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status);
- _Other ->
- %% the testcase has terminated because of Reason (e.g. an exit
- %% because a linked process failed)
- {Mod,Func} = case CurrConf of
- {MF,_} -> MF;
- _ -> {undefined,undefined}
- end,
- spawn_fw_call(Mod,Func,CurrConf,Pid,
- Reason,unknown,self()),
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status)
- end;
- {EndConfPid,{call_end_conf,Data,_Result}} ->
+ St = handle_tc_exit(Reason, St0),
+ run_test_case_msgloop(St);
+ {EndConfPid0,{call_end_conf,Data,_Result}} ->
+ #st{mf={Mod,Func},config=CurrConf} = St0,
case CurrConf of
- {EndConfPid,{Mod,Func},_Conf} ->
+ _ when is_list(CurrConf) ->
{_Mod,_Func,TCPid,TCExitReason,Loc} = Data,
spawn_fw_call(Mod,Func,CurrConf,TCPid,
TCExitReason,Loc,self()),
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,undefined,Status);
+ St = St0#st{config=undefined,end_conf_pid=undefined},
+ run_test_case_msgloop(St);
_ ->
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status)
+ run_test_case_msgloop(St0)
end;
{_FwCallPid,fw_notify_done,{T,Value,Loc,Opts,AddToComment}} ->
%% the framework has been notified, we're finished
- RetVal =
- case AddToComment of
- undefined ->
- {T,Value,Loc,Opts,Comment};
- _ ->
- Comment1 =
- if Comment == "" ->
- AddToComment;
- true ->
- Comment ++
- test_server_ctrl:xhtml("<br>",
- "<br />") ++
- AddToComment
- end,
- {T,Value,Loc,Opts,Comment1}
- end,
- run_test_case_msgloop(Ref,Pid,
- {true,RetVal},Comment,undefined,Status);
+ RetVal = {T,Value,Loc,Opts},
+ Comment0 = St0#st.comment,
+ Comment = case AddToComment of
+ undefined ->
+ Comment0;
+ _ ->
+ if Comment0 =:= "" ->
+ AddToComment;
+ true ->
+ Comment0 ++
+ test_server_ctrl:xhtml("<br>",
+ "<br />") ++
+ AddToComment
+ end
+ end,
+ St = setup_termination(RetVal, St0#st{comment=Comment,
+ config=undefined}),
+ run_test_case_msgloop(St);
{'EXIT',_FwCallPid,{fw_notify_done,Func,Error}} ->
%% a framework function failed
CB = os:getenv("TEST_SERVER_FRAMEWORK"),
Loc = case CB of
FW when FW =:= false; FW =:= "undefined" ->
- {test_server,Func};
+ [{test_server,Func}];
_ ->
- {list_to_atom(CB),Func}
+ [{list_to_atom(CB),Func}]
end,
- RetVal = {died,{framework_error,Loc,Error},Loc,"Framework error"},
- run_test_case_msgloop(Ref,Pid,
- {true,RetVal},Comment,undefined,Status);
+ RetVal = {died,{framework_error,Loc,Error},Loc},
+ St = setup_termination(RetVal, St0#st{comment="Framework error",
+ config=undefined}),
+ run_test_case_msgloop(St);
{failed,File,Line} ->
put(test_server_detected_fail,
[{File, Line}| get(test_server_detected_fail)]),
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{user_timetrap,Pid,_TrapTime,StartTime,E={user_timetrap_error,_},_} ->
case update_user_timetraps(Pid, StartTime) of
@@ -955,8 +603,7 @@ run_test_case_msgloop(Ref, Pid, Terminate, Comment, CurrConf, Status) ->
ignore ->
ok
end,
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{user_timetrap,Pid,TrapTime,StartTime,ElapsedTime,Scale} ->
%% a user timetrap is triggered, ignore it if new
%% timetrap has been started since
@@ -971,36 +618,107 @@ run_test_case_msgloop(Ref, Pid, Terminate, Comment, CurrConf, Status) ->
ignore ->
ok
end,
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{timetrap_cancel_one,Handle,_From} ->
timetrap_cancel_one(Handle, false),
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{timetrap_cancel_all,TCPid,_From} ->
timetrap_cancel_all(TCPid, false),
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
{get_timetrap_info,From,TCPid} ->
Info = get_timetrap_info(TCPid, false),
From ! {self(),get_timetrap_info,Info},
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
_Other when not is_tuple(_Other) ->
%% ignore anything not generated by test server
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status);
+ run_test_case_msgloop(St0);
_Other when element(1, _Other) /= 'EXIT',
element(1, _Other) /= started,
element(1, _Other) /= finished,
element(1, _Other) /= print ->
%% ignore anything not generated by test server
- run_test_case_msgloop(Ref,Pid,
- Terminate,Comment,CurrConf,Status)
- after Timeout ->
- ReturnValue
+ run_test_case_msgloop(St0)
+ after St0#st.timeout ->
+ #st{ret_val=RetVal,comment=Comment} = St0,
+ erlang:append_element(RetVal, Comment)
end.
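+
+%% Cancel all timetrap timers started for the test case by this
+%% process and let the message loop linger for a short while (20 ms)
+%% before returning RetVal, so that late messages can still be handled.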
+setup_termination(RetVal, #st{pid=Pid}=St) ->
+ timetrap_cancel_all(Pid, false),
+ St#st{ret_val=RetVal,timeout=20}.
+
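+%% Report the current test case phase (and optionally an updated
+%% Config) to the test case supervisor; 'unknown' keeps the previously
+%% stored config.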
+set_tc_state(State) ->
+ set_tc_state(State,unknown).
+set_tc_state(State, Config) ->
+ tc_supervisor_req(set_tc_state, {State,Config}).
+
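+%% The test case process has exited; act on the exit reason depending
+%% on which phase the case was in according to the recorded status.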
+handle_tc_exit(killed, St) ->
+ %% probably the result of an exit(TestCase,kill) call, which is the
+ %% only way to abort a testcase process that traps exits
+ %% (see abort_current_testcase).
+ #st{config=Config,mf={Mod,Func},pid=Pid} = St,
+ Msg = testcase_aborted_or_killed,
+ spawn_fw_call(Mod, Func, Config, Pid, Msg, unknown, self()),
+ St;
+handle_tc_exit({testcase_aborted,{user_timetrap_error,_}=Msg,_}, St) ->
+ #st{config=Config,mf={Mod,Func},pid=Pid} = St,
+ spawn_fw_call(Mod, Func, Config, Pid, Msg, unknown, self()),
+ St;
+handle_tc_exit(Reason, #st{status={framework,FwMod,FwFunc},
+ config=Config,pid=Pid}=St) ->
+ R = case Reason of
+ {timetrap_timeout,TVal,_} ->
+ {timetrap,TVal};
+ {testcase_aborted=E,AbortReason,_} ->
+ {E,AbortReason};
+ {fw_error,{FwMod,FwFunc,FwError}} ->
+ FwError;
+ Other ->
+ Other
+ end,
+ Error = {framework_error,R},
+ spawn_fw_call(FwMod, FwFunc, Config, Pid, Error, unknown, self()),
+ St;
+handle_tc_exit(Reason, #st{status=tc,config=Config0,mf={Mod,Func},pid=Pid}=St)
+ when is_list(Config0) ->
+ {R,Loc1,F} = case Reason of
+ {timetrap_timeout=E,TVal,Loc0} ->
+ {{E,TVal},Loc0,E};
+ {testcase_aborted=E,AbortReason,Loc0} ->
+ Msg = {E,AbortReason},
+ {Msg,Loc0,Msg};
+ Other ->
+ {Other,unknown,Other}
+ end,
+ Timeout = end_conf_timeout(Reason, St),
+ Config = [{tc_status,{failed,F}}|Config0],
+ EndConfPid = call_end_conf(Mod, Func, Pid, R, Loc1, Config, Timeout),
+ St#st{end_conf_pid=EndConfPid};
+handle_tc_exit(Reason, #st{config=Config,mf={Mod,Func0},pid=Pid,
+ status=Status}=St) ->
+ {R,Loc1} = case Reason of
+ {timetrap_timeout=E,TVal,Loc0} ->
+ {{E,TVal},Loc0};
+ {testcase_aborted=E,AbortReason,Loc0} ->
+ {{E,AbortReason},Loc0};
+ Other ->
+ {Other,unknown}
+ end,
+ Func = case Status of
+ init_per_testcase=F -> {F,Func0};
+ end_per_testcase=F -> {F,Func0};
+ _ -> Func0
+ end,
+ spawn_fw_call(Mod, Func, Config, Pid, R, Loc1, self()),
+ St.
+
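+%% Pick the timeout for the end_per_testcase cleanup call: the
+%% timetrap value if the case failed with a timetrap timeout,
+%% otherwise the default_timeout from Config (or the default
+%% timetrap time).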
+end_conf_timeout({timetrap_timeout,Timeout,_}, _) ->
+ Timeout;
+end_conf_timeout(_, #st{config=Config}) when is_list(Config) ->
+ proplists:get_value(default_timeout, Config, ?DEFAULT_TIMETRAP_SECS*1000);
+end_conf_timeout(_, _) ->
+ ?DEFAULT_TIMETRAP_SECS*1000.
+
call_end_conf(Mod,Func,TCPid,TCExitReason,Loc,Conf,TVal) ->
Starter = self(),
Data = {Mod,Func,TCPid,TCExitReason,Loc},
@@ -1039,14 +757,14 @@ call_end_conf(Mod,Func,TCPid,TCExitReason,Loc,Conf,TVal) ->
end,
spawn_link(EndConfProc).
-spawn_fw_call(Mod,{init_per_testcase,Func},_,Pid,{timetrap_timeout,TVal}=Why,
+spawn_fw_call(Mod,{init_per_testcase,Func},CurrConf,Pid,{timetrap_timeout,TVal}=Why,
Loc,SendTo) ->
FwCall =
fun() ->
Skip = {skip,{failed,{Mod,init_per_testcase,Why}}},
%% if init_per_testcase fails, the test case
%% should be skipped
- case catch do_end_tc_call(Mod,Func, Loc, {Pid,Skip,[[]]}, Why) of
+ case catch do_end_tc_call(Mod,Func, {Pid,Skip,[CurrConf]}, Why) of
{'EXIT',FwEndTCErr} ->
exit({fw_notify_done,end_tc,FwEndTCErr});
_ ->
@@ -1060,19 +778,10 @@ spawn_fw_call(Mod,{init_per_testcase,Func},_,Pid,{timetrap_timeout,TVal}=Why,
spawn_fw_call(Mod,{end_per_testcase,Func},EndConf,Pid,
{timetrap_timeout,TVal}=Why,_Loc,SendTo) ->
- %%! This is a temporary fix that keeps Test Server alive during
- %%! execution of a parallel test case group, when sometimes
- %%! this clause gets called with EndConf == undefined. See OTP-9594
- %%! for more info.
- EndConf1 = if EndConf == undefined ->
- [{tc_status,{failed,{Mod,end_per_testcase,Why}}}];
- true ->
- EndConf
- end,
FwCall =
fun() ->
{RetVal,Report} =
- case proplists:get_value(tc_status, EndConf1) of
+ case proplists:get_value(tc_status, EndConf) of
undefined ->
E = {failed,{Mod,end_per_testcase,Why}},
{E,E};
@@ -1086,9 +795,9 @@ spawn_fw_call(Mod,{end_per_testcase,Func},EndConf,Pid,
"WARNING! ~p:end_per_testcase(~p, ~p)"
" failed!\n\tReason: timetrap timeout"
" after ~w ms!\n", [Mod,Func,EndConf,TVal]},
- FailLoc = proplists:get_value(tc_fail_loc, EndConf1),
- case catch do_end_tc_call(Mod,Func, FailLoc,
- {Pid,Report,[EndConf1]}, Why) of
+ FailLoc = proplists:get_value(tc_fail_loc, EndConf),
+ case catch do_end_tc_call(Mod,Func,
+ {Pid,Report,[EndConf]}, Why) of
{'EXIT',FwEndTCErr} ->
exit({fw_notify_done,end_tc,FwEndTCErr});
_ ->
@@ -1122,14 +831,9 @@ spawn_fw_call(FwMod,FwFunc,_,_Pid,{framework_error,FwError},_,SendTo) ->
spawn_link(FwCall);
spawn_fw_call(Mod,Func,CurrConf,Pid,Error,Loc,SendTo) ->
- {Mod1,Func1} =
- case {Mod,Func,CurrConf} of
- {undefined,undefined,{{M,F},_}} -> {M,F};
- _ -> {Mod,Func}
- end,
FwCall =
fun() ->
- case catch fw_error_notify(Mod1,Func1,[],
+ case catch fw_error_notify(Mod,Func,[],
Error,Loc) of
{'EXIT',FwErrorNotifyErr} ->
exit({fw_notify_done,error_notification,
@@ -1137,8 +841,8 @@ spawn_fw_call(Mod,Func,CurrConf,Pid,Error,Loc,SendTo) ->
_ ->
ok
end,
- Conf = [{tc_status,{failed,timetrap_timeout}}],
- case catch do_end_tc_call(Mod1,Func1, Loc,
+ Conf = [{tc_status,{failed,timetrap_timeout}}|CurrConf],
+ case catch do_end_tc_call(Mod,Func,
{Pid,Error,[Conf]},Error) of
{'EXIT',FwEndTCErr} ->
exit({fw_notify_done,end_tc,FwEndTCErr});
@@ -1203,81 +907,73 @@ run_test_case_eval(Mod, Func, Args0, Name, Ref, RunInit,
TimetrapData, LogOpts, TCCallback) ->
put(test_server_multiply_timetraps, TimetrapData),
put(test_server_logopts, LogOpts),
+ Where = [{Mod,Func}],
+ put(test_server_loc, Where),
FWInitResult = test_server_sup:framework_call(init_tc,[?pl2a(Mod),Func,Args0],
{ok,Args0}),
- group_leader() ! {test_case_initialized,self()},
+ set_tc_state(running),
{{Time,Value},Loc,Opts} =
case FWInitResult of
{ok,Args} ->
run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback);
Error = {error,_Reason} ->
- Where = {Mod,Func},
- NewResult = do_end_tc_call(Mod,Func, Where, {Error,Args0},
+ NewResult = do_end_tc_call(Mod,Func, {Error,Args0},
{skip,{failed,Error}}),
{{0,NewResult},Where,[]};
{fail,Reason} ->
Conf = [{tc_status,{failed,Reason}} | hd(Args0)],
- Where = {Mod,Func},
fw_error_notify(Mod, Func, Conf, Reason),
- NewResult = do_end_tc_call(Mod,Func, Where, {{error,Reason},[Conf]},
+ NewResult = do_end_tc_call(Mod,Func, {{error,Reason},[Conf]},
{fail,Reason}),
{{0,NewResult},Where,[]};
Skip = {skip,_Reason} ->
- Where = {Mod,Func},
- NewResult = do_end_tc_call(Mod,Func, Where, {Skip,Args0}, Skip),
+ NewResult = do_end_tc_call(Mod,Func, {Skip,Args0}, Skip),
{{0,NewResult},Where,[]};
{auto_skip,Reason} ->
- Where = {Mod,Func},
- NewResult = do_end_tc_call(Mod,Func, Where, {{skip,Reason},Args0},
+ NewResult = do_end_tc_call(Mod,Func, {{skip,Reason},Args0},
{skip,Reason}),
{{0,NewResult},Where,[]}
end,
exit({Ref,Time,Value,Loc,Opts}).
run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) ->
- %% save current state in controller loop
- tc_supervisor_req(set_curr_conf, {{Mod,Func},hd(Args)}),
case RunInit of
run_init ->
- put(test_server_init_or_end_conf,{init_per_testcase,Func}),
- put(test_server_loc, {Mod,{init_per_testcase,Func}}),
+ set_tc_state(init_per_testcase, hd(Args)),
ensure_timetrap(Args),
case init_per_testcase(Mod, Func, Args) of
Skip = {skip,Reason} ->
Line = get_loc(),
- Conf = [{tc_status,{skipped,Reason}}],
- NewRes = do_end_tc_call(Mod,Func, Line, {Skip,[Conf]}, Skip),
+ Conf = [{tc_status,{skipped,Reason}}|hd(Args)],
+ NewRes = do_end_tc_call(Mod,Func, {Skip,[Conf]}, Skip),
{{0,NewRes},Line,[]};
{skip_and_save,Reason,SaveCfg} ->
Line = get_loc(),
- Conf = [{tc_status,{skipped,Reason}},{save_config,SaveCfg}],
- NewRes = do_end_tc_call(Mod,Func, Line, {{skip,Reason},[Conf]},
+ Conf = [{tc_status,{skipped,Reason}},{save_config,SaveCfg}|hd(Args)],
+ NewRes = do_end_tc_call(Mod,Func, {{skip,Reason},[Conf]},
{skip,Reason}),
{{0,NewRes},Line,[]};
FailTC = {fail,Reason} -> % user fails the testcase
EndConf = [{tc_status,{failed,Reason}} | hd(Args)],
fw_error_notify(Mod, Func, EndConf, Reason),
- NewRes = do_end_tc_call(Mod,Func, {Mod,Func},
+ NewRes = do_end_tc_call(Mod,Func,
{{error,Reason},[EndConf]},
FailTC),
- {{0,NewRes},{Mod,Func},[]};
+ {{0,NewRes},[{Mod,Func}],[]};
{ok,NewConf} ->
- put(test_server_init_or_end_conf,undefined),
%% call user callback function if defined
NewConf1 = user_callback(TCCallback, Mod, Func, init, NewConf),
%% save current state in controller loop
- tc_supervisor_req(set_curr_conf, {{Mod,Func},NewConf1}),
- put(test_server_loc, {Mod,Func}),
+ set_tc_state(tc, NewConf1),
%% execute the test case
{{T,Return},Loc} = {ts_tc(Mod, Func, [NewConf1]),get_loc()},
{EndConf,TSReturn,FWReturn} =
case Return of
{E,TCError} when E=='EXIT' ; E==failed ->
- ModLoc = mod_loc(Loc),
fw_error_notify(Mod, Func, NewConf1,
- TCError, ModLoc),
+ TCError, Loc),
{[{tc_status,{failed,TCError}},
- {tc_fail_loc,ModLoc}|NewConf1],
+ {tc_fail_loc,Loc}|NewConf1],
Return,{error,TCError}};
SaveCfg={save_config,_} ->
{[{tc_status,ok},SaveCfg|NewConf1],Return,ok};
@@ -1294,7 +990,6 @@ run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) ->
%% call user callback function if defined
EndConf1 = user_callback(TCCallback, Mod, Func, 'end', EndConf),
%% update current state in controller loop
- tc_supervisor_req(set_curr_conf, EndConf1),
{FWReturn1,TSReturn1,EndConf2} =
case end_per_testcase(Mod, Func, EndConf1) of
SaveCfg1={save_config,_} ->
@@ -1314,23 +1009,21 @@ run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) ->
{FWReturn,TSReturn,EndConf1}
end,
%% clear current state in controller loop
- tc_supervisor_req(set_curr_conf, undefined),
- put(test_server_init_or_end_conf,undefined),
- case do_end_tc_call(Mod,Func, Loc,
+ case do_end_tc_call(Mod,Func,
{FWReturn1,[EndConf2]}, TSReturn1) of
{failed,Reason} = NewReturn ->
fw_error_notify(Mod,Func,EndConf2, Reason),
- {{T,NewReturn},{Mod,Func},[]};
+ {{T,NewReturn},[{Mod,Func}],[]};
NewReturn ->
{{T,NewReturn},Loc,[]}
end
end;
skip_init ->
+ set_tc_state(running, hd(Args)),
%% call user callback function if defined
Args1 = user_callback(TCCallback, Mod, Func, init, Args),
ensure_timetrap(Args1),
%% ts_tc does a catch
- put(test_server_loc, {Mod,Func}),
%% if this is a named conf group, the test case (init or end conf)
%% should be called with the name as the first argument
Args2 = if Name == undefined -> Args1;
@@ -1341,43 +1034,12 @@ run_test_case_eval1(Mod, Func, Args, Name, RunInit, TCCallback) ->
%% call user callback function if defined
Return1 = user_callback(TCCallback, Mod, Func, 'end', Return),
{Return2,Opts} = process_return_val([Return1], Mod, Func,
- Args1, {Mod,Func}, Return1),
+ Args1, [{Mod,Func}], Return1),
{{T,Return2},Loc,Opts}
end.
-do_end_tc_call(M,F, Loc, Res, Return) ->
- IsSuite = case lists:reverse(atom_to_list(M)) of
- [$E,$T,$I,$U,$S,$_|_] -> true;
- _ -> false
- end,
+do_end_tc_call(Mod, Func, Res, Return) ->
FwMod = os:getenv("TEST_SERVER_FRAMEWORK"),
- {Mod,Func} =
- if FwMod == M ; FwMod == "undefined"; FwMod == false ->
- {M,F};
- (not IsSuite) and is_list(Loc) and (length(Loc)>1) ->
- %% If failure in other module (M) than suite, try locate
- %% suite name in Loc list and call end_tc with Suite:TestCase
- %% instead of M:F.
- GetSuite = fun(S,TC) ->
- case lists:reverse(atom_to_list(S)) of
- [$E,$T,$I,$U,$S,$_|_] -> [{S,TC}];
- _ -> []
- end
- end,
- case lists:flatmap(fun({S,TC,_}) -> GetSuite(S,TC);
- ({{S,TC},_}) -> GetSuite(S,TC);
- ({S,TC}) -> GetSuite(S,TC);
- (_) -> []
- end, Loc) of
- [] ->
- {M,F};
- [FoundSuite|_] ->
- FoundSuite
- end;
- true ->
- {M,F}
- end,
-
Ref = make_ref(),
if FwMod == "ct_framework" ; FwMod == "undefined"; FwMod == false ->
case test_server_sup:framework_call(
@@ -1419,7 +1081,7 @@ process_return_val([Return], M,F,A, Loc, Final) when is_list(Return) ->
true -> % must be return value from end conf case
process_return_val1(Return, M,F,A, Loc, Final, []);
false -> % must be Config value from init conf case
- case do_end_tc_call(M, F, Loc, {ok,A}, Return) of
+ case do_end_tc_call(M, F, {ok,A}, Return) of
{failed, FWReason} = Failed ->
fw_error_notify(M,F,A, FWReason),
{Failed, []};
@@ -1435,9 +1097,9 @@ process_return_val(Return, M,F,A, Loc, Final) ->
process_return_val1([Failed={E,TCError}|_], M,F,A=[Args], Loc, _, SaveOpts)
when E=='EXIT';
E==failed ->
- fw_error_notify(M,F,A, TCError, mod_loc(Loc)),
- case do_end_tc_call(M,F, Loc, {{error,TCError},
- [[{tc_status,{failed,TCError}}|Args]]},
+ fw_error_notify(M,F,A, TCError, Loc),
+ case do_end_tc_call(M,F, {{error,TCError},
+ [[{tc_status,{failed,TCError}}|Args]]},
Failed) of
{failed,FWReason} ->
{{failed,FWReason},SaveOpts};
@@ -1455,8 +1117,8 @@ process_return_val1([RetVal={Tag,_}|Opts], M,F,A, Loc, _, SaveOpts) when Tag==sk
process_return_val1(Opts, M,F,A, Loc, RetVal, SaveOpts);
process_return_val1([_|Opts], M,F,A, Loc, Final, SaveOpts) ->
process_return_val1(Opts, M,F,A, Loc, Final, SaveOpts);
-process_return_val1([], M,F,A, Loc, Final, SaveOpts) ->
- case do_end_tc_call(M,F, Loc, {Final,A}, Final) of
+process_return_val1([], M,F,A, _Loc, Final, SaveOpts) ->
+ case do_end_tc_call(M,F, {Final,A}, Final) of
{failed,FWReason} ->
{{failed,FWReason},SaveOpts};
NewReturn ->
@@ -1522,7 +1184,7 @@ do_init_per_testcase(Mod, Args) ->
throw:Other ->
set_loc(erlang:get_stacktrace()),
Line = get_loc(),
- FormattedLoc = test_server_sup:format_loc(mod_loc(Line)),
+ FormattedLoc = test_server_sup:format_loc(Line),
group_leader() ! {printout,12,
"ERROR! init_per_testcase thrown!\n"
"\tLocation: ~s\n\tReason: ~p\n",
@@ -1533,7 +1195,7 @@ do_init_per_testcase(Mod, Args) ->
Reason = {Reason0,Stk},
set_loc(Stk),
Line = get_loc(),
- FormattedLoc = test_server_sup:format_loc(mod_loc(Line)),
+ FormattedLoc = test_server_sup:format_loc(Line),
group_leader() ! {printout,12,
"ERROR! init_per_testcase crashed!\n"
"\tLocation: ~s\n\tReason: ~p\n",
@@ -1556,8 +1218,7 @@ end_per_testcase(Mod, Func, Conf) ->
end.
do_end_per_testcase(Mod,EndFunc,Func,Conf) ->
- put(test_server_init_or_end_conf,{EndFunc,Func}),
- put(test_server_loc, {Mod,{EndFunc,Func}}),
+ set_tc_state(end_per_testcase, Conf),
try Mod:EndFunc(Func, Conf) of
{save_config,_}=SaveCfg ->
SaveCfg;
@@ -1581,8 +1242,7 @@ do_end_per_testcase(Mod,EndFunc,Func,Conf) ->
"Reason: ~p\n"
"Line: ~s\n",
[EndFunc, Other,
- test_server_sup:format_loc(
- mod_loc(get_loc()))]},
+ test_server_sup:format_loc(get_loc())]},
{failed,{Mod,end_per_testcase,Other}};
Class:Reason ->
Stk = erlang:get_stacktrace(),
@@ -1604,8 +1264,7 @@ do_end_per_testcase(Mod,EndFunc,Func,Conf) ->
"Reason: ~p\n"
"Line: ~s\n",
[EndFunc, Reason,
- test_server_sup:format_loc(
- mod_loc(get_loc()))]},
+ test_server_sup:format_loc(get_loc())]},
{failed,{Mod,end_per_testcase,Why}}
end.
@@ -1618,66 +1277,19 @@ get_loc(Pid) ->
lists:foreach(fun({Key,Val}) -> put(Key, Val) end, Dict),
Stk = [rewrite_loc_item(Loc) || Loc <- Stk0],
case get(test_server_loc) of
- undefined ->
- put(test_server_loc, Stk);
- {Suite,Case} ->
+ [{Suite,Case}] ->
%% location info unknown, check if {Suite,Case,Line}
	    %% is available in stacktrace, and if so, use stacktrace
- %% instead of currect test_server_loc
+ %% instead of current test_server_loc
case [match || {S,C,_L} <- Stk, S == Suite, C == Case] of
[match|_] -> put(test_server_loc, Stk);
_ -> ok
end;
_ ->
- ok
+ put(test_server_loc, Stk)
end,
get_loc().
-%% find the latest known Suite:Testcase
-get_mf(MFs) ->
- get_mf(MFs, {undefined,undefined}).
-
-get_mf([MF|MFs], _Found) when is_tuple(MF) ->
- ModFunc = {Mod,_} = case MF of
- {M,F,_} -> {M,F};
- MF -> MF
- end,
- case is_suite(Mod) of
- true -> ModFunc;
- false -> get_mf(MFs, ModFunc)
- end;
-get_mf(_, Found) ->
- Found.
-
-is_suite(Mod) ->
- case lists:reverse(atom_to_list(Mod)) of
- "ETIUS" ++ _ -> true;
- _ -> false
- end.
-
-mod_loc(Loc) ->
- %% handle diff line num versions
- case Loc of
- [{{_M,_F},_L}|_] ->
- [begin if L /= 0 -> {?pl2a(M),F,L};
- true -> {?pl2a(M),F} end end || {{M,F},L} <- Loc];
- [{_M,_F}|_] ->
- [{?pl2a(M),F} || {M,F} <- Loc];
- {{M,F},0} ->
- [{?pl2a(M),F}];
- {{M,F},L} ->
- [{?pl2a(M),F,L}];
- {M,ForL} ->
- [{?pl2a(M),ForL}];
- {M,F,0} ->
- [{M,F}];
- [{M,F,0}|Stack] ->
- [{M,F}|Stack];
- _ ->
- Loc
- end.
-
-
fw_error_notify(Mod, Func, Args, Error) ->
test_server_sup:framework_call(error_notification,
[?pl2a(Mod),Func,[Args],
@@ -1699,10 +1311,10 @@ fw_error_notify(Mod, Func, Args, Error, Loc) ->
%% is directed to console, major and/or minor log files.
print(Detail,Format,Args) ->
- local_or_remote_apply({test_server_ctrl,print,[Detail,Format,Args]}).
+ test_server_ctrl:print(Detail, Format, Args).
print(Detail,Format,Args,Printer) ->
- local_or_remote_apply({test_server_ctrl,print,[Detail,Format,Args,Printer]}).
+ test_server_ctrl:print(Detail, Format, Args, Printer).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% print_timestamp(Detail,Leader) -> ok
@@ -1712,7 +1324,7 @@ print(Detail,Format,Args,Printer) ->
%% log files.
print_timestamp(Detail,Leader) ->
- local_or_remote_apply({test_server_ctrl,print_timestamp,[Detail,Leader]}).
+ test_server_ctrl:print_timestamp(Detail, Leader).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1760,7 +1372,12 @@ ts_tc(M, F, A) ->
{Elapsed, Result}.
set_loc(Stk) ->
- Loc = [rewrite_loc_item(I) || {_,_,_,_}=I <- Stk],
+ Loc = case [rewrite_loc_item(I) || {_,_,_,_}=I <- Stk] of
+ [{M,F,0}|Stack] ->
+ [{M,F}|Stack];
+ Other ->
+ Other
+ end,
put(test_server_loc, Loc).
rewrite_loc_item({M,F,_,Loc}) ->
@@ -2569,7 +2186,10 @@ start_node(Name, Type, Options) ->
%% by a shielded node.
Cover = case is_cover() of
true ->
- not is_shielded(Name) andalso same_version(Node);
+ not is_shielded(Name)
+ andalso same_version(Node)
+ andalso proplists:get_value(start_cover,Options,
+ true);
false ->
false
end,
@@ -2577,9 +2197,7 @@ start_node(Name, Type, Options) ->
net_adm:ping(Node),
case Cover of
true ->
- Sticky = unstick_all_sticky(Node),
- cover:start(Node),
- stick_all_sticky(Node,Sticky);
+ do_cover_for_node(Node,start);
_ ->
ok
end,
@@ -2607,7 +2225,27 @@ wait_for_node(Slave) ->
group_leader() ! {sync_apply,
self(),
{test_server_ctrl,wait_for_node,[Slave]}},
- receive {sync_result,R} -> R end.
+ Result = receive {sync_result,R} -> R end,
+ case Result of
+ ok ->
+ Cover = case is_cover() of
+ true ->
+ not is_shielded(Slave) andalso same_version(Slave);
+ false ->
+ false
+ end,
+
+ net_adm:ping(Slave),
+ case Cover of
+ true ->
+ do_cover_for_node(Slave,start);
+ _ ->
+ ok
+ end;
+ _ ->
+ ok
+ end,
+ Result.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2619,9 +2257,7 @@ stop_node(Slave) ->
Nocover = is_shielded(Slave) orelse not same_version(Slave),
case is_cover() of
true when not Nocover ->
- Sticky = unstick_all_sticky(Slave),
- cover:stop(Slave),
- stick_all_sticky(Slave,Sticky);
+ do_cover_for_node(Slave,flush);
_ ->
ok
end,
@@ -2818,7 +2454,7 @@ make_priv_dir() ->
%% Returns the OsType of the target node. OsType is
%% the same as returned from os:type()
os_type() ->
- test_server_ctrl:get_target_os_type().
+ os:type().
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2937,47 +2573,9 @@ purify_format(Format, Args) ->
%%
%% Generic send functions for communication with host
%%
-sync_local_or_remote_apply(Proxy,From,{M,F,A} = MFA) ->
- case get(test_server_job_sock) of
- undefined ->
- %% i'm a local target
- Result = apply(M,F,A),
- if is_pid(Proxy) -> Proxy ! {sync_result_proxy,From,Result};
- true -> From ! {sync_result,Result}
- end;
- JobSock ->
- %% i'm a remote target
- request(JobSock,{sync_apply,MFA}),
- {sync_result,Result} = recv(JobSock),
- if is_pid(Proxy) -> Proxy ! {sync_result_proxy,From,Result};
- true -> From ! {sync_result,Result}
- end
- end.
-local_or_remote_apply({M,F,A} = MFA) ->
- case get(test_server_job_sock) of
- undefined ->
- %% i'm a local target
- apply(M,F,A),
- ok;
- JobSock ->
- %% i'm a remote target
- request(JobSock,{apply,MFA}),
- ok
- end.
-
-request(Sock,Request) ->
- gen_tcp:send(Sock,<<1,(term_to_binary(Request))/binary>>).
-
-%%
-%% Generic receive function for communication with host
-%%
-recv(Sock) ->
- case gen_tcp:recv(Sock,0) of
- {error,closed} ->
- gen_tcp:close(Sock),
- exit(connection_lost);
- {ok,<<1,Request/binary>>} ->
- binary_to_term(Request);
- {ok,<<0,B/binary>>} ->
- B
+sync_local_or_remote_apply(Proxy, From, {M,F,A}) ->
+ %% i'm a local target
+ Result = apply(M, F, A),
+ if is_pid(Proxy) -> Proxy ! {sync_result_proxy,From,Result};
+ true -> From ! {sync_result,Result}
end.
diff --git a/lib/test_server/src/test_server_ctrl.erl b/lib/test_server/src/test_server_ctrl.erl
index 7f04e2eb23..7b2ebcefc0 100644
--- a/lib/test_server/src/test_server_ctrl.erl
+++ b/lib/test_server/src/test_server_ctrl.erl
@@ -34,118 +34,6 @@
%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% ARCHITECTURE
-%%
-%% The Erlang Test Server can be run on the target machine (local target)
-%% or towards a remote target. The execution flow is mainly the same in
-%% both cases, but with a remote target the test cases are (obviously)
-%% executed on the target machine. Host and target communicates over
-%% socket connections because the host should not be introduced as an
-%% additional node in the distributed erlang system in which the test
-%% cases are run.
-%%
-%%
-%% Local Target:
-%% =============
-%%
-%% -----
-%% | | test_server_ctrl ({global,test_server})
-%% ----- (test_server_ctrl.erl)
-%% |
-%% |
-%% -----
-%% | | JobProc
-%% ----- (test_server_ctrl.erl and test_server.erl)
-%% |
-%% |
-%% -----
-%% | | CaseProc
-%% ----- (test_server.erl)
-%%
-%%
-%%
-%% test_server_ctrl is the main process in the system. It is a registered
-%% process, and it will always be alive when testing is ongoing.
-%% test_server_ctrl initiates testing and monitors JobProc(s).
-%%
-%% When target is local, and Test Server is *not* being used by a framework
-%% application (where it might cause duplicate name problems in a distributed
-%% test environment), the process is globally registered as 'test_server'
-%% to be able to simulate the {global,test_server} process on a remote target.
-%%
-%% JobProc is spawned for each 'job' added to the test_server_ctrl.
-%% A job can mean one test case, one test suite or one spec.
-%% JobProc creates and writes logs and presents results from testing.
-%% JobProc is the group leader for CaseProc.
-%%
-%% CaseProc is spawned for each test case. It runs the test case and
-%% sends results and any other information to its group leader - JobProc.
-%%
-%%
-%%
-%% Remote Target:
-%% ==============
-%%
-%% HOST TARGET
-%%
-%% ----- MainSock -----
-%% test_server_ctrl | |- - - - - - -| | {global,test_server}
-%% (test_server_ctrl.erl) ----- ----- (test_server.erl)
-%% | |
-%% | |
-%% ----- JobSock -----
-%% JobProcH | |- - - - - - -| | JobProcT
-%% (test_server_ctrl.erl) ----- ----- (test_server.erl)
-%% |
-%% |
-%% -----
-%% | | CaseProc
-%% ----- (test_server.erl)
-%%
-%%
-%%
-%%
-%% A separate test_server process only exists when target is remote. It
-%% is then the main process on target. It is started when test_server_ctrl
-%% is started, and a socket connection is established between
-%% test_server_ctrl and test_server. The following information can be sent
-%% over MainSock:
-%%
-%% HOST TARGET
-%% -> {target_info, TargetInfo} (during initiation)
-%% <- {job_proc_killed,Name,Reason} (if a JobProcT dies unexpectedly)
-%% -> {job,Port,Name} (to start a new JobProcT)
-%%
-%%
-%% When target is remote, JobProc is split into to processes: JobProcH
-%% executing on Host and JobProcT executing on Target. (The two processes
-%% execute the same code as JobProc does when target is local.) JobProcH
-%% and JobProcT communicates over a socket connection. The following
-%% information can be sent over JobSock:
-%%
-%% HOST TARGET
-%% -> {test_case, Case} To start a new test case
-%% -> {beam,Mod} .beam file as binary to be loaded
-%% on target, e.g. a test suite
-%% -> {datadir,Tarfile} Content of the datadir for a test suite
-%% <- {apply,MFA} MFA to be applied on host, ignore return;
-%% (apply is used for printing information in
-%% log or console)
-%% <- {sync_apply,MFA} MFA to be applied on host, wait for return
-%% (used for starting and stopping slave nodes)
-%% -> {sync_apply,MFA} MFA to be applied on target, wait for return
-%% (used for cover compiling and analysing)
-%% <-> {sync_result,Result} Return value from sync_apply
-%% <- {test_case_result,Result} When a test case is finished
-%% <- {crash_dumps,Tarfile} When a test case is finished
-%% -> job_done When a job is finished
-%% <- {privdir,Privdir} When a job is finished
-%%
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-
-
%%% SUPERVISOR INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-export([start/0, start/1, start_link/1, stop/0]).
@@ -165,8 +53,8 @@
-export([reject_io_reqs/1, get_levels/0, set_levels/3]).
-export([multiply_timetraps/1, scale_timetraps/1, get_timetrap_parameters/0]).
-export([create_priv_dir/1]).
--export([cover/2, cover/3, cover/7,
- cross_cover_analyse/1, cross_cover_analyse/2, trc/1, stop_trace/0]).
+-export([cover/2, cover/3, cover/8,
+ cross_cover_analyse/2, cross_cover_analyse/3, trc/1, stop_trace/0]).
-export([testcase_callback/1]).
-export([set_random_seed/1]).
-export([kill_slavenodes/0]).
@@ -177,7 +65,6 @@
-export([format/1, format/2, format/3, to_string/1]).
-export([get_target_info/0]).
-export([get_hosts/0]).
--export([get_target_os_type/0]).
-export([node_started/1]).
%%% DEBUGGER INTERFACE %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -463,8 +350,7 @@ wait_finish() ->
ok.
abort_current_testcase(Reason) ->
- controller_call({abort_current_testcase,Reason}),
- ok.
+ controller_call({abort_current_testcase,Reason}).
abort() ->
OldTrap = process_flag(trap_exit, true),
@@ -521,9 +407,9 @@ cover(App, Analyse) when is_atom(App) ->
cover(CoverFile, Analyse) ->
cover(none, CoverFile, Analyse).
cover(App, CoverFile, Analyse) ->
- controller_call({cover,{App,CoverFile},Analyse}).
-cover(App, CoverFile, Exclude, Include, Cross, Export, Analyse) ->
- controller_call({cover,{App,{CoverFile,Exclude,Include,Cross,Export}},Analyse}).
+ controller_call({cover,{App,CoverFile},Analyse,true}).
+cover(App, CoverFile, Exclude, Include, Cross, Export, Analyse, Stop) ->
+ controller_call({cover,{App,{CoverFile,Exclude,Include,Cross,Export}},Analyse,Stop}).
testcase_callback(ModFunc) ->
controller_call({testcase_callback,ModFunc}).
@@ -537,20 +423,6 @@ kill_slavenodes() ->
get_hosts() ->
get(test_server_hosts).
-get_target_os_type() ->
- case whereis(?MODULE) of
- undefined ->
- %% This is probably called on the target node
- os:type();
- Pid when Pid =:= self() ->
- os:type();
- _pid ->
- %% This is called on the controller, e.g. from a
- %% specification clause of a test case
- #target_info{os_type=OsType} = controller_call(get_target_info),
- OsType
- end.
-
%%--------------------------------------------------------------------
add_job(Name, TopCase) ->
@@ -606,7 +478,7 @@ controller_call(Arg, Timeout) ->
%% Mode 'lazy' ignores (and resets to []) any jobs in the state file
%%
-init([Param]) ->
+init([_]) ->
case os:getenv("TEST_SERVER_CALL_TRACE") of
false ->
ok;
@@ -632,104 +504,14 @@ init([Param]) ->
test_server_sup:cleanup_crash_dumps(),
State = #state{jobs=[],finish=false},
put(test_server_free_targets,[]),
- case contact_main_target(Param) of
- {ok,TI} ->
- ets:new(slave_tab, [named_table,set,public,{keypos,2}]),
- set_hosts([TI#target_info.host]),
- {ok,State#state{target_info=TI}};
- {error,Reason} ->
- {stop,Reason}
- end.
-
-
-%% If the test is to be run at a remote target, this function sets up
-%% a socket communication with the target.
-contact_main_target(local) ->
- %% When used by a general framework, global registration of
- %% test_server should not be required.
- case get_fw_mod(undefined) of
- undefined ->
- %% Local target! The global test_server process implemented by
- %% test_server.erl will not be started, so we simulate it by
- %% globally registering this process instead.
- global:sync(),
- case global:whereis_name(test_server) of
- undefined ->
- global:register_name(test_server, self());
- Pid ->
- case node() of
- N when N == node(Pid) ->
- io:format(user, "Warning: test_server already running!\n", []),
- global:re_register_name(test_server,self());
- _ ->
- ok
- end
- end;
- _ ->
- ok
- end,
- TI = test_server:init_target_info(),
+ TI0 = test_server:init_target_info(),
TargetHost = test_server_sup:hoststr(),
- {ok,TI#target_info{where=local,
- host=TargetHost,
- naming=naming(),
- master=TargetHost}};
-
-contact_main_target(ParameterFile) ->
- case read_parameters(ParameterFile) of
- {ok,Par} ->
- case test_server_node:start_remote_main_target(Par) of
- {ok,TI} ->
- {ok,TI};
- {error,Error} ->
- {error,{could_not_start_main_target,Error}}
- end;
- {error,Error} ->
- {error,{could_not_read_parameterfile,Error}}
- end.
-
-read_parameters(File) ->
- case file:consult(File) of
- {ok,Data} ->
- read_parameters(lists:flatten(Data), #par{naming=naming()});
- Error ->
- Error
- end.
-read_parameters([{type,Type}|Data], Par) -> % mandatory
- read_parameters(Data, Par#par{type=Type});
-read_parameters([{target,Target}|Data], Par) -> % mandatory
- read_parameters(Data, Par#par{target=cast_to_list(Target)});
-read_parameters([{slavetargets,SlaveTargets}|Data], Par) ->
- read_parameters(Data, Par#par{slave_targets=SlaveTargets});
-read_parameters([{longnames,Bool}|Data], Par) ->
- Naming = if Bool->"-name"; true->"-sname" end,
- read_parameters(Data, Par#par{naming=Naming});
-read_parameters([{master,{Node,Cookie}}|Data], Par) ->
- read_parameters(Data, Par#par{master=cast_to_list(Node),
- cookie=cast_to_list(Cookie)});
-read_parameters([Other|_Data], _Par) ->
- {error,{illegal_parameter,Other}};
-read_parameters([], Par) when Par#par.type==undefined ->
- {error, {missing_mandatory_parameter,type}};
-read_parameters([], Par) when Par#par.target==undefined ->
- {error, {missing_mandatory_parameter,target}};
-read_parameters([], Par0) ->
- Par =
- case {Par0#par.type, Par0#par.master} of
- {ose, undefined} ->
- %% Use this node as master and bootserver for target
- %% and slave nodes
- Par0#par{master = atom_to_list(node()),
- cookie = atom_to_list(erlang:get_cookie())};
- {ose, _Master} ->
- %% Master for target and slave nodes was defined in parameterfile
- Par0;
- _ ->
- %% Use target as master for slave nodes,
- %% (No master is used for target)
- Par0#par{master="test_server@" ++ Par0#par.target}
- end,
- {ok,Par}.
+ TI = TI0#target_info{host=TargetHost,
+ naming=naming(),
+ master=TargetHost},
+ ets:new(slave_tab, [named_table,set,public,{keypos,2}]),
+ set_hosts([TI#target_info.host]),
+ {ok,State#state{target_info=TI}}.
naming() ->
case lists:member($., test_server_sup:hoststr()) of
@@ -796,7 +578,7 @@ handle_call({add_job,Dir,Name,TopCase,Skip}, _From, State) ->
ExtraTools =
case State#state.cover of
false -> [];
- {App,Analyse} -> [{cover,App,Analyse}]
+ {App,Analyse,Stop} -> [{cover,App,Analyse,Stop}]
end,
ExtraTools1 =
case State#state.random_seed of
@@ -1052,13 +834,13 @@ handle_call(stop_trace, _From, State) ->
{reply,R,State#state{trc=false}};
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%% handle_call({cover,App,Analyse}, _, State) -> ok | {error,Reason}
+%% handle_call({cover,App,Analyse,Stop}, _, State) -> ok | {error,Reason}
%%
%% All modules in application App are cover compiled
%% Analyse indicates on which level the coverage should be analysed
-handle_call({cover,App,Analyse}, _From, State) ->
- {reply,ok,State#state{cover={App,Analyse}}};
+handle_call({cover,App,Analyse,Stop}, _From, State) ->
+ {reply,ok,State#state{cover={App,Analyse,Stop}}};
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% handle_call({create_priv_dir,Value}, _, State) -> ok | {error,Reason}
@@ -1210,25 +992,17 @@ handle_cast({node_started,Node}, State) ->
%% Pid = pid()
%% Reason = term()
%%
-%% Handles exit messages from linked processes. Only test suites and
-%% possibly a target client are expected to be linked.
-%% When a test suite terminates, it is removed from the job queue.
-%% If a target client terminates it means that we lost contact with
-%% target. The test_server_ctrl process is terminated, and teminate/2
-%% will do the cleanup
+%% Handles exit messages from linked processes. Only test suites are
+%% expected to be linked. When a test suite terminates, it is removed
+%% from the job queue. Exit messages from processes that are not
+%% found in the job queue are ignored.
handle_info({'EXIT',Pid,Reason}, State) ->
case lists:keysearch(Pid,2,State#state.jobs) of
false ->
- TI = State#state.target_info,
- case TI#target_info.target_client of
- Pid ->
- %% The target client died - lost contact with target
- {stop,{lost_contact_with_target,Reason},State};
- _other ->
- %% not our problem
- {noreply,State}
- end;
+ %% not our problem
+ {noreply,State};
{value,{Name,_}} ->
NewJobs = lists:keydelete(Pid, 2, State#state.jobs),
case Reason of
@@ -1303,14 +1077,8 @@ handle_info({tcp_closed,Sock}, State=#state{trc=Sock}) ->
%%! Maybe print something???
{noreply,State#state{trc=false}};
handle_info({tcp_closed,Sock}, State) ->
- case test_server_node:nodedown(Sock,State#state.target_info) of
- target_died ->
- %% terminate/2 will do the cleanup
- {stop,target_died,State};
- _ ->
- {noreply,State}
- end;
-
+ test_server_node:nodedown(Sock, State#state.target_info),
+ {noreply,State};
handle_info(_, State) ->
%% dummy; accept all, do nothing.
{noreply, State}.
@@ -1416,6 +1184,7 @@ init_tester(Mod, Func, Args, Dir, Name, {_,_,MinLev}=Levels,
group_leader(test_server_io:get_gl(true), self()),
{TimeMy,Result} = ts_tc(Mod, Func, Args),
set_io_buffering(undefined),
+ test_server_io:set_job_name(undefined),
catch stop_extra_tools(StartedExtraTools),
case Result of
{'EXIT',test_suites_done} ->
@@ -1465,11 +1234,11 @@ elapsed_time(Before, After) ->
start_extra_tools(ExtraTools) ->
start_extra_tools(ExtraTools, []).
-start_extra_tools([{cover,App,Analyse} | ExtraTools], Started) ->
+start_extra_tools([{cover,App,Analyse,Stop} | ExtraTools], Started) ->
case cover_compile(App) of
{ok,AnalyseMods} ->
start_extra_tools(ExtraTools,
- [{cover,App,Analyse,AnalyseMods}|Started]);
+ [{cover,App,Analyse,AnalyseMods,Stop}|Started]);
{error,_} ->
start_extra_tools(ExtraTools, Started)
end;
@@ -1488,8 +1257,8 @@ stop_extra_tools(ExtraTools) ->
end,
stop_extra_tools(ExtraTools, TestDir).
-stop_extra_tools([{cover,App,Analyse,AnalyseMods}|ExtraTools], TestDir) ->
- cover_analyse(App, Analyse, AnalyseMods, TestDir),
+stop_extra_tools([{cover,App,Analyse,AnalyseMods,Stop}|ExtraTools], TestDir) ->
+ cover_analyse(App, Analyse, AnalyseMods, Stop, TestDir),
stop_extra_tools(ExtraTools, TestDir);
%%stop_extra_tools([_ | ExtraTools], TestDir) ->
%% stop_extra_tools(ExtraTools, TestDir);
@@ -2318,9 +2087,7 @@ do_add_end_per_suite_and_skip(LastMod, LastRef, Mod, FwMod) ->
%% Runs the specified tests, then displays/logs the summary.
run_test_cases(TestSpec, Config, TimetrapData) ->
-
- maybe_open_job_sock(),
-
+ test_server:init_purify(),
case lists:member(no_src, get(test_server_logopts)) of
true ->
ok;
@@ -2330,8 +2097,6 @@ run_test_cases(TestSpec, Config, TimetrapData) ->
run_test_cases_loop(TestSpec, [Config], TimetrapData, [], []),
- maybe_get_privdir(),
-
{AllSkippedN,UserSkipN,AutoSkipN,SkipStr} =
case get(test_server_skipped) of
{0,0} -> {0,0,0,""};
@@ -2350,41 +2115,6 @@ run_test_cases(TestSpec, Config, TimetrapData) ->
print(major, "=auto_skipped ~p", [AutoSkipN]),
exit(test_suites_done).
-%% If the test is run at a remote target, this function sets up a socket
-%% communication with the target for handling this particular job.
-maybe_open_job_sock() ->
- TI = get_target_info(),
- case TI#target_info.where of
- local ->
- %% local target
- test_server:init_purify();
- MainSock ->
- %% remote target
- {ok,LSock} = gen_tcp:listen(0, [binary,
- {reuseaddr,true},
- {packet,4},
- {active,false}]),
- {ok,Port} = inet:port(LSock),
- request(MainSock, {job,Port,get(test_server_name)}),
- case gen_tcp:accept(LSock, ?ACCEPT_TIMEOUT) of
- {ok,Sock} -> put(test_server_ctrl_job_sock, Sock);
- {error,Reason} -> exit({no_contact,Reason})
- end
- end.
-
-%% If the test is run at a remote target, this function waits for a
-%% tar packet containing the privdir created by the test case.
-maybe_get_privdir() ->
- case get(test_server_ctrl_job_sock) of
- undefined ->
- %% local target
- ok;
- Sock ->
- %% remote target
- request(Sock, job_done),
- gen_tcp:close(Sock)
- end.
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% run_test_cases_loop(TestCases, Config, TimetrapData, Mode, Status) -> ok
@@ -2899,7 +2629,7 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
end,
CurrMode = curr_mode(Ref, Mode0, Mode),
- ConfCaseResult = run_test_case(Ref, 0, Mod, Func, [ActualCfg], skip_init, target,
+ ConfCaseResult = run_test_case(Ref, 0, Mod, Func, [ActualCfg], skip_init,
TimetrapData, CurrMode),
case ConfCaseResult of
@@ -2933,6 +2663,7 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
exit(framework_error);
{_,Fail,_} when element(1,Fail) == 'EXIT';
element(1,Fail) == timetrap_timeout;
+ element(1,Fail) == user_timetrap_error;
element(1,Fail) == failed ->
{Cases2,Config1,Status3} =
if StartConf ->
@@ -2952,14 +2683,6 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
set_io_buffering(IOHandler),
stop_minor_log_file(),
run_test_cases_loop(Cases2, Config1, TimetrapData, Mode, Status3);
- {died,Why,_} when Func == init_per_suite ->
- print(minor, "~n*** Unexpected exit during init_per_suite.~n", []),
- Reason = {failed,{Mod,init_per_suite,Why}},
- Cases2 = skip_cases_upto(Ref, Cases, Reason, conf, CurrMode),
- set_io_buffering(IOHandler),
- stop_minor_log_file(),
- run_test_cases_loop(Cases2, Config, TimetrapData, Mode,
- delete_status(Ref, Status2));
{_,{Skip,Reason},_} when StartConf and ((Skip==skip) or (Skip==skipped)) ->
ReportAbortRepeat(skipped),
print(minor, "~n*** ~p skipped.~n"
@@ -3030,7 +2753,7 @@ run_test_cases_loop([{conf,Ref,Props,{Mod,Func}}|_Cases]=Cs0,
end;
run_test_cases_loop([{make,Ref,{Mod,Func,Args}}|Cases0], Config, TimetrapData, Mode, Status) ->
- case run_test_case(Ref, 0, Mod, Func, Args, skip_init, host, TimetrapData) of
+ case run_test_case(Ref, 0, Mod, Func, Args, skip_init, TimetrapData) of
{_,Why={'EXIT',_},_} ->
print(minor, "~n*** ~p failed.~n"
" Skipping all cases.", [Func]),
@@ -3075,7 +2798,7 @@ run_test_cases_loop([{Mod,Func,Args}|Cases], Config, TimetrapData, Mode, Status)
end,
case run_test_case(undefined, Num+1, Mod, Func, Args,
- run_init, target, TimetrapData, Mode) of
+ run_init, TimetrapData, Mode) of
%% callback to framework module failed, exit immediately
{_,{framework_error,{FwMod,FwFunc},Reason},_} ->
print(minor, "~n*** ~p failed in ~p. Reason: ~p~n", [FwMod,FwFunc,Reason]),
@@ -3711,6 +3434,11 @@ handle_io_and_exit_loop(_, [], Ok,Skip,Fail) ->
handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases) ->
receive
+ {abort_current_testcase=Tag,_Reason,From} ->
+ %% If a parallel group is executing, there is no unique
+ %% current test case, so we must generate an error.
+ From ! {self(),Tag,{error,parallel_group}},
+ handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases);
%% end of io session from test case executed by main process
{finished,_,Main,CaseNum,Mod,Func,Result,_RetVal} ->
test_server_io:print_buffered(CurrPid),
@@ -3764,23 +3492,23 @@ handle_io_and_exits(Main, CurrPid, CaseNum, Mod, Func, Cases) ->
%% RetVal is the result of executing the test case. It contains info
%% about the execution time and the return value of the test case function.
-run_test_case(Ref, Num, Mod, Func, Args, RunInit, Where, TimetrapData) ->
+run_test_case(Ref, Num, Mod, Func, Args, RunInit, TimetrapData) ->
file:set_cwd(filename:dirname(get(test_server_dir))),
- run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
+ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
TimetrapData, [], self()).
-run_test_case(Ref, Num, Mod, Func, Args, skip_init, Where, TimetrapData, Mode) ->
+run_test_case(Ref, Num, Mod, Func, Args, skip_init, TimetrapData, Mode) ->
%% a conf case is always executed by the main process
- run_test_case1(Ref, Num, Mod, Func, Args, skip_init, Where,
+ run_test_case1(Ref, Num, Mod, Func, Args, skip_init,
TimetrapData, Mode, self());
-run_test_case(Ref, Num, Mod, Func, Args, RunInit, Where, TimetrapData, Mode) ->
+run_test_case(Ref, Num, Mod, Func, Args, RunInit, TimetrapData, Mode) ->
file:set_cwd(filename:dirname(get(test_server_dir))),
Main = self(),
case check_prop(parallel, Mode) of
false ->
%% this is a sequential test case
- run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
+ run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
TimetrapData, Mode, Main);
_Ref ->
%% this a parallel test case, spawn the new process
@@ -3792,11 +3520,11 @@ run_test_case(Ref, Num, Mod, Func, Args, RunInit, Where, TimetrapData, Mode) ->
[put(Key, Val) || {Key,Val} <- Dictionary],
set_io_buffering({tc,Main}),
run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
- Where, TimetrapData, Mode, Main)
+ TimetrapData, Mode, Main)
end)
end.
-run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
+run_test_case1(Ref, Num, Mod, Func, Args, RunInit,
TimetrapData, Mode, Main) ->
group_leader(test_server_io:get_gl(Main == self()), self()),
@@ -3809,12 +3537,6 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
Main ! {started,Ref,self(),Num,Mod,Func}
end,
TSDir = get(test_server_dir),
- case Where of
- target ->
- maybe_send_beam_and_datadir(Mod);
- host ->
- ok
- end,
print(major, "=case ~p:~p", [Mod, Func]),
MinorName = start_minor_log_file(Mod, Func),
@@ -3872,7 +3594,7 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
%% run the test case
{Result,DetectedFail,ProcsBefore,ProcsAfter} =
run_test_case_apply(Num, Mod, Func, [UpdatedArgs], get_name(Mode),
- RunInit, Where, TimetrapData),
+ RunInit, TimetrapData),
{Time,RetVal,Loc,Opts,Comment} =
case Result of
Normal={_Time,_RetVal,_Loc,_Opts,_Comment} -> Normal;
@@ -3989,7 +3711,7 @@ run_test_case1(Ref, Num, Mod, Func, Args, RunInit, Where,
true ->
ok
end,
- check_new_crash_dumps(Where),
+ test_server_sup:check_new_crash_dumps(),
%% if io is being buffered, send finished message
%% (no matter if case runs on parallel or main process)
@@ -4017,109 +3739,6 @@ do_unless_parallel(Main, Action) when is_function(Action, 0) ->
num2str(0) -> "";
num2str(N) -> integer_to_list(N).
-%% If remote target, this function sends the test suite (if not already sent)
-%% and the content of datadir til target.
-maybe_send_beam_and_datadir(Mod) ->
- case get(test_server_ctrl_job_sock) of
- undefined ->
- %% local target
- ok;
- JobSock ->
- %% remote target
- case get(test_server_downloaded_suites) of
- undefined ->
- send_beam_and_datadir(Mod, JobSock),
- put(test_server_downloaded_suites, [Mod]);
- Suites ->
- case lists:member(Mod, Suites) of
- false ->
- send_beam_and_datadir(Mod, JobSock),
- put(test_server_downloaded_suites, [Mod|Suites]);
- true ->
- ok
- end
- end
- end.
-
-send_beam_and_datadir(Mod, JobSock) ->
- case code:which(Mod) of
- non_existing ->
- io:format("** WARNING: Suite ~w could not be found on host\n",
- [Mod]);
- BeamFile ->
- send_beam(JobSock, Mod, BeamFile)
- end,
- DataDir = get_data_dir(Mod),
- case file:read_file_info(DataDir) of
- {ok,_I} ->
- {ok,All} = file:list_dir(DataDir),
- AddTarFiles =
- case controller_call(get_target_info) of
- #target_info{os_family=ose} ->
- ObjExt = code:objfile_extension(),
- Wc = filename:join(DataDir, "*" ++ ObjExt),
- ModsInDatadir = filelib:wildcard(Wc),
- SendBeamFun = fun(X) -> send_beam(JobSock, X) end,
- lists:foreach(SendBeamFun, ModsInDatadir),
- %% No need to send C code or makefiles since
- %% no compilation can be done on target anyway.
- %% Compiled C code must exist on target.
- %% Beam files are already sent as binaries.
- %% Erlang source are sent in case the test case
- %% is to compile it.
- Filter = fun("Makefile") -> false;
- ("Makefile.src") -> false;
- (Y) ->
- case filename:extension(Y) of
- ".c" -> false;
- ObjExt -> false;
- _ -> true
- end
- end,
- lists:filter(Filter, All);
- _ ->
- All
- end,
- Tarfile = "data_dir.tar.gz",
- {ok,Tar} = erl_tar:open(Tarfile, [write,compressed]),
- ShortDataDir = filename:basename(DataDir),
- AddTarFun =
- fun(File) ->
- Long = filename:join(DataDir, File),
- Short = filename:join(ShortDataDir, File),
- ok = erl_tar:add(Tar, Long, Short, [])
- end,
- lists:foreach(AddTarFun, AddTarFiles),
- ok = erl_tar:close(Tar),
- {ok,TarBin} = file:read_file(Tarfile),
- file:delete(Tarfile),
- request(JobSock, {{datadir,Tarfile}, TarBin});
- {error,_R} ->
- ok
- end.
-
-send_beam(JobSock, BeamFile) ->
- Mod=filename:rootname(filename:basename(BeamFile), code:objfile_extension()),
- send_beam(JobSock, list_to_atom(Mod), BeamFile).
-send_beam(JobSock, Mod, BeamFile) ->
- {ok,BeamBin} = file:read_file(BeamFile),
- request(JobSock, {{beam,Mod,BeamFile}, BeamBin}).
-
-check_new_crash_dumps(Where) ->
- case Where of
- target ->
- case get(test_server_ctrl_job_sock) of
- undefined ->
- ok;
- Socket ->
- read_job_sock_loop(Socket)
- end;
- _ ->
- ok
- end,
- test_server_sup:check_new_crash_dumps().
-
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% progress(Result, CaseNum, Mod, Func, Location, Reason, Time,
%% Comment, TimeFormat) -> Result
@@ -4487,11 +4106,10 @@ do_format_exception(Reason={Error,Stack}) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit,
-%% Where, TimetrapData) ->
+%% TimetrapData) ->
%% {{Time,RetVal,Loc,Opts,Comment},DetectedFail,ProcessesBefore,ProcessesAfter} |
%% {{died,Reason,unknown,Comment},DetectedFail,ProcessesBefore,ProcessesAfter}
%% Name = atom()
-%% Where = target | host
%% Time = float() (seconds)
%% RetVal = term()
%% Loc = term()
@@ -4506,23 +4124,10 @@ do_format_exception(Reason={Error,Stack}) ->
%% sent over socket to target, and test_server runs the case and sends the
%% result back over the socket. Else test_server runs the case directly on host.
-run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit, host,
+run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit,
TimetrapData) ->
test_server:run_test_case_apply({CaseNum,Mod,Func,Args,Name,RunInit,
- TimetrapData});
-run_test_case_apply(CaseNum, Mod, Func, Args, Name, RunInit, target,
- TimetrapData) ->
- case get(test_server_ctrl_job_sock) of
- undefined ->
- %% local target
- test_server:run_test_case_apply({CaseNum,Mod,Func,Args,Name,RunInit,
- TimetrapData});
- JobSock ->
- %% remote target
- request(JobSock, {test_case,{CaseNum,Mod,Func,Args,Name,RunInit,
- TimetrapData}}),
- read_job_sock_loop(JobSock)
- end.
+ TimetrapData}).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% print(Detail, Format, Args) -> ok
@@ -5119,7 +4724,7 @@ get_target_info() ->
%% Called by test_server. See test_server:start_node/3 for details
start_node(Name, Type, Options) ->
- T = 10 * ?ACCEPT_TIMEOUT, % give some extra time
+ T = 10 * ?ACCEPT_TIMEOUT * test_server:timetrap_scale_factor(),
format(minor, "Attempt to start ~w node ~p with options ~p",
[Type, Name, Options]),
case controller_call({start_node,Name,Type,Options}, T) of
@@ -5164,7 +4769,8 @@ start_node(Name, Type, Options) ->
%% when the new node has contacted test_server_ctrl again
wait_for_node(Slave) ->
- case catch controller_call({wait_for_node,Slave},10000) of
+ T = 10000 * test_server:timetrap_scale_factor(),
+ case catch controller_call({wait_for_node,Slave},T) of
{'EXIT',{timeout,_}} -> {error,timeout};
ok -> ok
end.
@@ -5188,60 +4794,6 @@ stop_node(Slave) ->
controller_call({stop_node,Slave}).
-%%--------------------------------------------------------------------
-%% Functions handling target communication over socket
-
-%% Generic send function for communication with target
-request(Sock,Request) ->
- gen_tcp:send(Sock,<<1,(term_to_binary(Request))/binary>>).
-
-%% Receive and decode request on job specific socket
-%% Used when test is running on a remote target
-read_job_sock_loop(Sock) ->
- case gen_tcp:recv(Sock,0) of
- {error,Reason} ->
- gen_tcp:close(Sock),
- exit({controller,connection_lost,Reason});
- {ok,<<1,Request/binary>>} ->
- case decode(binary_to_term(Request)) of
- ok ->
- read_job_sock_loop(Sock);
- {stop,Result} ->
- Result
- end
- end.
-
-decode({apply,{M,F,A}}) ->
- apply(M,F,A),
- ok;
-decode({sync_apply,{M,F,A}}) ->
- R = apply(M,F,A),
- request(get(test_server_ctrl_job_sock),{sync_result,R}),
- ok;
-decode({sync_result,Result}) ->
- {stop,Result};
-decode({test_case_result,Result}) ->
- {stop,Result};
-decode({privdir,empty_priv_dir}) ->
- {stop,ok};
-decode({{privdir,PrivDirTar},TarBin}) ->
- Root = get(test_server_log_dir_base),
- unpack_tar(Root,PrivDirTar,TarBin),
- {stop,ok};
-decode({crash_dumps,no_crash_dumps}) ->
- {stop,ok};
-decode({{crash_dumps,CrashDumpTar},TarBin}) ->
- Dir = test_server_sup:crash_dump_dir(),
- unpack_tar(Dir,CrashDumpTar,TarBin),
- {stop,ok}.
-
-unpack_tar(Dir,TarFileName0,TarBin) ->
- TarFileName = filename:join(Dir,TarFileName0),
- ok = file:write_file(TarFileName,TarBin),
- ok = erl_tar:extract(TarFileName,[compressed,{cwd,Dir}]),
- ok = file:delete(TarFileName).
-
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% DEBUGGER INTERFACE %%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -5386,16 +4938,7 @@ cover_compile({App,CoverFile}) ->
cover_compile1({App,Exclude,Include,Cross}).
cover_compile1(What) ->
- case get(test_server_ctrl_job_sock) of
- undefined ->
- %% local target
- test_server:cover_compile(What);
- JobSock ->
- %% remote target
- request(JobSock, {sync_apply,{test_server,cover_compile,[What]}}),
- read_job_sock_loop(JobSock)
- end.
-
+ test_server:cover_compile(What).
%% Read the coverfile for an application and return a list of modules
%% that are members of the application but shall not be compiled
@@ -5447,7 +4990,7 @@ check_cover_file([], Exclude, Include) ->
%%
%% This per application analysis writes the file cover.html in the
%% application's run.<timestamp> directory.
-cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, TestDir) ->
+cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, Stop, TestDir) ->
write_default_cross_coverlog(TestDir),
{ok,CoverLog} = file:open(filename:join(TestDir, ?coverlog_name), [write]),
@@ -5478,7 +5021,7 @@ cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, TestDir) ->
io:fwrite(CoverLog, "<p>Excluded module(s): <code>~p</code>\n", [Excluded]),
- Coverage = cover_analyse(Analyse, AnalyseMods),
+ Coverage = cover_analyse(Analyse, AnalyseMods, Stop),
case lists:filter(fun({_M,{_,_,_}}) -> false;
(_) -> true
@@ -5495,32 +5038,27 @@ cover_analyse({App,CoverInfo}, Analyse, AnalyseMods, TestDir) ->
file:write_file(filename:join(TestDir, ?cover_total),
term_to_binary(TotPercent)).
-cover_analyse(Analyse, AnalyseMods) ->
+cover_analyse(Analyse, AnalyseMods, Stop) ->
TestDir = get(test_server_log_dir_base),
- case get(test_server_ctrl_job_sock) of
- undefined ->
- %% local target
- test_server:cover_analyse({Analyse,TestDir}, AnalyseMods);
- JobSock ->
- %% remote target
- request(JobSock, {sync_apply,{test_server,
- cover_analyse,
- [Analyse,AnalyseMods]}}),
- read_job_sock_loop(JobSock)
- end.
+ test_server:cover_analyse({Analyse,TestDir}, AnalyseMods, Stop).
%% Cover analysis, cross application
%% This can be executed on any node after all tests are finished.
-%% The node's current directory must be the same as when the tests
-%% were run.
-cross_cover_analyse(Analyse) ->
- cross_cover_analyse(Analyse, undefined).
-
-cross_cover_analyse(Analyse, CrossModules) ->
- CoverdataFiles = get_coverdata_files(),
+%% Apps = [{App,Dir}]
+%% App = atom(), application name
+%% Dir = string(), the log directory for App, normally where
+%% run.<timestamp> is found.
+%% Modules = [atom()], modules that have been cover compiled during tests
+%% of apps other than the one they belong to.
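+%% Example shape (illustrative values only):
+%%   Apps    = [{tools,"<logdir for tools>"}]
+%%   Modules = [lists,ets]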
+cross_cover_analyse(Analyse, Apps) ->
+ cross_cover_analyse(Analyse, Apps, get_cross_modules()).
+cross_cover_analyse(Analyse, Apps, Modules) ->
+ Apps1 = get_latest_run_dirs(Apps),
+ Apps2 = add_cross_modules(Modules,Apps1),
+ CoverdataFiles = get_coverdata_files(Apps2),
lists:foreach(fun(CDF) -> cover:import(CDF) end, CoverdataFiles),
- io:fwrite("Cover analysing... ", []),
+ io:fwrite("Cover analysing...\n", []),
DetailsFun =
case Analyse of
details ->
@@ -5534,25 +5072,15 @@ cross_cover_analyse(Analyse, CrossModules) ->
_ ->
fun(_,_) -> undefined end
end,
- SortedModules =
- case CrossModules of
- undefined ->
- sort_modules([Mod || Mod <- get_all_cross_modules(),
- lists:member(Mod, cover:imported_modules())], []);
- _ ->
- sort_modules(CrossModules, [])
- end,
- Coverage = analyse_apps(SortedModules, DetailsFun, []),
+ Coverage = analyse_apps(Apps2, DetailsFun, []),
cover:stop(),
- write_cross_cover_logs(Coverage).
+ write_cross_cover_logs(Coverage,Apps2).
-%% For each application from which there are modules listed in the
-%% cross.cover, write a cross cover log (cross_cover.html).
-write_cross_cover_logs([{App,Coverage}|T]) ->
- case last_test_for_app(App) of
- false ->
- ok;
- Dir ->
+%% For each application from which there are cross cover analysed
+%% modules, write a cross cover log (cross_cover.html).
+write_cross_cover_logs([{App,Coverage}|T],Apps) ->
+ case lists:keyfind(App,1,Apps) of
+ {_,Dir,Mods} when Mods=/=[] ->
CoverLogName = filename:join(Dir,?cross_coverlog_name),
{ok,CoverLog} = file:open(CoverLogName, [write]),
write_coverlog_header(CoverLog),
@@ -5560,54 +5088,51 @@ write_cross_cover_logs([{App,Coverage}|T]) ->
"<h1>Coverage results for \'~w\' from all tests</h1>\n",
[App]),
write_cover_result_table(CoverLog, Coverage),
- io:fwrite("Written file ~p\n", [CoverLogName])
+ io:fwrite("Written file ~p\n", [CoverLogName]);
+ _ ->
+ ok
end,
- write_cross_cover_logs(T);
-write_cross_cover_logs([]) ->
+ write_cross_cover_logs(T,Apps);
+write_cross_cover_logs([],_) ->
io:fwrite("done\n", []).
-%% Find all exported coverdata files. First find all the latest
-%% run.<timestamp> directories, and the check if there is a file named
-%% all.coverdata.
-get_coverdata_files() ->
- PossibleFiles = [last_coverdata_file(Dir) ||
- Dir <- filelib:wildcard([$*|?logdir_ext]),
- filelib:is_dir(Dir)],
- [File || File <- PossibleFiles, filelib:is_file(File)].
-
-last_coverdata_file(Dir) ->
- LastDir = last_test(filelib:wildcard(filename:join(Dir,"run.[1-2]*")),false),
- filename:join(LastDir,"all.coverdata").
-
-
-%% Find the latest run.<timestamp> directory for the given application.
-last_test_for_app(App) ->
- AppLogDir = atom_to_list(App)++?logdir_ext,
- last_test(filelib:wildcard(filename:join(AppLogDir,"run.[1-2]*")),false).
-
-last_test([Run|Rest], false) ->
- last_test(Rest, Run);
-last_test([Run|Rest], Latest) when Run > Latest ->
- last_test(Rest, Run);
-last_test([_|Rest], Latest) ->
- last_test(Rest, Latest);
-last_test([], Latest) ->
+%% Get the latest run.<timestamp> directories
+get_latest_run_dirs([{App,Dir}|Apps]) ->
+ [{App,get_latest_run_dir(Dir)} | get_latest_run_dirs(Apps)];
+get_latest_run_dirs([]) ->
+ [].
+
+get_latest_run_dir(Dir) ->
+ case filelib:wildcard(filename:join(Dir,"run.[1-2]*")) of
+ [] ->
+ Dir;
+ [H|T] ->
+ get_latest_dir(T,H)
+ end.
+
+get_latest_dir([H|T],Latest) when H>Latest ->
+ get_latest_dir(T,H);
+get_latest_dir([_|T],Latest) ->
+ get_latest_dir(T,Latest);
+get_latest_dir([],Latest) ->
Latest.
-%% Sort modules according to the application they belong to.
-%% Return [{App,LastTestDir,ModuleList}]
-sort_modules([M|Modules], Acc) ->
- App = get_app(M),
- Acc1 =
- case lists:keysearch(App, 1, Acc) of
- {value,{App,LastTest,List}} ->
- lists:keyreplace(App, 1, Acc, {App,LastTest,[M|List]});
+%% Associate the cross cover modules with their applications.
+add_cross_modules(Mods,Apps)->
+ do_add_cross_modules(Mods,[{App,Dir,[]} || {App,Dir} <- Apps]).
+do_add_cross_modules([Mod|Mods],Apps)->
+ App = get_app(Mod),
+ NewApps =
+ case lists:keytake(App,1,Apps) of
+ {value,{App,Dir,AppMods},Rest} ->
+ [{App,Dir,lists:umerge([Mod],AppMods)}|Rest];
false ->
- [{App,last_test_for_app(App),[M]}|Acc]
+ Apps
end,
- sort_modules(Modules, Acc1);
-sort_modules([], Acc) ->
- Acc.
+ do_add_cross_modules(Mods,NewApps);
+do_add_cross_modules([],Apps) ->
+ %% Just to get the modules in the same order as in the app-only cover log
+ [{App,Dir,lists:reverse(Mods)} || {App,Dir,Mods} <- Apps].
get_app(Module) ->
Beam = code:which(Module),
@@ -5615,6 +5140,14 @@ get_app(Module) ->
[AppStr|_] = string:tokens(AppDir,"-"),
list_to_atom(AppStr).
+%% Find all exported coverdata files.
+get_coverdata_files(Apps) ->
+ lists:flatmap(
+ fun({_,LatestAppDir,_}) ->
+ filelib:wildcard(filename:join(LatestAppDir,"all.coverdata"))
+ end,
+ Apps).
+
%% For each application, analyse all modules
%% Used for cross cover analysis.
@@ -5635,7 +5168,7 @@ analyse_modules(_Dir, [], _DetailsFun, Acc) ->
%% Read the cross cover file (cross.cover)
-get_all_cross_modules() ->
+get_cross_modules() ->
get_cross_modules(all).
get_cross_modules(App) ->
case file:consult(?cross_cover_file) of
diff --git a/lib/test_server/src/test_server_h.erl b/lib/test_server/src/test_server_h.erl
index fdeee59326..78daba855d 100644
--- a/lib/test_server/src/test_server_h.erl
+++ b/lib/test_server/src/test_server_h.erl
@@ -131,6 +131,11 @@ report_receiver(warning_msg, _) -> kernel;
report_receiver(warning_report, _) -> kernel;
report_receiver(info, _) -> kernel;
report_receiver(info_msg, _) -> kernel;
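+%% info_reports tagged with ct_connection or conn_log (connection logging)
+%% get no receiver, i.e. they are not forwarded to the kernel handler.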
+report_receiver(info_report,Tuple)
+ when is_tuple(Tuple) andalso
+ (element(1,Tuple)==ct_connection orelse
+ element(1,Tuple)==conn_log) ->
+ none;
report_receiver(info_report, _) -> kernel;
report_receiver(_, _) -> none.
diff --git a/lib/test_server/src/test_server_internal.hrl b/lib/test_server/src/test_server_internal.hrl
index b58b42805e..d204c35293 100644
--- a/lib/test_server/src/test_server_internal.hrl
+++ b/lib/test_server/src/test_server_internal.hrl
@@ -24,8 +24,7 @@
%% Target information generated by test_server:init_target_info/0 and
%% test_server_ctrl:contact_main_target/2
%% Once initiated, this information will never change!!
--record(target_info, {where, % local | Socket
- os_family, % atom(); win32 | unix
+-record(target_info, {os_family, % atom(); win32 | unix
os_type, % result of os:type()
host, % string(); the name of the target machine
version, % string()
@@ -43,7 +42,6 @@
% itself is master for slave nodes
%% The following are only used for remote targets
- target_client, % reference to a client talking to target
slave_targets=[]}).% list() of atom(); all available
% targets for starting slavenodes
diff --git a/lib/test_server/src/test_server_io.erl b/lib/test_server/src/test_server_io.erl
index abdfb71241..e960b3087a 100644
--- a/lib/test_server/src/test_server_io.erl
+++ b/lib/test_server/src/test_server_io.erl
@@ -68,8 +68,8 @@ stop() ->
%% Return a group leader (a process using the test_server_gl module).
%% If Shared is true, the shared group leader is returned (suitable for
%% running sequential test cases), otherwise a new group leader process
-%% is spawned. Group leader processes will live until they are garbaged
-%% collected by a call to gc/0.
+%% is spawned. Group leader processes will live until the
+%% 'test_server_io' process is stopped.
get_gl(Shared) when is_boolean(Shared) ->
req({get_gl,Shared}).
@@ -95,7 +95,7 @@ start_transaction() ->
%% end_transaction()
%%
%% End the transaction started by start_transaction/0. Subsequent calls to
-%% print/3 will cause the message to be printed directory.
+%% print/3 will cause the message to be printed directly.
end_transaction() ->
req({end_transaction,self()}).
@@ -253,6 +253,8 @@ output(From, Tag, Str, #st{io_buffering=Buffered,buffered=Buf0}=St) ->
St#st{buffered=Buf}
end.
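+%% No job name set: write the string to stdout as is, without the
+%% "Testing <name>:" prefix.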
+do_output(stdout, Str, #st{job_name=undefined}) ->
+ io:put_chars(Str);
do_output(stdout, Str0, #st{job_name=Name}) ->
Str = io_lib:format("Testing ~s: ~s\n", [Name,Str0]),
io:put_chars(Str);
diff --git a/lib/test_server/src/test_server_node.erl b/lib/test_server/src/test_server_node.erl
index 872f15f2be..b307d93c7d 100644
--- a/lib/test_server/src/test_server_node.erl
+++ b/lib/test_server/src/test_server_node.erl
@@ -26,7 +26,7 @@
%% Test Controller interface
-export([is_release_available/1]).
--export([start_remote_main_target/1,stop/1]).
+-export([stop/1]).
-export([start_tracer_node/2,trace_nodes/2,stop_tracer_node/1]).
-export([start_node/5, stop_node/2]).
-export([kill_nodes/1, nodedown/2]).
@@ -57,79 +57,8 @@ is_release_available(Rel) ->
false
end.
-%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-%%% Start main target node on remote host
-%%% The target node must not know the controller node via erlang distribution.
-start_remote_main_target(Parameters) ->
- #par{type=TargetType,
- target=TargetHost,
- naming=Naming,
- master=MasterNode,
- cookie=MasterCookie,
- slave_targets=SlaveTargets} = Parameters,
-
- lists:foreach(fun(T) -> maybe_reboot_target({TargetType,T}) end,
- [list_to_atom(TargetHost)|SlaveTargets]),
-
- Cmd0 = get_main_target_start_command(TargetType,TargetHost,Naming,
- MasterNode,MasterCookie),
- Cmd =
- case os:getenv("TEST_SERVER_FRAMEWORK") of
- FW when FW =:= false; FW =:= "undefined" -> Cmd0;
- FW -> Cmd0 ++ " -env TEST_SERVER_FRAMEWORK " ++ FW
- end,
-
- {ok,LSock} = gen_tcp:listen(?MAIN_PORT,[binary,{reuseaddr,true},{packet,2}]),
- case start_target(TargetType,TargetHost,Cmd) of
- {ok,TargetClient,AcceptTimeout} ->
- case gen_tcp:accept(LSock,AcceptTimeout) of
- {ok,Sock} ->
- gen_tcp:close(LSock),
- receive
- {tcp,Sock,Bin} when is_binary(Bin) ->
- case unpack(Bin) of
- error ->
- gen_tcp:close(Sock),
- close_target_client(TargetClient),
- {error,bad_message};
- {ok,{target_info,TI}} ->
- put(test_server_free_targets,SlaveTargets),
- {ok, TI#target_info{where=Sock,
- host=TargetHost,
- naming=Naming,
- master=MasterNode,
- target_client=TargetClient,
- slave_targets=SlaveTargets}}
- end;
- {tcp_closed,Sock} ->
- gen_tcp:close(Sock),
- close_target_client(TargetClient),
- {error,could_not_contact_target}
- after AcceptTimeout ->
- gen_tcp:close(Sock),
- close_target_client(TargetClient),
- {error,timeout}
- end;
- Error ->
- %%! maybe something like kill_target(...)???
- gen_tcp:close(LSock),
- close_target_client(TargetClient),
- {error,{could_not_contact_target,Error}}
- end;
- Error ->
- gen_tcp:close(LSock),
- {error,{could_not_start_target,Error}}
- end.
-
stop(TI) ->
- kill_nodes(TI),
- case TI#target_info.where of
- local -> % there is no remote target to stop
- ok;
- Sock -> % stop remote target
- gen_tcp:close(Sock),
- close_target_client(TI#target_info.target_client)
- end.
+ kill_nodes(TI).
nodedown(Sock, TI) ->
Match = #slave_info{name='$1',socket=Sock,client='$2',_='_'},
@@ -146,14 +75,8 @@ nodedown(Sock, TI) ->
false -> ok
end,
slave_died;
- [] ->
- case TI#target_info.where of
- Sock ->
- %% test_server_ctrl will do the cleanup
- target_died;
- _ ->
- ignore
- end
+ [] ->
+ ok
end.
@@ -167,10 +90,7 @@ start_tracer_node(TraceFile,TI) ->
Match = #slave_info{name='$1',_='_'},
SlaveNodes = lists:map(fun([N]) -> [" ",N] end,
ets:match(slave_tab,Match)),
- TargetNode = case TI#target_info.where of
- local -> node();
- _ -> "test_server@" ++ TI#target_info.host
- end,
+ TargetNode = node(),
Cookie = TI#target_info.cookie,
{ok,LSock} = gen_tcp:listen(0,[binary,{reuseaddr,true},{packet,2}]),
{ok,TracePort} = inet:port(LSock),
@@ -471,129 +391,29 @@ start_node_slave(SlaveName, OptList, From, TI) ->
Ret =
case start_which_node(OptList) of
{error,Reason} -> {{error,Reason},undefined,undefined};
- Host0 -> do_start_node_slave(Host0,SlaveName,Args,Prog,Cleanup,TI)
+ Host0 -> do_start_node_slave(Host0,SlaveName,Args,Prog,Cleanup)
end,
gen_server:reply(From,Ret).
-do_start_node_slave(Host0, SlaveName, Args, Prog, Cleanup, TI) ->
- case TI#target_info.where of
- local ->
- Host =
- case Host0 of
- local -> test_server_sup:hoststr();
- _ -> cast_to_list(Host0)
- end,
- Cmd = Prog ++ " " ++ Args,
- %% Can use slave.erl here because I'm both controller and target
- %% so I will ping the new node anyway
- case slave:start(Host, SlaveName, Args, no_link, Prog) of
- {ok,Nodename} ->
- case Cleanup of
- true -> ets:insert(slave_tab,#slave_info{name=Nodename});
- false -> ok
- end,
- {{ok,Nodename}, Host, Cmd, [], []};
- Ret ->
- {Ret, Host, Cmd}
- end;
-
- _Sock ->
- %% Cannot use slave.erl here because I'm only controller, and will
- %% not ping the new node. Only target shall contact the new node!!
- no_contact_start_slave(Host0,SlaveName,Args,Prog,Cleanup,TI)
- end.
-
-
-
-no_contact_start_slave(Host, Name, Args0, Prog, Cleanup,TI) ->
- Args1 = case string:str(Args0,"-setcookie") of
- 0 -> "-setcookie " ++ TI#target_info.cookie ++ " " ++ Args0;
- _ -> Args0
+do_start_node_slave(Host0, SlaveName, Args, Prog, Cleanup) ->
+ Host =
+ case Host0 of
+ local -> test_server_sup:hoststr();
+ _ -> cast_to_list(Host0)
+ end,
+ Cmd = Prog ++ " " ++ Args,
+ %% Can use slave.erl here because I'm both controller and target
+ %% so I will ping the new node anyway
+ case slave:start(Host, SlaveName, Args, no_link, Prog) of
+ {ok,Nodename} ->
+ case Cleanup of
+ true -> ets:insert(slave_tab,#slave_info{name=Nodename});
+ false -> ok
end,
- Args = TI#target_info.naming ++ " " ++ cast_to_list(Name) ++ " " ++ Args1,
- case Host of
- local ->
- case get(test_server_free_targets) of
- [] ->
- io:format("Starting slave ~p on HOST~n", [Name]),
- TargetType = test_server_sup:get_os_family(),
- Cmd0 = get_slave_node_start_command(TargetType,
- Prog,
- TI#target_info.master),
- Cmd = Cmd0 ++ " " ++ Args,
- do_no_contact_start_slave(TargetType,
- test_server_sup:hoststr(),
- Cmd, Cleanup,TI, false);
- [H|T] ->
- TargetType = TI#target_info.os_family,
- Cmd0 = get_slave_node_start_command(TargetType,
- Prog,
- TI#target_info.master),
- Cmd = Cmd0 ++ " " ++ Args,
- case do_no_contact_start_slave(TargetType,H,Cmd,Cleanup,
- TI,true) of
- {error,remove} ->
- io:format("Cannot start node on ~p, "
- "removing from slave "
- "target list.", [H]),
- put(test_server_free_targets,T),
- no_contact_start_slave(Host,Name,Args,Prog,
- Cleanup,TI);
- {error,keep} ->
- %% H is added to the END OF THE LIST
- %% in order to avoid the same target to
- %% be selected each time
- put(test_server_free_targets,T++[H]),
- no_contact_start_slave(Host,Name,Args,Prog,
- Cleanup,TI);
- R ->
- put(test_server_free_targets,T),
- R
- end
- end;
- _ ->
- TargetType = TI#target_info.os_family,
- Cmd0 = get_slave_node_start_command(TargetType,
- Prog,
- TI#target_info.master),
- Cmd = Cmd0 ++ " " ++ Args,
- do_no_contact_start_slave(TargetType, Host, Cmd, Cleanup, TI, false)
- end.
-
-do_no_contact_start_slave(TargetType,Host0,Cmd0,Cleanup,TI,Retry) ->
- %% Must use TargetType instead of TI#target_info.os_familiy here
- %% because if there were no free_targets we will be starting the
- %% slave node on host which might have a different os_familiy
- Host = cast_to_list(Host0),
- {ok,LSock} = gen_tcp:listen(0,[binary,
- {reuseaddr,true},
- {packet,2}]),
- {ok,WaitPort} = inet:port(LSock),
- Cmd = lists:concat([Cmd0, " -s ", ?MODULE, " node_started ",
- test_server_sup:hoststr(), " ", WaitPort]),
-
- case start_target(TargetType,Host,Cmd) of
- {ok,Client,AcceptTimeout} ->
- case wait_for_node_started(LSock,AcceptTimeout,
- Client,Cleanup,TI,self()) of
- {error,_}=WaitError ->
- if Retry ->
- case maybe_reboot_target(Client) of
- {error,_} -> {error,remove};
- ok -> {error,keep}
- end;
- true ->
- {WaitError,Host,Cmd}
- end;
- {Ok,Warning} ->
- {Ok,Host,Cmd,[],Warning}
- end;
- StartError ->
- gen_tcp:close(LSock),
- if Retry -> {error,remove};
- true -> {{error,{could_not_start_target,StartError}},Host,Cmd}
- end
+ {{ok,Nodename}, Host, Cmd, [], []};
+ Ret ->
+ {Ret, Host, Cmd}
end.
@@ -777,40 +597,10 @@ kill_node(SI,TI) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Platform specific code
-start_target(unix,TargetHost,Cmd0) ->
- Cmd =
- case test_server_sup:hoststr() of
- TargetHost -> Cmd0;
- _ -> lists:concat(["rsh ",TargetHost, " ", Cmd0])
- end,
- open_port({spawn, Cmd}, [stream]),
- {ok,undefined,?ACCEPT_TIMEOUT}.
-
-maybe_reboot_target(_) ->
- {error, cannot_reboot_target}.
-
close_target_client(undefined) ->
ok.
-
-%%
-%% Command for starting main target
-%%
-get_main_target_start_command(unix,_TargetHost,Naming,
- _MasterNode,_MasterCookie) ->
- Prog = pick_erl_program(default),
- Prog ++ " " ++ Naming ++ " test_server" ++
- " -boot start_sasl -sasl errlog_type error"
- " -s test_server start " ++ test_server_sup:hoststr().
-
-%%
-%% Command for starting slave nodes
-%%
-get_slave_node_start_command(unix, Prog, MasterNode) ->
- cast_to_list(Prog) ++ " -detached -master " ++ MasterNode.
-
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% cast_to_list(X) -> string()
%%% X = list() | atom() | void()
diff --git a/lib/test_server/src/test_server_sup.erl b/lib/test_server/src/test_server_sup.erl
index 4a27c1ebae..ba5bb9f5d2 100644
--- a/lib/test_server/src/test_server_sup.erl
+++ b/lib/test_server/src/test_server_sup.erl
@@ -64,13 +64,7 @@ timetrap(Timeout0, ReportTVal, Scale, Pid) ->
true -> ReportTVal end,
MFLs = test_server:get_loc(Pid),
Mon = erlang:monitor(process, Pid),
- Trap =
- case get(test_server_init_or_end_conf) of
- undefined ->
- {timetrap_timeout,TimeToReport,MFLs};
- InitOrEnd ->
- {timetrap_timeout,TimeToReport,MFLs,InitOrEnd}
- end,
+ Trap = {timetrap_timeout,TimeToReport,MFLs},
exit(Pid, Trap),
receive
{'DOWN', Mon, process, Pid, _} ->
@@ -518,8 +512,18 @@ framework_call(Callback,Func,Args,DefaultReturn) ->
end,
case erlang:function_exported(Mod,Func,length(Args)) of
true ->
- put(test_server_loc, {Mod,Func,framework}),
EH = fun(Reason) -> exit({fw_error,{Mod,Func,Reason}}) end,
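+ %% For init_tc and end_tc, record in the test case state that a
+ %% framework callback is currently running.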
+ SetTcState = case Func of
+ end_tc -> true;
+ init_tc -> true;
+ _ -> false
+ end,
+ case SetTcState of
+ true ->
+ test_server:set_tc_state({framework,Mod,Func});
+ false ->
+ ok
+ end,
try apply(Mod,Func,Args) of
Result ->
Result
@@ -550,18 +554,6 @@ format_loc([{Mod,LineOrFunc}]) ->
format_loc({Mod,LineOrFunc});
format_loc({Mod,Func}) when is_atom(Func) ->
io_lib:format("{~s,~w}",[package_str(Mod),Func]);
-format_loc({Mod,Line}) when is_integer(Line) ->
- %% ?line macro is used
- ModStr = package_str(Mod),
- case {lists:member(no_src, get(test_server_logopts)),
- lists:reverse(ModStr)} of
- {false,[$E,$T,$I,$U,$S,$_|_]} ->
- io_lib:format("{~s,<a href=\"~s~s#~w\">~w</a>}",
- [ModStr,downcase(ModStr),?src_listing_ext,
- round_to_10(Line),Line]);
- _ ->
- io_lib:format("{~s,~w}",[ModStr,Line])
- end;
format_loc(Loc) ->
io_lib:format("~p",[Loc]).
diff --git a/lib/test_server/src/ts.erl b/lib/test_server/src/ts.erl
index db16b6ecd2..3ddc58fdbc 100644
--- a/lib/test_server/src/ts.erl
+++ b/lib/test_server/src/ts.erl
@@ -260,12 +260,19 @@ run(Testspec, Config) when is_atom(Testspec), is_list(Config) ->
Options=check_test_get_opts(Testspec, Config),
File=atom_to_list(Testspec),
Spec = case code:lib_dir(Testspec) of
- {error, bad_name} when Testspec /= emulator,
- Testspec /= system,
- Testspec /= epmd ->
+ _ when Testspec == emulator;
+ Testspec == system;
+ Testspec == epmd ->
+ File++".spec";
+ {error, bad_name} ->
create_skip_spec(Testspec, tests(Testspec));
- _ ->
- File++".spec"
+ Path ->
+ case file:read_file_info(filename:join(Path,"ebin")) of
+ {ok,_} ->
+ File++".spec";
+ _ ->
+ create_skip_spec(Testspec, tests(Testspec))
+ end
end,
run_test(File, [{spec,[Spec]}], Options);
%% Runs one module in a spec (interactive)
@@ -498,8 +505,60 @@ estone(Opts) when is_list(Opts) -> run(emulator,estone_SUITE,Opts).
cross_cover_analyse([Level]) ->
cross_cover_analyse(Level);
cross_cover_analyse(Level) ->
- test_server_ctrl:cross_cover_analyse(Level).
-
+ Apps = get_last_app_tests(),
+ Modules = get_cross_modules(Apps,[]),
+ test_server_ctrl:cross_cover_analyse(Level,Apps,Modules).
+
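+%% Scan for <dir>/<name>_test.logs directories and return [{App,Dir}],
+%% keeping only the most recent (greatest) Dir per application.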
+get_last_app_tests() ->
+ AllTests = filelib:wildcard(filename:join(["*","*_test.logs"])),
+ {ok,RE} = re:compile("^[^/]*/[^\.]*\.(.*)_test\.logs$"),
+ get_last_app_tests(AllTests,RE,[]).
+
+get_last_app_tests([Dir|Dirs],RE,Acc) ->
+ NewAcc =
+ case re:run(Dir,RE,[{capture,all,list}]) of
+ {match,[Dir,AppStr]} ->
+ App = list_to_atom(AppStr),
+ case lists:keytake(App,1,Acc) of
+ {value,{App,LastDir},Rest} ->
+ if Dir > LastDir ->
+ [{App,Dir}|Rest];
+ true ->
+ Acc
+ end;
+ false ->
+ [{App,Dir} | Acc]
+ end;
+ _ ->
+ Acc
+ end,
+ get_last_app_tests(Dirs,RE,NewAcc);
+get_last_app_tests([],_,Acc) ->
+ Acc.
+
+get_cross_modules([{App,_}|Apps],Acc) ->
+ Mods = cross_modules(App),
+ get_cross_modules(Apps,lists:umerge(Mods,Acc));
+get_cross_modules([],Acc) ->
+ Acc.
+
+cross_modules(App) ->
+ case default_coverfile(App) of
+ none ->
+ [];
+ File ->
+ case catch file:consult(File) of
+ {ok,CoverSpec} ->
+ case lists:keyfind(cross_apps,1,CoverSpec) of
+ false ->
+ [];
+ {cross_apps,App,Modules} ->
+ lists:usort(Modules)
+ end;
+ _ ->
+ []
+ end
+ end.
%%% Implementation.
diff --git a/lib/test_server/src/ts_run.erl b/lib/test_server/src/ts_run.erl
index 57d1b8806e..741dd483f5 100644
--- a/lib/test_server/src/ts_run.erl
+++ b/lib/test_server/src/ts_run.erl
@@ -368,7 +368,7 @@ make_common_test_args(Args0, Options0, _Vars) ->
io:format("No cover file found for ~p~n",[App]),
[];
{value,{cover,_App,File,_Analyse}} ->
- [{cover,to_list(File)}];
+ [{cover,to_list(File)},{cover_stop,false}];
false ->
[]
end,
diff --git a/lib/test_server/test/test_server.cover b/lib/test_server/test/test_server.cover
index c16212567e..052415377d 100644
--- a/lib/test_server/test/test_server.cover
+++ b/lib/test_server/test/test_server.cover
@@ -1,21 +1 @@
{incl_app,test_server,details}.
-
-{excl_mods, test_server, [test_server,
- test_server_ctrl,
- ts_selftest]}.
-
-%% Using incl_mods list here because the test_server might not find
-%% lib_dir for test_server - and so it will not find which modules to
-%% compile.
-{incl_mods, test_server, [erl2html2,
- test_server_node,
- test_server_sup,
- ts,
- ts_autoconf_win32,
- ts_erl_config,
- ts_install,
- ts_lib,
- ts_make,
- ts_run
- ]}.
-
diff --git a/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl b/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl
index ab25e4ad2f..fc2adcd651 100644
--- a/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl
+++ b/lib/test_server/test/test_server_SUITE_data/test_server_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2011. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -433,7 +433,7 @@ start_stop_node(Config) when is_list(Config) ->
?t:comment("WARNING: Node started with {wait,false}"
" is up faster than expected...");
false ->
- wait_for_node(Node4,0),
+ test_server:wait_for_node(Node4),
true = lists:member(Node4,nodes())
end,
@@ -450,16 +450,6 @@ start_stop_node(Config) when is_list(Config) ->
ok.
-
-wait_for_node(Node,Acc) ->
- case net_adm:ping(Node) of
- pang ->
- timer:sleep(100),
- wait_for_node(Node,Acc+100);
- pong ->
- Acc
- end.
-
cleanup_nodes_init(doc) -> ["Test that nodes are terminated when test case"
" is finished unless {cleanup,false} is given."];
cleanup_nodes_init(Config) when is_list(Config) ->
diff --git a/lib/test_server/test/test_server_test_lib.erl b/lib/test_server/test/test_server_test_lib.erl
index 8a808c5360..4e89abf308 100644
--- a/lib/test_server/test/test_server_test_lib.erl
+++ b/lib/test_server/test/test_server_test_lib.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -83,7 +83,12 @@ start_slave(Config,_Level) ->
post_end_per_testcase(_TC, Config, Return, State) ->
Node = proplists:get_value(node, Config),
- cover:stop(Node),
+ case test_server:is_cover() of
+ true ->
+ cover:flush(Node);
+ false ->
+ ok
+ end,
slave:stop(Node),
{Return, State}.
diff --git a/lib/tools/doc/src/cover.xml b/lib/tools/doc/src/cover.xml
index 683acc025d..a2444ec947 100644
--- a/lib/tools/doc/src/cover.xml
+++ b/lib/tools/doc/src/cover.xml
@@ -5,7 +5,7 @@
<header>
<copyright>
<year>2001</year>
- <year>2011</year>
+ <year>2012</year>
<holder>Ericsson AB, All Rights Reserved</holder>
</copyright>
<legalnotice>
@@ -104,6 +104,13 @@
remove nodes. The same Cover compiled code will be loaded on each
node, and analysis will collect and sum up coverage data results
from all nodes.</p>
+ <p>To only collect data from remote nodes without stopping
+ <c>cover</c> on those nodes, use <c>cover:flush/1</c>.</p>
+ <p>If the connection to a remote node goes down, the main node
+ will mark it as lost. If the node comes back it will be added
+ again. If the remote node was alive during the disconnected
+ period, cover data from before and during this period will be
+ included in the analysis.</p>
</description>
<funcs>
<func>
@@ -477,6 +484,17 @@
remote nodes is fetched and stored on the main node.</p>
</desc>
</func>
+ <func>
+ <name>flush(Nodes) -> ok | {error,not_main_node}</name>
+ <fsummary>Collect cover data from remote nodes.</fsummary>
+ <type>
+ <v>Nodes = [atom()]</v>
+ </type>
+ <desc>
+ <p>Fetch data from the Cover database on the remote nodes and
+ store it on the main node.</p>
+ </desc>
+ </func>
</funcs>
<section>
diff --git a/lib/tools/src/cover.erl b/lib/tools/src/cover.erl
index e21bd1b88c..10f14b0a49 100644
--- a/lib/tools/src/cover.erl
+++ b/lib/tools/src/cover.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2001-2011. All Rights Reserved.
+%% Copyright Ericsson AB 2001-2012. All Rights Reserved.
%%
%% The contents of this file are subject to the Erlang Public License,
%% Version 1.1, (the "License"); you may not use this file except in
@@ -26,12 +26,17 @@
%% ARCHITECTURE
%% The coverage tool consists of one process on each node involved in
%% coverage analysis. The process is registered as 'cover_server'
-%% (?SERVER). All cover_servers in the distributed system are linked
-%% together. The cover_server on the 'main' node is in charge, and it
-%% traps exits so it can detect nodedown or process crashes on the
-%% remote nodes. This process is implemented by the functions
-%% init_main/1 and main_process_loop/1. The cover_server on the remote
-%% nodes are implemented by the functions init_remote/2 and
+%% (?SERVER). The cover_server on the 'main' node is in charge, and
+%% it monitors the cover_servers on all remote nodes. When it gets a
+%% 'DOWN' message for another cover_server, it marks the node as
+%% 'lost'. If a nodeup is received for a lost node the main node
+%% ensures that the cover compiled modules are loaded again. If the
+%% remote node was alive during the disconnected period, cover data
+%% for this period will also be included in the analysis.
+%%
+%% The cover_server process on the main node is implemented by the
+%% functions init_main/1 and main_process_loop/1. The cover_servers on
+%% the remote nodes are implemented by the functions init_remote/2 and
%% remote_process_loop/1.
%%
%% TABLES
@@ -81,15 +86,17 @@
export/1, export/2, import/1,
modules/0, imported/0, imported_modules/0, which_nodes/0, is_compiled/1,
reset/1, reset/0,
+ flush/1,
stop/0, stop/1]).
--export([remote_start/1]).
+-export([remote_start/1,get_main_node/0]).
%-export([bump/5]).
-export([transform/4]). % for test purposes
-record(main_state, {compiled=[], % [{Module,File}]
imported=[], % [{Module,File,ImportFile}]
stopper, % undefined | pid()
- nodes=[]}). % [Node]
+ nodes=[], % [Node]
+ lost_nodes=[]}). % [Node]
-record(remote_state, {compiled=[], % [{Module,File}]
main_node}). % atom()
@@ -497,6 +504,19 @@ stop(Node) when is_atom(Node) ->
stop(Nodes) ->
call({stop,remove_myself(Nodes,[])}).
+%% flush(Nodes) -> ok | {error,not_main_node}
+%% Nodes = [Node] | Node
+%% Node = atom()
+%% Error = {not_cover_compiled,Module}
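+%%
+%% Collects data from the remote nodes without stopping cover there.
+%% Sketch of intended use from the main node (Nodes and Mod illustrative):
+%%   ok = cover:flush(Nodes), cover:analyse(Mod, calls, function)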
+flush(Node) when is_atom(Node) ->
+ flush([Node]);
+flush(Nodes) ->
+ call({flush,remove_myself(Nodes,[])}).
+
+%% Used by test_server only. Not documented.
+get_main_node() ->
+ call(get_main_node).
+
%% bump(Module, Function, Arity, Clause, Line)
%% Module = Function = atom()
%% Arity = Clause = Line = integer()
@@ -541,7 +561,10 @@ remote_call(Node,Request) ->
Return =
receive
{'DOWN', Ref, _Type, _Object, _Info} ->
- {error,node_dead};
+ case Request of
+ {remote,stop} -> ok;
+ _ -> {error,node_dead}
+ end;
{?SERVER,Reply} ->
Reply
end,
@@ -569,40 +592,14 @@ init_main(Starter) ->
ets:new(?BINARY_TABLE, [set, named_table]),
ets:new(?COLLECTION_TABLE, [set, public, named_table]),
ets:new(?COLLECTION_CLAUSE_TABLE, [set, public, named_table]),
- process_flag(trap_exit,true),
+ net_kernel:monitor_nodes(true),
Starter ! {?SERVER,started},
main_process_loop(#main_state{}).
main_process_loop(State) ->
receive
{From, {start_nodes,Nodes}} ->
- ThisNode = node(),
- StartedNodes =
- lists:foldl(
- fun(Node,Acc) ->
- case rpc:call(Node,cover,remote_start,[ThisNode]) of
- {ok,RPid} ->
- link(RPid),
- [Node|Acc];
- Error ->
- io:format("Could not start cover on ~w: ~p\n",
- [Node,Error]),
- Acc
- end
- end,
- [],
- Nodes),
-
- %% In case some of the compiled modules have been unloaded they
- %% should not be loaded on the new node.
- {_LoadedModules,Compiled} =
- get_compiled_still_loaded(State#main_state.nodes,
- State#main_state.compiled),
- remote_load_compiled(StartedNodes,Compiled),
-
- State1 =
- State#main_state{nodes = State#main_state.nodes ++ StartedNodes,
- compiled = Compiled},
+ {StartedNodes,State1} = do_start_nodes(Nodes, State),
reply(From, {ok,StartedNodes}),
main_process_loop(State1);
@@ -707,8 +704,13 @@ main_process_loop(State) ->
{From, {stop,Nodes}} ->
remote_collect('_',Nodes,true),
reply(From, ok),
- State1 = State#main_state{nodes=State#main_state.nodes--Nodes},
- main_process_loop(State1);
+ Nodes1 = State#main_state.nodes--Nodes,
+ main_process_loop(State#main_state{nodes=Nodes1});
+
+ {From, {flush,Nodes}} ->
+ remote_collect('_',Nodes,false),
+ reply(From, ok),
+ main_process_loop(State);
{From, stop} ->
lists:foreach(
@@ -788,14 +790,30 @@ main_process_loop(State) ->
end,
main_process_loop(S);
- {'EXIT',Pid,_Reason} ->
- %% Exit is trapped on the main node only, so this will only happen
- %% there. I assume that I'm only linked to cover_servers on remote
- %% nodes, so this must be one of them crashing.
- %% Remove node from list!
- State1 = State#main_state{nodes=State#main_state.nodes--[node(Pid)]},
+ {'DOWN', _MRef, process, {?SERVER,Node}, _Info} ->
+ %% A remote cover_server is down, mark as lost
+ Nodes = State#main_state.nodes--[Node],
+ Lost = [Node|State#main_state.lost_nodes],
+ main_process_loop(State#main_state{nodes=Nodes,lost_nodes=Lost});
+
+ {nodeup,Node} ->
+ State1 =
+ case lists:member(Node,State#main_state.lost_nodes) of
+ true ->
+ sync_compiled(Node,State);
+ false ->
+ State
+ end,
main_process_loop(State1);
+
+ {nodedown,_} ->
+ %% Will be taken care of when 'DOWN' message arrives
+ main_process_loop(State);
+ {From, get_main_node} ->
+ reply(From, node()),
+ main_process_loop(State);
+
get_status ->
io:format("~p~n",[State]),
main_process_loop(State)
@@ -850,7 +868,16 @@ remote_process_loop(State) ->
{remote,stop} ->
reload_originals(State#remote_state.compiled),
unregister(?SERVER),
- remote_reply(State#remote_state.main_node, ok);
+ ok; % not replying since 'DOWN' message will be received anyway
+
+ {remote,get_compiled} ->
+ remote_reply(State#remote_state.main_node,
+ State#remote_state.compiled),
+ remote_process_loop(State);
+
+ {From, get_main_node} ->
+ remote_reply(From, State#remote_state.main_node),
+ remote_process_loop(State);
get_status ->
io:format("~p~n",[State]),
@@ -961,6 +988,36 @@ unload([]) ->
%%%--Handling of remote nodes--------------------------------------------
+do_start_nodes(Nodes, State) ->
+ ThisNode = node(),
+ StartedNodes =
+ lists:foldl(
+ fun(Node,Acc) ->
+ case rpc:call(Node,cover,remote_start,[ThisNode]) of
+ {ok,_RPid} ->
+ erlang:monitor(process,{?SERVER,Node}),
+ [Node|Acc];
+ Error ->
+ io:format("Could not start cover on ~w: ~p\n",
+ [Node,Error]),
+ Acc
+ end
+ end,
+ [],
+ Nodes),
+
+ %% In case some of the compiled modules have been unloaded they
+ %% should not be loaded on the new node.
+ {_LoadedModules,Compiled} =
+ get_compiled_still_loaded(State#main_state.nodes,
+ State#main_state.compiled),
+ remote_load_compiled(StartedNodes,Compiled),
+
+ State1 =
+ State#main_state{nodes = State#main_state.nodes ++ StartedNodes,
+ compiled = Compiled},
+ {StartedNodes, State1}.
+
%% start the cover_server on a remote node
remote_start(MainNode) ->
case whereis(?SERVER) of
@@ -984,6 +1041,30 @@ remote_start(MainNode) ->
{error,{already_started,Pid}}
end.
+%% If a lost node comes back, ensure that the main and remote nodes have the
+%% same cover compiled modules. Note that no action is taken if the
+%% same {Mod,File} exists on both, i.e. code change is not handled!
+sync_compiled(Node,State) ->
+ #main_state{compiled=Compiled0,nodes=Nodes,lost_nodes=Lost}=State,
+ State1 =
+ case remote_call(Node,{remote,get_compiled}) of
+ {error,node_dead} ->
+ {_,S} = do_start_nodes([Node],State),
+ S;
+ {error,_} ->
+ State;
+ RemoteCompiled ->
+ {_,Compiled} = get_compiled_still_loaded(Nodes,Compiled0),
+ Unload = [UM || {UM,_}=U <- RemoteCompiled,
+ false == lists:member(U,Compiled)],
+ remote_unload([Node],Unload),
+ Load = [L || L <- Compiled,
+ false == lists:member(L,RemoteCompiled)],
+ remote_load_compiled([Node],Load),
+ State#main_state{compiled=Compiled, nodes=[Node|Nodes]}
+ end,
+ State1#main_state{lost_nodes=Lost--[Node]}.
+
%% Load a set of cover compiled modules on remote nodes,
%% We do it ?MAX_MODS modules at a time so that we don't
%% run out of memory on the cover_server node.
@@ -1094,7 +1175,6 @@ remove_myself([Node|Nodes],Acc) ->
remove_myself(Nodes,[Node|Acc]);
remove_myself([],Acc) ->
Acc.
-
%%%--Handling of modules state data--------------------------------------
@@ -2254,7 +2334,13 @@ do_reset2([]) ->
do_clear(Module) ->
ets:match_delete(?COVER_CLAUSE_TABLE, {Module,'_'}),
ets:match_delete(?COVER_TABLE, {#bump{module=Module},'_'}),
- ets:match_delete(?COLLECTION_TABLE, {#bump{module=Module},'_'}).
+ case lists:member(?COLLECTION_TABLE, ets:all()) of
+ true ->
+ %% We're on the main node
+ ets:match_delete(?COLLECTION_TABLE, {#bump{module=Module},'_'});
+ false ->
+ ok
+ end.
not_loaded(Module, unloaded, State) ->
do_clear(Module),
@@ -2307,7 +2393,7 @@ pmap(Fun, [E | Rest], Pids, Limit, Cnt, Acc) when Cnt < Limit ->
pmap(Fun, Rest, Pids ++ [Pid], Limit, Cnt + 1, Acc);
pmap(Fun, List, [Pid | Pids], Limit, Cnt, Acc) ->
receive
- {'DOWN', _Ref, process, _, _} ->
+ {'DOWN', _Ref, process, X, _} when is_pid(X) ->
pmap(Fun, List, [Pid | Pids], Limit, Cnt - 1, Acc);
{res, Pid, Res} ->
pmap(Fun, List, Pids, Limit, Cnt, [Res | Acc])
@@ -2316,6 +2402,6 @@ pmap(_Fun, [], [], _Limit, 0, Acc) ->
lists:reverse(Acc);
pmap(Fun, [], [], Limit, Cnt, Acc) ->
receive
- {'DOWN', _Ref, process, _, _} ->
+ {'DOWN', _Ref, process, X, _} when is_pid(X) ->
pmap(Fun, [], [], Limit, Cnt - 1, Acc)
end.
diff --git a/lib/tools/test/cover_SUITE.erl b/lib/tools/test/cover_SUITE.erl
index c2c708d806..3bf1b44af8 100644
--- a/lib/tools/test/cover_SUITE.erl
+++ b/lib/tools/test/cover_SUITE.erl
@@ -23,7 +23,7 @@
init_per_group/2,end_per_group/2]).
-export([start/1, compile/1, analyse/1, misc/1, stop/1,
- distribution/1, export_import/1,
+ distribution/1, reconnect/1, die_and_reconnect/1, export_import/1,
otp_5031/1, eif/1, otp_5305/1, otp_5418/1, otp_6115/1, otp_7095/1,
otp_8188/1, otp_8270/1, otp_8273/1, otp_8340/1]).
@@ -45,7 +45,8 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
case whereis(cover_server) of
undefined ->
- [start, compile, analyse, misc, stop, distribution,
+ [start, compile, analyse, misc, stop,
+ distribution, reconnect, die_and_reconnect,
export_import, otp_5031, eif, otp_5305, otp_5418,
otp_6115, otp_7095, otp_8188, otp_8270, otp_8273,
otp_8340];
@@ -326,14 +327,16 @@ distribution(Config) when is_list(Config) ->
?line {ok,N1} = ?t:start_node(cover_SUITE_distribution1,slave,[]),
?line {ok,N2} = ?t:start_node(cover_SUITE_distribution2,slave,[]),
?line {ok,N3} = ?t:start_node(cover_SUITE_distribution3,slave,[]),
+ ?line {ok,N4} = ?t:start_node(cover_SUITE_distribution4,slave,[]),
%% Check that an already compiled module is loaded on new nodes
?line {ok,f} = cover:compile(f),
- ?line {ok,[_,_,_]} = cover:start(nodes()),
+ ?line {ok,[_,_,_,_]} = cover:start(nodes()),
?line cover_compiled = code:which(f),
?line cover_compiled = rpc:call(N1,code,which,[f]),
?line cover_compiled = rpc:call(N2,code,which,[f]),
?line cover_compiled = rpc:call(N3,code,which,[f]),
+ ?line cover_compiled = rpc:call(N4,code,which,[f]),
%% Check that a node cannot be started twice
?line {ok,[]} = cover:start(N2),
@@ -351,6 +354,7 @@ distribution(Config) when is_list(Config) ->
?line cover_compiled = rpc:call(N1,code,which,[v]),
?line cover_compiled = rpc:call(N2,code,which,[v]),
?line cover_compiled = rpc:call(N3,code,which,[v]),
+ ?line cover_compiled = rpc:call(N4,code,which,[v]),
%% this is lost when the node is killed
?line rpc:call(N3,f,f2,[]),
@@ -385,6 +389,18 @@ distribution(Config) when is_list(Config) ->
%% reset on the remote node(s))
?line check_f_calls(1,1),
+ %% Another check that data is not fetched twice, i.e. when flushed
+ %% then analyse should not add the same data again.
+ ?line rpc:call(N4,f,f2,[]),
+ ?line ok = cover:flush(N4),
+ ?line check_f_calls(1,2),
+
+ %% Check that flush collects data so calls are not lost if node is killed
+ ?line rpc:call(N4,f,f2,[]),
+ ?line ok = cover:flush(N4),
+ ?line rpc:call(N4,erlang,halt,[]),
+ ?line check_f_calls(1,3),
+
%% Check that stop() unloads on all nodes
?line ok = cover:stop(),
?line timer:sleep(100), %% Give nodes time to unload on slow machines.
@@ -393,20 +409,117 @@ distribution(Config) when is_list(Config) ->
?line true = is_unloaded(LocalBeam),
?line true = is_unloaded(N2Beam),
- %% Check that cover_server on remote node dies if main node dies
+ %% Check that cover_server on remote node does not die if main node dies
?line {ok,[N1]} = cover:start(N1),
- ?line true = is_pid(rpc:call(N1,erlang,whereis,[cover_server])),
+ ?line true = is_pid(N1Server = rpc:call(N1,erlang,whereis,[cover_server])),
?line exit(whereis(cover_server),kill),
- ?line timer:sleep(10),
- ?line undefined = rpc:call(N1,erlang,whereis,[cover_server]),
-
+ ?line timer:sleep(100),
+ ?line N1Server = rpc:call(N1,erlang,whereis,[cover_server]),
+
%% Cleanup
?line Files = lsfiles(),
?line remove(files(Files, ".beam")),
?line ?t:stop_node(N1),
?line ?t:stop_node(N2).
-
+%% Test that a lost node is reconnected
+reconnect(Config) ->
+ DataDir = ?config(data_dir, Config),
+ ok = file:set_cwd(DataDir),
+
+ {ok,a} = compile:file(a),
+ {ok,b} = compile:file(b),
+ {ok,f} = compile:file(f),
+
+ {ok,N1} = ?t:start_node(cover_SUITE_reconnect,peer,
+ [{args," -pa " ++ DataDir},{start_cover,false}]),
+ {ok,a} = cover:compile(a),
+ {ok,f} = cover:compile(f),
+ {ok,[N1]} = cover:start(nodes()),
+
+ %% Some calls to check later
+ rpc:call(N1,f,f1,[]),
+ cover:flush(N1),
+ rpc:call(N1,f,f1,[]),
+
+ %% This will cause a call to f:f2() when nodes()==[] on N1
+ rpc:cast(N1,f,call_f2_when_isolated,[]),
+
+ %% Disconnect and check that node is removed from main cover node
+ net_kernel:disconnect(N1),
+ [] = cover:which_nodes(),
+ timer:sleep(500), % allow some time for the f:f2() call
+
+ %% Make some changes: add one module (b) and remove one module (a)
+ code:purge(a),
+ {module,a} = code:load_file(a),
+ {ok,b} = cover:compile(b),
+ cover_compiled = code:which(b),
+
+ [] = cover:which_nodes(),
+ check_f_calls(1,0), % only the first call - before the flush
+
+ %% Reconnect the node and check that b and f are cover compiled but not a
+ net_kernel:connect_node(N1),
+ timer:sleep(100),
+ [N1] = cover:which_nodes(), % we are reconnected
+ cover_compiled = rpc:call(N1,code,which,[b]),
+ cover_compiled = rpc:call(N1,code,which,[f]),
+ ABeam = rpc:call(N1,code,which,[a]),
+ false = (cover_compiled==ABeam),
+
+ %% Ensure that we have:
+ %% * one f1 call from before the flush,
+ %% * one f1 call from after the flush but before disconnect
+ %% * one f2 call when disconnected
+ check_f_calls(2,1),
+
+ cover:stop(),
+ ?t:stop_node(N1),
+ ok.
+
+%% Test that a lost node is reconnected - also if it has been dead
+die_and_reconnect(Config) ->
+ DataDir = ?config(data_dir, Config),
+ ok = file:set_cwd(DataDir),
+
+ {ok,f} = compile:file(f),
+
+ NodeName = cover_SUITE_die_and_reconnect,
+ {ok,N1} = ?t:start_node(NodeName,peer,
+ [{args," -pa " ++ DataDir},{start_cover,false}]),
+ %% {ok,a} = cover:compile(a),
+ {ok,f} = cover:compile(f),
+ {ok,[N1]} = cover:start(nodes()),
+
+ %% Some calls to check later
+ rpc:call(N1,f,f1,[]),
+ cover:flush(N1),
+ rpc:call(N1,f,f1,[]),
+
+ %% Kill the node
+ rpc:call(N1,erlang,halt,[]),
+ [] = cover:which_nodes(),
+
+ check_f_calls(1,0), % only the first call - before the flush
+
+ %% Restart the node and check that cover reconnects
+ {ok,N1} = ?t:start_node(NodeName,peer,
+ [{args," -pa " ++ DataDir},{start_cover,false}]),
+ timer:sleep(100),
+ [N1] = cover:which_nodes(), % we are reconnected
+ cover_compiled = rpc:call(N1,code,which,[f]),
+
+ %% One more call...
+ rpc:call(N1,f,f1,[]),
+
+ %% Ensure that no more calls are counted
+ check_f_calls(2,0),
+
+ cover:stop(),
+ ?t:stop_node(N1),
+ ok.
+
export_import(suite) -> [];
export_import(Config) when is_list(Config) ->
?line DataDir = ?config(data_dir, Config),
@@ -1238,4 +1351,4 @@ is_unloaded(What) ->
end.
check_f_calls(F1,F2) ->
- {ok,[{{f,f1,0},F1},{{f,f2,0},F2}]} = cover:analyse(f,calls,function).
+ {ok,[{{f,f1,0},F1},{{f,f2,0},F2}|_]} = cover:analyse(f,calls,function).
diff --git a/lib/tools/test/cover_SUITE_data/f.erl b/lib/tools/test/cover_SUITE_data/f.erl
index 1ef8bbdb49..ce2963014a 100644
--- a/lib/tools/test/cover_SUITE_data/f.erl
+++ b/lib/tools/test/cover_SUITE_data/f.erl
@@ -1,5 +1,5 @@
-module(f).
--export([f1/0,f2/0]).
+-export([f1/0,f2/0,call_f2_when_isolated/0]).
f1() ->
f1_line1,
@@ -8,3 +8,12 @@ f1() ->
f2() ->
f2_line1,
f2_line2.
+
+call_f2_when_isolated() ->
+ case nodes() of
+ [] ->
+ f2();
+ _ ->
+ timer:sleep(100),
+ call_f2_when_isolated()
+ end.