Diffstat (limited to 'lib')
-rw-r--r--  lib/compiler/src/beam_jump.erl | 15
-rw-r--r--  lib/compiler/test/beam_jump_SUITE.erl | 19
-rw-r--r--  lib/crypto/src/crypto.erl | 2
-rw-r--r--  lib/crypto/test/engine_SUITE.erl | 15
-rw-r--r--  lib/eldap/doc/src/notes.xml | 33
-rw-r--r--  lib/hipe/main/hipe.erl | 2
-rw-r--r--  lib/kernel/doc/src/logger.xml | 92
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml | 30
-rw-r--r--  lib/kernel/doc/src/logger_disk_log_h.xml | 10
-rw-r--r--  lib/kernel/doc/src/logger_std_h.xml | 4
-rw-r--r--  lib/kernel/src/inet_tcp_dist.erl | 2
-rw-r--r--  lib/kernel/src/logger.erl | 35
-rw-r--r--  lib/kernel/src/logger_disk_log_h.erl | 81
-rw-r--r--  lib/kernel/src/logger_h_common.erl | 5
-rw-r--r--  lib/kernel/src/logger_server.erl | 146
-rw-r--r--  lib/kernel/src/logger_std_h.erl | 67
-rw-r--r--  lib/kernel/test/gen_tcp_misc_SUITE.erl | 146
-rw-r--r--  lib/kernel/test/logger_SUITE.erl | 12
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl | 179
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl | 175
-rw-r--r--  lib/observer/src/Makefile | 1
-rw-r--r--  lib/observer/src/cdv_html_wx.erl | 26
-rw-r--r--  lib/observer/src/cdv_persistent_cb.erl | 32
-rw-r--r--  lib/observer/src/cdv_wx.erl | 16
-rw-r--r--  lib/observer/src/crashdump_viewer.erl | 237
-rw-r--r--  lib/observer/src/observer.app.src | 1
-rw-r--r--  lib/observer/src/observer_html_lib.erl | 3
-rw-r--r--  lib/observer/test/crashdump_helper.erl | 49
-rw-r--r--  lib/observer/test/crashdump_viewer_SUITE.erl | 32
-rw-r--r--  lib/os_mon/c_src/cpu_sup.c | 50
-rw-r--r--  lib/os_mon/src/cpu_sup.erl | 26
-rw-r--r--  lib/public_key/asn1/OTP-PKIX.asn1 | 24
-rw-r--r--  lib/public_key/src/public_key.erl | 4
-rw-r--r--  lib/public_key/test/public_key_SUITE.erl | 11
-rw-r--r--  lib/sasl/src/sasl.app.src | 2
-rw-r--r--  lib/sasl/src/systools_make.erl | 4
-rw-r--r--  lib/ssh/doc/src/Makefile | 3
-rw-r--r--  lib/ssh/doc/src/notes.xml | 17
-rw-r--r--  lib/ssh/doc/src/ref_man.xml | 1
-rw-r--r--  lib/ssh/doc/src/specs.xml | 1
-rw-r--r--  lib/ssh/doc/src/ssh.xml | 113
-rw-r--r--  lib/ssh/doc/src/ssh_app.xml | 7
-rw-r--r--  lib/ssh/doc/src/ssh_file.xml | 275
-rw-r--r--  lib/ssh/doc/src/ssh_sftp.xml | 1
-rw-r--r--  lib/ssh/doc/src/terminology.xml | 185
-rw-r--r--  lib/ssh/doc/src/usersguide.xml | 1
-rw-r--r--  lib/ssh/doc/src/using_ssh.xml | 11
-rw-r--r--  lib/ssh/src/ssh.hrl | 24
-rw-r--r--  lib/ssh/src/ssh_connection_handler.erl | 4
-rw-r--r--  lib/ssh/src/ssh_file.erl | 17
-rw-r--r--  lib/ssh/src/ssh_options.erl | 12
-rw-r--r--  lib/ssh/src/ssh_transport.erl | 3
-rw-r--r--  lib/ssh/test/.gitignore | 5
-rw-r--r--  lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl | 20
-rw-r--r--  lib/ssh/test/property_test/ssh_eqc_client_server.erl | 230
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE.erl | 13
-rwxr-xr-x  lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all | 6
-rw-r--r--  lib/ssh/test/ssh_property_test_SUITE.erl | 7
-rw-r--r--  lib/ssl/doc/src/notes.xml | 34
-rw-r--r--  lib/ssl/src/dtls_record.erl | 22
-rw-r--r--  lib/ssl/src/inet_tls_dist.erl | 2
-rw-r--r--  lib/ssl/src/ssl_cipher.erl | 90
-rw-r--r--  lib/ssl/src/ssl_cipher.hrl | 3
-rw-r--r--  lib/ssl/src/ssl_connection.erl | 4
-rw-r--r--  lib/ssl/src/ssl_handshake.erl | 7
-rw-r--r--  lib/ssl/src/ssl_pem_cache.erl | 23
-rw-r--r--  lib/ssl/src/ssl_record.erl | 79
-rw-r--r--  lib/ssl/src/tls_connection.erl | 3
-rw-r--r--  lib/ssl/src/tls_record.erl | 24
-rw-r--r--  lib/ssl/src/tls_sender.erl | 2
-rw-r--r--  lib/ssl/test/ssl_ECC_SUITE.erl | 17
-rw-r--r--  lib/ssl/test/ssl_basic_SUITE.erl | 48
-rw-r--r--  lib/ssl/test/ssl_bench_SUITE.erl | 1
-rw-r--r--  lib/ssl/test/ssl_bench_test_lib.erl | 4
-rw-r--r--  lib/ssl/test/ssl_engine_SUITE.erl | 15
-rw-r--r--  lib/ssl/test/ssl_test_lib.erl | 87
-rw-r--r--  lib/stdlib/doc/src/beam_lib.xml | 4
-rw-r--r--  lib/stdlib/doc/src/gen_event.xml | 2
-rw-r--r--  lib/stdlib/doc/src/lists.xml | 8
-rw-r--r--  lib/stdlib/doc/src/notes.xml | 31
-rw-r--r--  lib/stdlib/src/beam_lib.erl | 2
-rw-r--r--  lib/stdlib/test/gen_fsm_SUITE.erl | 2
-rw-r--r--  lib/stdlib/test/lists_SUITE.erl | 49
-rw-r--r--  lib/stdlib/test/sys_SUITE.erl | 17
84 files changed, 2497 insertions(+), 627 deletions(-)
diff --git a/lib/compiler/src/beam_jump.erl b/lib/compiler/src/beam_jump.erl
index 9eee56d604..22974da398 100644
--- a/lib/compiler/src/beam_jump.erl
+++ b/lib/compiler/src/beam_jump.erl
@@ -128,7 +128,7 @@
%%% on the program state.
%%%
--import(lists, [reverse/1,reverse/2,foldl/3]).
+-import(lists, [dropwhile/2,reverse/1,reverse/2,foldl/3]).
-type instruction() :: beam_utils:instruction().
@@ -411,14 +411,19 @@ opt_useless_loads([{test,_,{f,L},_}=I|Is], L, St) ->
opt_useless_loads(Is, _L, St) ->
{Is,St}.
-opt_useless_block_loads([{set,[Dst],_,_}=I|Is], L, Index) ->
- BlockJump = [{block,Is},{jump,{f,L}}],
+opt_useless_block_loads([{set,[Dst],_,_}=I|Is0], L, Index) ->
+ BlockJump = [{block,Is0},{jump,{f,L}}],
case beam_utils:is_killed(Dst, BlockJump, Index) of
true ->
- %% The register is killed and not used, we can remove the load
+ %% The register is killed and not used, we can remove the load.
+ %% Remove any `put` instructions in case we just
+ %% removed a `put_tuple` instruction.
+ Is = dropwhile(fun({set,_,_,put}) -> true;
+ (_) -> false
+ end, Is0),
opt_useless_block_loads(Is, L, Index);
false ->
- [I|opt_useless_block_loads(Is, L, Index)]
+ [I|opt_useless_block_loads(Is0, L, Index)]
end;
opt_useless_block_loads([I|Is], L, Index) ->
[I|opt_useless_block_loads(Is, L, Index)];
diff --git a/lib/compiler/test/beam_jump_SUITE.erl b/lib/compiler/test/beam_jump_SUITE.erl
index c61e4ab65c..faedc0c1f1 100644
--- a/lib/compiler/test/beam_jump_SUITE.erl
+++ b/lib/compiler/test/beam_jump_SUITE.erl
@@ -21,7 +21,8 @@
-export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
init_per_group/2,end_per_group/2,
- undefined_label/1,ambiguous_catch_try_state/1]).
+ undefined_label/1,ambiguous_catch_try_state/1,
+ build_tuple/1]).
suite() ->
[{ct_hooks,[ts_install_cth]}].
@@ -32,7 +33,8 @@ all() ->
groups() ->
[{p,[parallel],
[undefined_label,
- ambiguous_catch_try_state
+ ambiguous_catch_try_state,
+ build_tuple
]}].
init_per_suite(Config) ->
@@ -72,3 +74,16 @@ river() -> song.
checks(Wanted) ->
%% Must be one line to cause the unsafe optimization.
{catch case river() of sheet -> begin +Wanted, if "da" -> Wanted end end end, catch case river() of sheet -> begin + Wanted, if "da" -> Wanted end end end}.
+
+-record(message2, {id, p1}).
+-record(message3, {id, p1, p2}).
+
+build_tuple(_Config) ->
+ {'EXIT',{{badrecord,message3},_}} = (catch do_build_tuple(#message2{})),
+ ok.
+
+do_build_tuple(Message) ->
+ if is_record(Message, message2) ->
+ Res = {res, rand:uniform(100)},
+ {Message#message3.id, Res}
+ end.
diff --git a/lib/crypto/src/crypto.erl b/lib/crypto/src/crypto.erl
index 2db73c4af0..c2ab88417e 100644
--- a/lib/crypto/src/crypto.erl
+++ b/lib/crypto/src/crypto.erl
@@ -2026,7 +2026,7 @@ check_otp_test_engine(LibDir) ->
case filelib:wildcard("otp_test_engine*", LibDir) of
[] ->
{error, notexist};
- [LibName] ->
+ [LibName|_] -> % In case of Valgrind there could be more than one
LibPath = filename:join(LibDir,LibName),
case filelib:is_file(LibPath) of
true ->
diff --git a/lib/crypto/test/engine_SUITE.erl b/lib/crypto/test/engine_SUITE.erl
index b083b30d70..0427923941 100644
--- a/lib/crypto/test/engine_SUITE.erl
+++ b/lib/crypto/test/engine_SUITE.erl
@@ -75,11 +75,18 @@ groups() ->
init_per_suite(Config) ->
- case crypto:info_lib() of
- [{_,_, <<"OpenSSL 1.0.1s-freebsd 1 Mar 2016">>}] ->
+ case {os:type(), crypto:info_lib()} of
+ {_, [{_,_, <<"OpenSSL 1.0.1s-freebsd 1 Mar 2016">>}]} ->
{skip, "Problem with engine on OpenSSL 1.0.1s-freebsd"};
- Res ->
- ct:log("crypto:info_lib() -> ~p\n", [Res]),
+
+ {{unix,darwin}, _} ->
+ {skip, "Engine unsupported on Darwin"};
+
+ {{win32,_}, _} ->
+ {skip, "Engine unsupported on Windows"};
+
+ {OS, Res} ->
+ ct:log("crypto:info_lib() -> ~p\nos:type() -> ~p", [Res,OS]),
try crypto:start() of
ok ->
Config;
diff --git a/lib/eldap/doc/src/notes.xml b/lib/eldap/doc/src/notes.xml
index b390e0c047..bf9358c4d1 100644
--- a/lib/eldap/doc/src/notes.xml
+++ b/lib/eldap/doc/src/notes.xml
@@ -77,6 +77,22 @@
</section>
+<section><title>Eldap 1.2.3.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A race condition at close could cause the eldap client to
+ exit with a badarg message as cause.</p>
+ <p>
+ Own Id: OTP-15342 Aux Id: ERIERL-242 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Eldap 1.2.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -92,6 +108,22 @@
</section>
+<section><title>Eldap 1.2.2.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A race condition at close could cause the eldap client to
+ exit with a badarg message as cause.</p>
+ <p>
+ Own Id: OTP-15342 Aux Id: ERIERL-242 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Eldap 1.2.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -368,4 +400,3 @@
<p>New application. </p>
</section>
</chapter>
-
diff --git a/lib/hipe/main/hipe.erl b/lib/hipe/main/hipe.erl
index ac2e6c1e3b..e2cb9c0f0b 100644
--- a/lib/hipe/main/hipe.erl
+++ b/lib/hipe/main/hipe.erl
@@ -542,7 +542,7 @@ file(File) ->
| {'error', term()}
when Mod :: mod().
file(File, Options) when is_atom(File) ->
- case beam_lib:info(File) of
+ case beam_lib:info(atom_to_list(File)) of
L when is_list(L) ->
{module, Mod} = lists:keyfind(module, 1, L),
case compile(Mod, File, Options) of
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
index 464c65ba76..2bcf137299 100644
--- a/lib/kernel/doc/src/logger.xml
+++ b/lib/kernel/doc/src/logger.xml
@@ -748,6 +748,14 @@ start(_, []) ->
exists, its associated value will be changed
to the given value. If it does not exist, it will
be added.</p>
+ <p>If the value is incomplete, which for example can be the
+ case for the <c>config</c> key, it is up to the handler
+ implementation how the unspecified parts are set. For all
+ handlers in the Kernel application, unspecified data for
+ the <c>config</c> key is set to default values. To update
+ only specified data, and keep the existing configuration for
+ the rest, use <seealso marker="#update_handler_config-3">
+ <c>update_handler_config/3</c></seealso>.</p>
<p>See the definition of
the <seealso marker="#type-handler_config">
<c>handler_config()</c></seealso> type for more
@@ -933,6 +941,42 @@ logger:set_handler_config(HandlerId, maps:merge(Old, Config)).
</func>
<func>
+ <name name="update_handler_config" arity="3" clause_i="1"/>
+ <name name="update_handler_config" arity="3" clause_i="2"/>
+ <name name="update_handler_config" arity="3" clause_i="3"/>
+ <name name="update_handler_config" arity="3" clause_i="4"/>
+ <name name="update_handler_config" arity="3" clause_i="5"/>
+ <fsummary>Add or update configuration data for the specified
+ handler.</fsummary>
+ <type variable="HandlerId"/>
+ <type variable="Level" name_i="1"/>
+ <type variable="FilterDefault" name_i="2"/>
+ <type variable="Filters" name_i="3"/>
+ <type variable="Formatter" name_i="4"/>
+ <type variable="Config" name_i="5"/>
+ <type variable="Return"/>
+ <desc>
+ <p>Add or update configuration data for the specified
+ handler. If the given <c><anno>Key</anno></c> already
+ exists, its associated value will be changed
+ to the given value. If it does not exist, it will
+ be added.</p>
+ <p>If the value is incomplete, which for example can be the
+ case for the <c>config</c> key, it is up to the handler
+ implementation how the unspecified parts are set. For all
+ handlers in the Kernel application, unspecified data for
+ the <c>config</c> key is not changed. To reset unspecified
+ data to default values,
+ use <seealso marker="#set_handler_config-3">
+ <c>set_handler_config/3</c></seealso>.</p>
+ <p>See the definition of
+ the <seealso marker="#type-handler_config">
+ <c>handler_config()</c></seealso> type for more
+ information about the different parameters.</p>
+ </desc>
+ </func>
+
+ <func>
<name name="update_primary_config" arity="1"/>
<fsummary>Update primary configuration data for Logger.</fsummary>
<desc>
@@ -1041,10 +1085,11 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
</func>
<func>
- <name>HModule:changing_config(Config1, Config2) -> {ok, Config3} | {error, Reason}</name>
+ <name>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig) -> {ok, Config} | {error, Reason}</name>
<fsummary>The configuration for this handler is about to change.</fsummary>
<type>
- <v>Config1 = Config2 = Config3 =
+ <v>SetOrUpdate = set | update</v>
+ <v>OldConfig = NewConfig = Config =
<seealso marker="#type-handler_config">handler_config()</seealso></v>
<v>Reason = term()</v>
</type>
@@ -1053,18 +1098,51 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
<p>The function is called on a temporary process when the
configuration for a handler is about to change. The purpose
is to verify and act on the new configuration.</p>
- <p><c>Config1</c> is the existing configuration
- and <c>Config2</c> is the new configuration.</p>
+ <p><c>OldConfig</c> is the existing configuration
+ and <c>NewConfig</c> is the new configuration.</p>
<p>The handler identity is associated with the <c>id</c> key
- in <c>Config1</c>.</p>
+ in <c>OldConfig</c>.</p>
+ <p><c>SetOrUpdate</c> has the value <c>set</c> if the
+ configuration change originates from a call to
+ <seealso marker="#set_handler_config-2">
+ <c>set_handler_config/2,3</c></seealso>, and <c>update</c>
+ if it originates from <seealso marker="#update_handler_config-2">
+ <c>update_handler_config/2,3</c></seealso>. The handler can
+ use this parameter to decide how to update the value of
+ the <c>config</c> field, that is, the handler specific
+ configuration data. Typically, if <c>SetOrUpdate</c>
+ equals <c>set</c>, values that are not specified must be
+ given their default values. If <c>SetOrUpdate</c>
+ equals <c>update</c>, the values found in <c>OldConfig</c>
+ must be used instead.</p>
<p>If everything succeeds, the callback function must return a
- possibly adjusted configuration in <c>{ok,Config3}</c>.</p>
+ possibly adjusted configuration in <c>{ok,Config}</c>.</p>
<p>If the configuration is faulty, the callback function must
return <c>{error,Reason}</c>.</p>
</desc>
</func>
<func>
+ <name>HModule:filter_config(Config) -> FilteredConfig</name>
+ <fsummary>Remove internal data from configuration.</fsummary>
+ <type>
+ <v>Config = FilteredConfig =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when one of the Logger API functions
+ for fetching the handler configuration is called, for
+ example
+ <seealso marker="#get_handler_config-1">
+ <c>logger:get_handler_config/1</c></seealso>.</p>
+ <p>It allows the handler to remove internal data fields from
+ its configuration data before it is returned to the
+ caller.</p>
+ </desc>
+ </func>
+
+ <func>
<name>HModule:log(LogEvent, Config) -> void()</name>
<fsummary>Log the given log event.</fsummary>
<type>
@@ -1136,7 +1214,7 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
<item><seealso marker="logger#set_handler_config-2">
<c>logger:set_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_handler_config-2">
- <c>logger:updata_handler_config/2</c></seealso></item>
+ <c>logger:update_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_formatter_config-2">
<c>logger:update_formatter_config/2</c></seealso></item>
</list>
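For illustration only (not part of the patch), a minimal sketch of the difference documented above between set_handler_config/3 and update_handler_config/3 for the config key. The handler id my_std_h is made up; the option names are the logger_std_h options used elsewhere in this change.

-module(handler_config_sketch).
-export([demo/0]).

demo() ->
    %% Start from a standard handler with one non-default option.
    ok = logger:add_handler(my_std_h, logger_std_h,
                            #{config => #{sync_mode_qlen => 20}}),

    %% update_handler_config/3 keeps existing handler-specific data:
    %% sync_mode_qlen stays at 20, only drop_mode_qlen is changed.
    ok = logger:update_handler_config(my_std_h, config,
                                      #{drop_mode_qlen => 500}),

    %% set_handler_config/3 resets unspecified handler-specific data:
    %% sync_mode_qlen falls back to its default value.
    ok = logger:set_handler_config(my_std_h, config,
                                   #{drop_mode_qlen => 500}),

    ok = logger:remove_handler(my_std_h).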
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
index 4a81cfa34a..1870d2ab79 100644
--- a/lib/kernel/doc/src/logger_chapter.xml
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -384,8 +384,8 @@ logger:debug(#{got => connection_request, id => Id, state => State},
<p>In addition to the mandatory callback function <c>log/2</c>, a
handler module can export the optional callback
- functions <c>adding_handler/1</c>, <c>changing_config/2</c>
- and <c>removing_handler/1</c>. See
+ functions <c>adding_handler/1</c>, <c>changing_config/3</c>,
+ <c>filter_config/1</c>, and <c>removing_handler/1</c>. See
section <seealso marker="logger#handler_callback_functions">Handler
Callback Functions</seealso> in the logger(3) manual page for
more information about these functions.</p>
@@ -555,7 +555,7 @@ logger:debug(#{got => connection_request, id => Id, state => State},
<item><seealso marker="logger#set_handler_config-2">
<c>set_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_handler_config-2">
- <c>update_handler_config/2</c></seealso></item>
+ <c>update_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#add_handler_filter-3">
<c>add_handler_filter/3</c></seealso></item>
<item><seealso marker="logger#remove_handler_filter-2">
@@ -704,9 +704,13 @@ logger:debug(#{got => connection_request, id => Id, state => State},
<item>
<p>If <c>HandlerId</c> is <c>default</c>, then this entry
modifies the default handler, equivalent to calling</p>
- <pre><seealso marker="logger#set_handler_config-2">
- logger:set_handler_config(default, Module, HandlerConfig)
- </seealso></pre>
+ <pre><seealso marker="logger#remove_handler-1">
+ logger:remove_handler(default)
+ </seealso></pre>
+ <p>followed by</p>
+ <pre><seealso marker="logger#add_handler-3">
+ logger:add_handler(default, Module, HandlerConfig)
+ </seealso></pre>
<p>For all other values of <c>HandlerId</c>, this entry
adds a new handler, equivalent to calling</p>
<pre><seealso marker="logger:add_handler/3">
@@ -1024,7 +1028,8 @@ ok</pre>
<list>
<item><c>adding_handler(Config)</c></item>
<item><c>removing_handler(Config)</c></item>
- <item><c>changing_config(OldConfig, NewConfig)</c></item>
+ <item><c>changing_config(SetOrUpdate, OldConfig, NewConfig)</c></item>
+ <item><c>filter_config(Config)</c></item>
</list>
<p>When a handler is added, by for example a call
to <seealso marker="logger#add_handler-3">
@@ -1043,11 +1048,18 @@ ok</pre>
<p>When <seealso marker="logger#set_handler_config-2">
<c>logger:set_handler_config/2,3</c></seealso>
or <seealso marker="logger#update_handler_config/2">
- <c>logger:update_handler_config/2</c></seealso> is called,
+ <c>logger:update_handler_config/2,3</c></seealso> is called,
Logger
- calls <c>HModule:changing_config(OldConfig, NewConfig)</c>. If
+ calls <c>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig)</c>. If
this function returns <c>{ok,NewConfig1}</c>, Logger
writes <c>NewConfig1</c> to the configuration database.</p>
+ <p>When <seealso marker="logger#get_config-0">
+ <c>logger:get_config/0</c></seealso> or
+ <seealso marker="logger#get_handler_config-0">
+ <c>logger:get_handler_config/0,1</c></seealso> is called,
+ Logger calls <c>HModule:filter_config(Config)</c>. This function
+ must return the handler configuration where internal data is
+ removed.</p>
<p>A simple handler that prints to the terminal can be implemented
as follows:</p>
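The example body is outside this hunk; as a hedged sketch (not taken from the patch), a handler module exporting the callbacks listed above, including the new changing_config/3 and filter_config/1. The module name is invented, the handler keeps no internal fields, and log/2 follows the terminal-printing example the chapter refers to.

-module(my_terminal_h).
-export([adding_handler/1, removing_handler/1, changing_config/3,
         filter_config/1, log/2]).

adding_handler(Config) ->
    {ok, Config}.

removing_handler(_Config) ->
    ok.

%% SetOrUpdate is 'set' or 'update'. Handler-specific defaults are
%% reset on 'set' and kept on 'update', as described in the chapter.
changing_config(SetOrUpdate, OldConfig, NewConfig) ->
    OldHConfig = maps:get(config, OldConfig, #{}),
    NewHConfig0 = maps:get(config, NewConfig, #{}),
    Default = case SetOrUpdate of
                  set -> #{};          %% handler defaults would go here
                  update -> OldHConfig
              end,
    {ok, NewConfig#{config => maps:merge(Default, NewHConfig0)}}.

%% No internal fields are stored, so there is nothing to strip.
filter_config(Config) ->
    Config.

log(LogEvent, #{formatter := {FModule, FConfig}}) ->
    io:put_chars(FModule:format(LogEvent, FConfig)).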
diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml
index dfe2ab3275..d9b941a0a9 100644
--- a/lib/kernel/doc/src/logger_disk_log_h.xml
+++ b/lib/kernel/doc/src/logger_disk_log_h.xml
@@ -66,6 +66,10 @@
corresponds to the <c>name</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+ <p>The value is set when the handler is added, and it cannot
+ be changed at runtime.</p>
+ <p>Defaults to the same name as the handler identity, in the
+ current directory.</p>
</item>
<tag><c>type</c></tag>
<item>
@@ -73,6 +77,8 @@
corresponds to the <c>type</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+ <p>The value is set when the handler is added, and it cannot
+ be changed at runtime.</p>
<p>Defaults to <c>wrap</c>.</p>
</item>
<tag><c>max_no_files</c></tag>
@@ -82,6 +88,8 @@
corresponds to the <c>MaxNoFiles</c> element in the <c>size</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+ <p>The value is set when the handler is added, and it cannot
+ be changed at runtime.</p>
<p>Defaults to <c>10</c>.</p>
<p>The setting has no effect on a halt log.</p>
</item>
@@ -93,6 +101,8 @@
corresponds to the <c>MaxNoBytes</c> element in the <c>size</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+ <p>The value is set when the handler is added, and it cannot
+ be changed at runtime.</p>
<p>Defaults to <c>1048576</c> bytes for a wrap log, and
<c>infinity</c> for a halt log.</p>
</item>
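Illustrative shell session (not part of the patch): adding a disk_log handler with the options above and then trying to change one of them at runtime. The handler id and file name are made up; the error term mirrors the illegal_config_change shape asserted by the test suite changes further down.

1> logger:add_handler(my_disk_log_h, logger_disk_log_h,
                      #{config => #{file => "./my_disk_log",
                                    type => wrap,
                                    max_no_files => 4,
                                    max_no_bytes => 10000}}).
ok
2> logger:update_handler_config(my_disk_log_h, config, #{type => halt}).
{error,{illegal_config_change,#{config => #{type => wrap}},
                              #{config => #{type => halt}}}}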
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
index fcd8189bae..e156f5719b 100644
--- a/lib/kernel/doc/src/logger_std_h.xml
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -74,7 +74,9 @@
circular logging. Use the disk_log handler,
<seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>,
for this.</p>
- <p> Defaults to <c>standard_io</c>.</p>
+ <p>The value is set when the handler is added, and it cannot
+ be changed at runtime.</p>
+ <p>Defaults to <c>standard_io</c>.</p>
</item>
<tag><c>filesync_repeat_interval</c></tag>
<item>
diff --git a/lib/kernel/src/inet_tcp_dist.erl b/lib/kernel/src/inet_tcp_dist.erl
index d1701afdaa..c37212b0f9 100644
--- a/lib/kernel/src/inet_tcp_dist.erl
+++ b/lib/kernel/src/inet_tcp_dist.erl
@@ -450,7 +450,7 @@ get_tcp_address(Driver, Socket) ->
get_address_resolver(EpmdModule) ->
case erlang:function_exported(EpmdModule, address_please, 3) of
true -> {EpmdModule, address_please};
- _ -> {inet, getaddr}
+ _ -> {erl_epmd, address_please}
end.
%% ------------------------------------------------------------
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
index 752dd8d493..6762998d4f 100644
--- a/lib/kernel/src/logger.erl
+++ b/lib/kernel/src/logger.erl
@@ -43,7 +43,8 @@
get_module_level/0, get_module_level/1,
set_primary_config/1, set_primary_config/2,
set_handler_config/2, set_handler_config/3,
- update_primary_config/1, update_handler_config/2,
+ update_primary_config/1,
+ update_handler_config/2, update_handler_config/3,
update_formatter_config/2, update_formatter_config/3,
get_primary_config/0, get_handler_config/1,
get_handler_config/0, get_handler_ids/0, get_config/0,
@@ -423,6 +424,29 @@ set_handler_config(HandlerId,Config) ->
update_primary_config(Config) ->
logger_server:update_config(primary,Config).
+-spec update_handler_config(HandlerId,level,Level) -> Return when
+ HandlerId :: handler_id(),
+ Level :: level() | all | none,
+ Return :: ok | {error,term()};
+ (HandlerId,filter_default,FilterDefault) -> Return when
+ HandlerId :: handler_id(),
+ FilterDefault :: log | stop,
+ Return :: ok | {error,term()};
+ (HandlerId,filters,Filters) -> Return when
+ HandlerId :: handler_id(),
+ Filters :: [{filter_id(),filter()}],
+ Return :: ok | {error,term()};
+ (HandlerId,formatter,Formatter) -> Return when
+ HandlerId :: handler_id(),
+ Formatter :: {module(), formatter_config()},
+ Return :: ok | {error,term()};
+ (HandlerId,config,Config) -> Return when
+ HandlerId :: handler_id(),
+ Config :: term(),
+ Return :: ok | {error,term()}.
+update_handler_config(HandlerId,Key,Value) ->
+ logger_server:update_config(HandlerId,Key,Value).
+
-spec update_handler_config(HandlerId,Config) -> ok | {error,term()} when
HandlerId :: handler_id(),
Config :: handler_config().
@@ -439,7 +463,14 @@ get_primary_config() ->
HandlerId :: handler_id(),
Config :: handler_config().
get_handler_config(HandlerId) ->
- logger_config:get(?LOGGER_TABLE,HandlerId).
+ case logger_config:get(?LOGGER_TABLE,HandlerId) of
+ {ok,#{module:=Module}=Config} ->
+ {ok,try Module:filter_config(Config)
+ catch _:_ -> Config
+ end};
+ Error ->
+ Error
+ end.
-spec get_handler_config() -> [Config] when
Config :: handler_config().
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
index a8f141f135..2a81458ec8 100644
--- a/lib/kernel/src/logger_disk_log_h.erl
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -33,7 +33,8 @@
terminate/2, code_change/3]).
%% logger callbacks
--export([log/2, adding_handler/1, removing_handler/1, changing_config/2]).
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
%% handler internal
-export([log_handler_info/4]).
@@ -114,9 +115,8 @@ reset(Name) ->
%%% Handler being added
adding_handler(#{id:=Name}=Config) ->
case check_config(adding, Config) of
- {ok, Config1} ->
+ {ok, #{config:=HConfig}=Config1} ->
%% create initial handler state by merging defaults with config
- HConfig = maps:get(config, Config1, #{}),
HState = maps:merge(get_init_state(), HConfig),
case logger_h_common:overload_levels_ok(HState) of
true ->
@@ -133,32 +133,40 @@ adding_handler(#{id:=Name}=Config) ->
%%%-----------------------------------------------------------------
%%% Updating handler config
-changing_config(OldConfig = #{id:=Name, config:=OldHConfig},
- NewConfig = #{id:=Name, config:=NewHConfig}) ->
- #{type:=Type, file:=File, max_no_files:=MaxFs,
- max_no_bytes:=MaxBytes} = OldHConfig,
- case NewHConfig of
- #{type:=Type, file:=File, max_no_files:=MaxFs,
- max_no_bytes:=MaxBytes} ->
- changing_config1(OldConfig, NewConfig);
- _ ->
- {error,{illegal_config_change,OldConfig,NewConfig}}
- end;
-changing_config(OldConfig, NewConfig) ->
- {error,{illegal_config_change,OldConfig,NewConfig}}.
+changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
+ WriteOnce = maps:with([type,file,max_no_files,max_no_bytes],OldHConfig),
+ ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig),
+ NewHConfig0 = maps:get(config, NewConfig, #{}),
+ Default =
+ case SetOrUpdate of
+ set ->
+ %% Do not reset write-once fields to defaults
+ maps:merge(get_default_config(),WriteOnce);
+ update ->
+ OldHConfig
+ end,
-changing_config1(OldConfig=#{config:=OldHConfig}, NewConfig) ->
+ %% Allow (accidentally) included read-only fields - just overwrite them
+ NewHConfig = maps:merge(maps:merge(Default,NewHConfig0),ReadOnly),
+
+ %% But fail if write-once fields are changed
+ case maps:with([type,file,max_no_files,max_no_bytes],NewHConfig) of
+ WriteOnce ->
+ changing_config1(maps:get(handler_pid,OldHConfig),
+ OldConfig,
+ NewConfig#{config=>NewHConfig});
+ Other ->
+ {Old,New} = logger_server:diff_maps(WriteOnce,Other),
+ {error,{illegal_config_change,#{config=>Old},#{config=>New}}}
+ end.
+
+changing_config1(HPid, OldConfig, NewConfig) ->
case check_config(changing, NewConfig) of
- {ok,NewConfig1 = #{config:=NewHConfig}} ->
- #{handler_pid:=HPid,
- mode_tab:=ModeTab} = OldHConfig,
- NewHConfig1 = NewHConfig#{handler_pid=>HPid,
- mode_tab=>ModeTab},
- NewConfig2 = NewConfig1#{config=>NewHConfig1},
- try gen_server:call(HPid, {change_config,OldConfig,NewConfig2},
+ Result = {ok,NewConfig1} ->
+ try gen_server:call(HPid, {change_config,OldConfig,NewConfig1},
?DEFAULT_CALL_TIMEOUT) of
- ok -> {ok,NewConfig2};
- HError -> HError
+ ok -> Result;
+ Error -> Error
catch
_:{timeout,_} -> {error,handler_busy}
end;
@@ -168,10 +176,12 @@ changing_config1(OldConfig=#{config:=OldHConfig}, NewConfig) ->
check_config(adding, #{id:=Name}=Config) ->
%% merge handler specific config data
- HConfig = merge_default_logopts(Name, maps:get(config, Config, #{})),
- case check_h_config(maps:to_list(HConfig)) of
+ HConfig1 = maps:get(config, Config, #{}),
+ HConfig2 = maps:merge(get_default_config(), HConfig1),
+ HConfig3 = merge_default_logopts(Name, HConfig2),
+ case check_h_config(maps:to_list(HConfig3)) of
ok ->
- {ok,Config#{config=>HConfig}};
+ {ok,Config#{config=>HConfig3}};
Error ->
Error
end;
@@ -238,6 +248,11 @@ log(LogEvent, Config = #{id := Name,
Bin = logger_h_common:log_to_binary(LogEvent, Config),
logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(#{config:=HConfig}=Config) ->
+ Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}.
+
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
@@ -438,7 +453,7 @@ code_change(_OldVsn, State, _Extra) ->
%%%-----------------------------------------------------------------
%%%
-get_init_state() ->
+get_default_config() ->
#{sync_mode_qlen => ?SYNC_MODE_QLEN,
drop_mode_qlen => ?DROP_MODE_QLEN,
flush_qlen => ?FLUSH_QLEN,
@@ -449,10 +464,12 @@ get_init_state() ->
overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
- dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
- filesync_ok_qlen => ?FILESYNC_OK_QLEN,
filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+get_init_state() ->
+ #{dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
+
%%%-----------------------------------------------------------------
%%% Add a disk_log handler to the logger.
%%% This starts a dedicated handler process which should always
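A small sketch of what the new filter_config/1 above is expected to do: strip the internal handler_pid and mode_tab fields before the configuration is returned to callers. The concrete values below are placeholders and the snippet is written for an interactive shell.

HConfig = #{file => "./my_disk_log", type => wrap,
            handler_pid => self(), mode_tab => make_ref()},
Config = #{id => my_disk_log_h, module => logger_disk_log_h,
           config => HConfig},
#{config := Filtered} = logger_disk_log_h:filter_config(Config),
false = maps:is_key(handler_pid, Filtered),
false = maps:is_key(mode_tab, Filtered),
#{file := "./my_disk_log", type := wrap} = Filtered.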
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
index 38ac7d8ffc..94c640cb92 100644
--- a/lib/kernel/src/logger_h_common.erl
+++ b/lib/kernel/src/logger_h_common.erl
@@ -306,8 +306,11 @@ stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
exit(HandlerPid, kill)
end,
case ConfigResult of
- {ok,#{module:=HMod}=HConfig} when is_integer(RestartAfter) ->
+ {ok,#{module:=HMod}=HConfig0} when is_integer(RestartAfter) ->
_ = logger:remove_handler(Name),
+ HConfig = try HMod:filter_config(HConfig0)
+ catch _:_ -> HConfig0
+ end,
_ = timer:apply_after(RestartAfter, logger, add_handler,
[Name,HMod,HConfig]);
{ok,_} ->
diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl
index a1d40f1123..b7735dbcf7 100644
--- a/lib/kernel/src/logger_server.erl
+++ b/lib/kernel/src/logger_server.erl
@@ -27,9 +27,13 @@
add_filter/2, remove_filter/2,
set_module_level/2, unset_module_level/0,
unset_module_level/1, cache_module_level/1,
- set_config/2, set_config/3, update_config/2,
+ set_config/2, set_config/3,
+ update_config/2, update_config/3,
update_formatter_config/2]).
+%% Helper
+-export([diff_maps/2]).
+
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2]).
@@ -105,12 +109,25 @@ cache_module_level(Module) ->
gen_server:cast(?SERVER,{cache_module_level,Module}).
set_config(Owner,Key,Value) ->
- update_config(Owner,#{Key=>Value}).
+ case sanity_check(Owner,Key,Value) of
+ ok ->
+ call({change_config,set,Owner,Key,Value});
+ Error ->
+ Error
+ end.
set_config(Owner,Config) ->
case sanity_check(Owner,Config) of
ok ->
- call({set_config,Owner,Config});
+ call({change_config,set,Owner,Config});
+ Error ->
+ Error
+ end.
+
+update_config(Owner,Key,Value) ->
+ case sanity_check(Owner,Key,Value) of
+ ok ->
+ call({change_config,update,Owner,Key,Value});
Error ->
Error
end.
@@ -118,7 +135,7 @@ set_config(Owner,Config) ->
update_config(Owner, Config) ->
case sanity_check(Owner,Config) of
ok ->
- call({update_config,Owner,Config});
+ call({change_config,update,Owner,Config});
Error ->
Error
end.
@@ -204,46 +221,72 @@ handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) ->
handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) ->
Reply = do_remove_filter(Tid,Id,FilterId),
{reply,Reply,State};
-handle_call({update_config,primary,NewConfig}, _From, #state{tid=Tid}=State) ->
+handle_call({change_config,SetOrUpd,primary,Config0}, _From,
+ #state{tid=Tid}=State) ->
+ {ok,#{handlers:=Handlers}=OldConfig} = logger_config:get(Tid,primary),
+ Default =
+ case SetOrUpd of
+ set -> default_config(primary);
+ update -> OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}),
+ {reply,Reply,State};
+handle_call({change_config,_SetOrUpd,primary,Key,Value}, _From,
+ #state{tid=Tid}=State) ->
{ok,OldConfig} = logger_config:get(Tid,primary),
- Config = maps:merge(OldConfig,NewConfig),
- {reply,logger_config:set(Tid,primary,Config),State};
-handle_call({update_config,HandlerId,NewConfig}, From, #state{tid=Tid}=State) ->
+ Reply = logger_config:set(Tid,primary,OldConfig#{Key=>Value}),
+ {reply,Reply,State};
+handle_call({change_config,SetOrUpd,HandlerId,Config0}, From,
+ #state{tid=Tid}=State) ->
case logger_config:get(Tid,HandlerId) of
{ok,#{module:=Module}=OldConfig} ->
- Config = maps:merge(OldConfig,NewConfig),
- call_h_async(
- fun() ->
- call_h(Module,changing_config,[OldConfig,Config],
- {ok,Config})
- end,
- fun({ok,Config1}) ->
- logger_config:set(Tid,HandlerId,Config1);
- (Error) ->
- Error
- end,From,State);
- Error ->
- {reply,Error,State}
+ Default =
+ case SetOrUpd of
+ set -> default_config(HandlerId,Module);
+ update -> OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ case check_config_change(OldConfig,Config) of
+ ok ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,
+ [SetOrUpd,OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ logger_config:set(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ Error ->
+ {reply,Error,State}
+ end;
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
end;
-handle_call({set_config,primary,Config0}, _From, #state{tid=Tid}=State) ->
- Config = maps:merge(default_config(primary),Config0),
- {ok,#{handlers:=Handlers}} = logger_config:get(Tid,primary),
- Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}),
- {reply,Reply,State};
-handle_call({set_config,HandlerId,Config0}, From, #state{tid=Tid}=State) ->
+handle_call({change_config,SetOrUpd,HandlerId,Key,Value}, From,
+ #state{tid=Tid}=State) ->
case logger_config:get(Tid,HandlerId) of
{ok,#{module:=Module}=OldConfig} ->
- Config = maps:merge(default_config(HandlerId,Module),Config0),
- call_h_async(
- fun() ->
- call_h(Module,changing_config,[OldConfig,Config],
- {ok,Config})
- end,
- fun({ok,Config1}) ->
- logger_config:set(Tid,HandlerId,Config1);
- (Error) ->
- Error
- end,From,State);
+ Config = OldConfig#{Key=>Value},
+ case check_config_change(OldConfig,Config) of
+ ok ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,
+ [SetOrUpd,OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ logger_config:set(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ Error ->
+ {reply,Error,State}
+ end;
_ ->
{reply,{error,{not_found,HandlerId}},State}
end;
@@ -320,7 +363,7 @@ call(Request) ->
true when
Action == add_handler; Action == remove_handler;
Action == add_filter; Action == remove_filter;
- Action == update_config; Action == set_config ->
+ Action == change_config ->
{error,{attempting_syncronous_call_to_self,Request}};
_ ->
gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT)
@@ -458,6 +501,15 @@ check_formatter({Mod,Config}) ->
check_formatter(Formatter) ->
throw({invalid_formatter,Formatter}).
+%% When changing configuration for a handler, the id and module fields
+%% can not be changed.
+check_config_change(#{id:=Id,module:=Module},#{id:=Id,module:=Module}) ->
+ ok;
+check_config_change(OldConfig,NewConfig) ->
+ {Old,New} = logger_server:diff_maps(maps:with([id,module],OldConfig),
+ maps:with([id,module],NewConfig)),
+ {error,{illegal_config_change,Old,New}}.
+
call_h(Module, Function, Args, DefRet) ->
%% Not calling code:ensure_loaded + erlang:function_exported here,
%% since in some rare terminal cases, the code_server might not
@@ -466,6 +518,11 @@ call_h(Module, Function, Args, DefRet) ->
catch
C:R:S ->
case {C,R,S} of
+ {error,undef,[{Module,Function=changing_config,Args,_}|_]}
+ when length(Args)=:=3 ->
+ %% Backwards compatible call, if changing_config/3
+ %% did not exist.
+ call_h(Module, Function, tl(Args), DefRet);
{error,undef,[{Module,Function,Args,_}|_]} ->
DefRet;
_ ->
@@ -525,3 +582,14 @@ call_h_reply(Unexpected,State) ->
{process,?SERVER},
{message,Unexpected}]),
{noreply,State}.
+
+%% Return two maps containing only the fields that differ.
+diff_maps(M1,M2) ->
+ diffs(lists:sort(maps:to_list(M1)),lists:sort(maps:to_list(M2)),#{},#{}).
+
+diffs([H|T1],[H|T2],D1,D2) ->
+ diffs(T1,T2,D1,D2);
+diffs([{K,V1}|T1],[{K,V2}|T2],D1,D2) ->
+ diffs(T1,T2,D1#{K=>V1},D2#{K=>V2});
+diffs([],[],D1,D2) ->
+ {D1,D2}.
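For reference, the behaviour of the new diff_maps/2 helper on a made-up pair of maps with the same key set: keys with equal values are dropped, keys whose values differ are kept on both sides.

{#{level => info, type => wrap},
 #{level => notice, type => halt}} =
    logger_server:diff_maps(#{id => h1, level => info, type => wrap},
                            #{id => h1, level => notice, type => halt}).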
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index 66fa6b6ab6..42e0f5caf4 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -35,7 +35,8 @@
terminate/2, code_change/3]).
%% logger callbacks
--export([log/2, adding_handler/1, removing_handler/1, changing_config/2]).
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
%% handler internal
-export([log_handler_info/4]).
@@ -116,9 +117,8 @@ reset(Name) ->
%%% Handler being added
adding_handler(#{id:=Name}=Config) ->
case check_config(adding, Config) of
- {ok, Config1} ->
+ {ok, #{config:=HConfig}=Config1} ->
%% create initial handler state by merging defaults with config
- HConfig = maps:get(config, Config1, #{}),
HState = maps:merge(get_init_state(), HConfig),
case logger_h_common:overload_levels_ok(HState) of
true ->
@@ -135,22 +135,31 @@ adding_handler(#{id:=Name}=Config) ->
%%%-----------------------------------------------------------------
%%% Updating handler config
-changing_config(OldConfig=#{id:=Name, config:=OldHConfig},
- NewConfig=#{id:=Name}) ->
- #{type:=Type, handler_pid:=HPid, mode_tab:=ModeTab} = OldHConfig,
- NewHConfig = maps:get(config, NewConfig, #{}),
- case maps:get(type, NewHConfig, Type) of
- Type ->
- NewHConfig1 = NewHConfig#{type=>Type,
- handler_pid=>HPid,
- mode_tab=>ModeTab},
- changing_config1(HPid, OldConfig,
- NewConfig#{config=>NewHConfig1});
- _ ->
- {error,{illegal_config_change,OldConfig,NewConfig}}
- end;
-changing_config(OldConfig, NewConfig) ->
- {error,{illegal_config_change,OldConfig,NewConfig}}.
+changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
+ WriteOnce = maps:with([type],OldHConfig),
+ ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig),
+ NewHConfig0 = maps:get(config, NewConfig, #{}),
+ Default =
+ case SetOrUpdate of
+ set ->
+ %% Do not reset write-once fields to defaults
+ maps:merge(get_default_config(),WriteOnce);
+ update ->
+ OldHConfig
+ end,
+
+ %% Allow (accidentally) included read-only fields - just overwrite them
+ NewHConfig = maps:merge(maps:merge(Default, NewHConfig0),ReadOnly),
+
+ %% But fail if write-once fields are changed
+ case maps:with([type],NewHConfig) of
+ WriteOnce ->
+ changing_config1(maps:get(handler_pid,OldHConfig),
+ OldConfig,
+ NewConfig#{config=>NewHConfig});
+ Other ->
+ {error,{illegal_config_change,#{config=>WriteOnce},#{config=>Other}}}
+ end.
changing_config1(HPid, OldConfig, NewConfig) ->
case check_config(changing, NewConfig) of
@@ -169,8 +178,7 @@ changing_config1(HPid, OldConfig, NewConfig) ->
check_config(adding, Config) ->
%% Merge in defaults on handler level
HConfig0 = maps:get(config, Config, #{}),
- HConfig = maps:merge(#{type => standard_io},
- HConfig0),
+ HConfig = maps:merge(get_default_config(),HConfig0),
case check_h_config(maps:to_list(HConfig)) of
ok ->
{ok,Config#{config=>HConfig}};
@@ -223,6 +231,11 @@ log(LogEvent, Config = #{id := Name,
Bin = logger_h_common:log_to_binary(LogEvent, Config),
logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(#{config:=HConfig}=Config) ->
+ Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}.
+
%%%===================================================================
%%% gen_server callbacks
%%%===================================================================
@@ -428,8 +441,9 @@ code_change(_OldVsn, State, _Extra) ->
%%%-----------------------------------------------------------------
%%%
-get_init_state() ->
- #{sync_mode_qlen => ?SYNC_MODE_QLEN,
+get_default_config() ->
+ #{type => standard_io,
+ sync_mode_qlen => ?SYNC_MODE_QLEN,
drop_mode_qlen => ?DROP_MODE_QLEN,
flush_qlen => ?FLUSH_QLEN,
burst_limit_enable => ?BURST_LIMIT_ENABLE,
@@ -439,10 +453,12 @@ get_init_state() ->
overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
- file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
- filesync_ok_qlen => ?FILESYNC_OK_QLEN,
filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+get_init_state() ->
+ #{file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
+
%%%-----------------------------------------------------------------
%%% Add a standard handler to the logger.
%%% This starts a dedicated handler process which should always
@@ -825,4 +841,3 @@ sync_dev(Fd, DevName, PrevSyncResult, HandlerName) ->
logger_h_common:error_notify({HandlerName,filesync,DevName,Error}),
Error
end.
-
diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl
index 194522c009..04c0c48e3a 100644
--- a/lib/kernel/test/gen_tcp_misc_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl
@@ -52,7 +52,8 @@
several_accepts_in_one_go/1, accept_system_limit/1,
active_once_closed/1, send_timeout/1, send_timeout_active/1,
otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1,
- wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1]).
+ wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1,
+ otp_12242/1]).
%% Internal exports.
-export([sender/3, not_owner/1, passive_sockets_server/2, priority_server/1,
@@ -95,7 +96,8 @@ all() ->
killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit,
active_once_closed, send_timeout, send_timeout_active, otp_7731,
wrapping_oct,
- zombie_sockets, otp_7816, otp_8102, otp_9389].
+ zombie_sockets, otp_7816, otp_8102, otp_9389,
+ otp_12242].
groups() ->
[].
@@ -3284,3 +3286,143 @@ otp_13939(Config) when is_list(Config) ->
exit(Pid, normal),
ct:fail("Server process blocked on send.")
end.
+
+otp_12242(Config) when is_list(Config) ->
+ case os:type() of
+ {win32,_} ->
+ %% Even if we set sndbuf and recbuf to small sizes,
+ %% Windows happily accepts to send GBytes of data in no time;
+ %% either the second send below, which is supposed to time out,
+ %% just succeeds, or the first send, which is supposed to fill
+ %% the inet_drv I/O queue and wait until more data can be sent,
+ %% instead sends all data but suffers a send failure that
+ %% closes the socket.
+ {skipped,backpressure_broken_on_win32};
+ _ ->
+ %% Find the IPv4 address of an up and running interface
+ %% that is neither loopback nor pointtopoint
+ {ok,IFList} = inet:getifaddrs(),
+ ct:pal("IFList ~p~n", [IFList]),
+ case
+ lists:flatten(
+ [lists:filtermap(
+ fun ({addr,Addr}) when tuple_size(Addr) =:= 4 ->
+ {true,Addr};
+ (_) ->
+ false
+ end, Opts)
+ || {_,Opts} <- IFList,
+ case lists:keyfind(flags, 1, Opts) of
+ {_,Flags} ->
+ lists:member(up, Flags)
+ andalso
+ lists:member(running, Flags)
+ andalso
+ not lists:member(loopback, Flags)
+ andalso
+ not lists:member(pointtopoint, Flags);
+ false ->
+ false
+ end])
+ of
+ [Addr|_] ->
+ otp_12242(Addr);
+ Other ->
+ {skipped,{no_external_address,Other}}
+ end
+ end;
+%%
+otp_12242(Addr) when tuple_size(Addr) =:= 4 ->
+ ct:timetrap(30000),
+ ct:pal("Using address ~p~n", [Addr]),
+ Bufsize = 16 * 1024,
+ Datasize = 128 * 1024 * 1024, % At least 1 s on GBit interface
+ Blob = binary:copy(<<$x>>, Datasize),
+ LOpts =
+ [{backlog,4},{reuseaddr,true},{ip,Addr},
+ binary,{active,false},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ COpts =
+ [binary,{active,false},{ip,Addr},
+ {linger,{true,1}}, % 1 s
+ {send_timeout,500},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ Dir = filename:dirname(code:which(?MODULE)),
+ {ok,ListenerNode} =
+ test_server:start_node(
+ ?UNIQ_NODE_NAME, slave, [{args,"-pa " ++ Dir}]),
+ Tester = self(),
+ Listener =
+ spawn(
+ ListenerNode,
+ fun () ->
+ {ok,L} = gen_tcp:listen(0, LOpts),
+ {ok,LPort} = inet:port(L),
+ Tester ! {self(),port,LPort},
+ {ok,A} = gen_tcp:accept(L),
+ ok = gen_tcp:close(L),
+ receive
+ {Tester,stop} ->
+ ok = gen_tcp:close(A)
+ end
+ end),
+ ListenerMref = monitor(process, Listener),
+ LPort = receive {Listener,port,P} -> P end,
+ {ok,C} = gen_tcp:connect(Addr, LPort, COpts, infinity),
+ {ok,ReadCOpts} = inet:getopts(C, [recbuf,sndbuf,buffer]),
+ ct:pal("ReadCOpts ~p~n", [ReadCOpts]),
+ %%
+ %% Fill the buffers
+ ct:pal("Sending ~p bytes~n", [Datasize]),
+ ok = gen_tcp:send(C, Blob),
+ ct:pal("Sent ~p bytes~n", [Datasize]),
+ %% Spawn the Closer,
+ %% try to ensure that the close call is in progress
+ %% before the owner proceeds with sending
+ Owner = self(),
+ {_Closer,CloserMref} =
+ spawn_opt(
+ fun () ->
+ Owner ! {tref, erlang:start_timer(50, Owner, closing)},
+ ct:pal("Calling gen_tcp:close(C)~n"),
+ try gen_tcp:close(C) of
+ Result ->
+ ct:pal("gen_tcp:close(C) -> ~p~n", [Result]),
+ ok = Result
+ catch
+ Class:Reason:Stacktrace ->
+ ct:pal(
+ "gen_tcp:close(C) >< ~p:~p~n ~p~n",
+ [Class,Reason,Stacktrace]),
+ erlang:raise(Class, Reason, Stacktrace)
+ end
+ end, [link,monitor]),
+ receive
+ {tref,Tref} ->
+ receive {timeout,Tref,_} -> ok end,
+ ct:pal("Sending ~p bytes again~n", [Datasize]),
+ %% Now the close should be in progress...
+ %% All buffers are full, the remote end is not reading,
+ %% and the send timeout is 500 ms, so this will time out:
+ {error,timeout} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes again timed out~n", [Datasize]),
+ ok = inet:setopts(C, [{send_timeout,10000}]),
+ %% There is a hidden timeout here. Port close is sampled
+ %% every 5 s by prim_inet:send_recv_reply.
+ %% Linger is 1 s, so the Closer will finish this send:
+ ct:pal("Sending ~p bytes with 10 s timeout~n", [Datasize]),
+ {error,closed} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes with 10 s timeout was closed~n",
+ [Datasize]),
+ normal = wait(CloserMref),
+ ct:pal("The Closer has exited~n"),
+ Listener ! {Tester,stop},
+ receive {'DOWN',ListenerMref,_,_,_} -> ok end,
+ ct:pal("The Listener has exited~n"),
+ test_server:stop_node(ListenerNode),
+ ok
+ end.
+
+wait(Mref) ->
+ receive {'DOWN',Mref,_,_,Reason} -> Reason end.
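A stripped-down sketch (not from the patch) of the send_timeout behaviour that otp_12242 exercises: with {send_timeout, T} set and a peer that accepts the connection but never reads, a send that cannot make progress within T returns {error, timeout} instead of blocking. The module name, address arguments, and buffer sizes are invented.

-module(send_timeout_sketch).
-export([run/2]).

%% PeerAddr/PeerPort are assumed to point at a listener that accepts
%% the connection but never reads from it.
run(PeerAddr, PeerPort) ->
    Blob = binary:copy(<<$x>>, 64 * 1024 * 1024),
    {ok, Sock} = gen_tcp:connect(PeerAddr, PeerPort,
                                 [binary, {active, false},
                                  {sndbuf, 16 * 1024},
                                  {send_timeout, 500}]),
    Result = case gen_tcp:send(Sock, Blob) of
                 ok ->
                     %% Everything fit in the local and remote buffers.
                     all_buffered;
                 {error, timeout} ->
                     %% Back-pressure: the 500 ms send timeout fired.
                     timed_out;
                 {error, _} = Error ->
                     Error
             end,
    ok = gen_tcp:close(Sock),
    Result.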
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
index b7ccba8e70..d831d0d108 100644
--- a/lib/kernel/test/logger_SUITE.erl
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -246,6 +246,18 @@ change_config(_Config) ->
{ok,C4} = logger:get_handler_config(h1),
C4 = C3#{custom:=new_custom},
+ %% Change handler config: Id and module can not be changed
+ {error,{illegal_config_change,Old,New}} =
+ logger:set_handler_config(h1,id,newid),
+ %% Check that only the faulty field is included in return
+ [{id,h1}] = maps:to_list(Old),
+ [{id,newid}] = maps:to_list(New),
+ %% Check that both fields are included when both are changed
+ {error,{illegal_config_change,
+ #{id:=h1,module:=?MODULE},
+ #{id:=newid,module:=newmodule}}} =
+ logger:set_handler_config(h1,#{id=>newid,module=>newmodule}),
+
%% Change primary config: Single key
PConfig0 = logger:get_primary_config(),
ok = logger:set_primary_config(level,warning),
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
index a815db14e9..905c2c52c5 100644
--- a/lib/kernel/test/logger_disk_log_h_SUITE.erl
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -92,6 +92,7 @@ all() ->
disk_log_opts,
default_formatter,
logging,
+ filter_config,
errors,
formatter_fail,
config_fail,
@@ -302,6 +303,20 @@ logging(cleanup, _Config) ->
Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
remove_and_stop(Name).
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([handler_pid,mode_tab],HConfig),
+
+ FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()},
+ #{config:=HConfig} =
+ logger_disk_log_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
errors(Config) ->
PrivDir = ?config(priv_dir,Config),
Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
@@ -316,13 +331,29 @@ errors(Config) ->
%%! TODO:
%%! Check how bad log_opts are handled!
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(Name1,
- config,
- #{file=>LogFile1,
- type=>halt}),
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(Name1,id,new),
+ {error,{illegal_config_change,
+ #{config:=#{type:=wrap}},
+ #{config:=#{type:=halt}}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{type=>halt,
+ file=>LogFile1}),
+
+ {error,{illegal_config_change,
+ #{config:=#{file:=LogFile1}},
+ #{config:=#{file:="newfilename"}}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{file=>"newfilename"}),
+
+ %% Read-only fields may (accidentally) be included in the change,
+ %% but it won't take effect
+ {ok,C} = logger:get_handler_config(Name1),
+ ok = logger:set_handler_config(Name1,config,
+ #{handler_pid=>self(),
+ mode_tab=>erlang:make_ref()}),
+ {ok,C} = logger:get_handler_config(Name1),
+
ok = logger:remove_handler(Name1),
{error,{not_found,Name1}} = logger:remove_handler(Name1),
@@ -403,21 +434,21 @@ config_fail(_Config) ->
formatter=>{?MODULE,self()}}),
%% can't change the disk log options for a log already in use
{error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,config,
- #{max_no_files=>2}),
+ logger:update_handler_config(?MODULE,config,
+ #{max_no_files=>2}),
%% can't change name of an existing handler
{error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,id,bad),
+ logger:update_handler_config(?MODULE,id,bad),
%% incorrect values of OP params
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
{error,{invalid_levels,_}} =
- logger:set_handler_config(?MODULE,config,
- HConfig#{sync_mode_qlen=>100,
- flush_qlen=>99}),
+ logger:update_handler_config(?MODULE,config,
+ HConfig#{sync_mode_qlen=>100,
+ flush_qlen=>99}),
%% invalid name of config parameter
{error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} =
- logger:set_handler_config(?MODULE, config,
- HConfig#{filesync_rep_int => 2000}),
+ logger:update_handler_config(?MODULE, config,
+ HConfig#{filesync_rep_int => 2000}),
ok.
config_fail(cleanup,_Config) ->
logger:remove_handler(?MODULE).
@@ -459,10 +490,26 @@ reconfig(Config) ->
log_opts := #{type := ?DISK_LOG_TYPE,
max_no_files := ?DISK_LOG_MAX_NO_FILES,
max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
- file := _DiskLogFile}} =
+ file := DiskLogFile}} =
logger_disk_log_h:info(?MODULE),
+ {ok,#{config :=
+ #{sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
+ file := DiskLogFile,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ type := wrap} = HConfig0}} =
+ logger:get_handler_config(?MODULE),
- {ok,#{config := HConfig0}} = logger:get_handler_config(?MODULE),
HConfig1 = HConfig0#{sync_mode_qlen => 1,
drop_mode_qlen => 2,
flush_qlen => 3,
@@ -488,6 +535,29 @@ reconfig(Config) ->
overload_kill_restart_after := infinity,
filesync_repeat_interval := no_repeat} =
logger_disk_log_h:info(?MODULE),
+ {ok,#{config:=HConfig1}} = logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConfig1#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = HConfig0#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = HConfig0#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = HConfig0#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
ok = logger:remove_handler(?MODULE),
@@ -507,6 +577,43 @@ reconfig(Config) ->
max_no_bytes := 1024,
file := File}} =
logger_disk_log_h:info(?MODULE),
+ {ok,#{config :=
+ #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}=HaltHConfig} = Config2} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, level, notice),
+ {ok,C5} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = Config2#{level => notice},
+
+ ok = logger:set_handler_config(?MODULE, level, info),
+ {ok,C6} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = Config2#{level => info},
+
+ %% You are not allowed to actively set the write-once fields
+ %% (type, max_no_files, max_no_bytes, file) at runtime.
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>wrap}),
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_files=>2}),
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}),
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}),
+ {ok,C7} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = C6,
+
+ %% ... but if you don't specify the write-once fields, then
+ %% set_handler_config shall NOT reset them to their default values
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C8}} = logger:get_handler_config(?MODULE),
+ ct:log("C8: ~p",[C8]),
+ C8 = HaltHConfig#{sync_mode_qlen=>1},
ok.
reconfig(cleanup, _Config) ->
@@ -536,7 +643,7 @@ sync(Config) ->
%% a disk_log_sync is still performed when handler goes idle
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
- ok = logger:set_handler_config(?MODULE, config, HConfig1),
+ ok = logger:update_handler_config(?MODULE, config, HConfig1),
no_repeat = maps:get(filesync_repeat_interval,
logger_disk_log_h:info(?MODULE)),
@@ -569,13 +676,13 @@ sync(Config) ->
[OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
HConfig2 = HConfig#{filesync_repeat_interval => SyncInt},
- ok = logger:set_handler_config(?MODULE, config, HConfig2),
+ ok = logger:update_handler_config(?MODULE, config, HConfig2),
SyncInt = maps:get(filesync_repeat_interval,
logger_disk_log_h:info(?MODULE)),
timer:sleep(WaitT),
HConfig3 = HConfig#{filesync_repeat_interval => no_repeat},
- ok = logger:set_handler_config(?MODULE, config, HConfig3),
+ ok = logger:update_handler_config(?MODULE, config, HConfig3),
check_tracer(100),
ok.
sync(cleanup,_Config) ->
@@ -797,7 +904,7 @@ sync_failure(Config) ->
LogOpts = maps:get(log_opts, HState),
SyncInt = 500,
- ok = rpc:call(Node, logger, set_handler_config,
+ ok = rpc:call(Node, logger, update_handler_config,
[?STANDARD_HANDLER, config,
#{filesync_repeat_interval => SyncInt}]),
Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
@@ -872,7 +979,7 @@ op_switch_to_sync(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Lines = count_lines(Log),
NumOfReqs = Lines,
@@ -897,7 +1004,7 @@ op_switch_to_drop(Config) ->
drop_mode_qlen => 2,
flush_qlen => Procs*NumOfReqs*Bursts,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% It sometimes happens that the handler either gets
%% the requests in a slow enough pace so that dropping
%% never occurs. Therefore, lets generate a number of
@@ -943,7 +1050,7 @@ op_switch_to_flush(Config) ->
drop_mode_qlen => 300,
flush_qlen => 300,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1500,
Procs = 10,
Bursts = 10,
@@ -985,7 +1092,7 @@ limit_burst_disabled(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1005,7 +1112,7 @@ limit_burst_enabled_one(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1026,7 +1133,7 @@ limit_burst_enabled_period(Config) ->
burst_limit_window_time => BurstTWin,
drop_mode_qlen => 20000,
flush_qlen => 20001}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Windows = 3,
Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
@@ -1046,7 +1153,7 @@ kill_disabled(Config) ->
HConfig#{config=>DLHConfig#{overload_kill_enable=>false,
overload_kill_qlen=>10,
overload_kill_mem_size=>100}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1068,7 +1175,7 @@ qlen_kill_new(Config) ->
overload_kill_qlen=>10,
overload_kill_mem_size=>Mem0+50000,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1105,7 +1212,7 @@ mem_kill_new(Config) ->
overload_kill_qlen=>50000,
overload_kill_mem_size=>Mem0+500,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1139,7 +1246,7 @@ restart_after(Config) ->
HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>infinity}},
- ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
MRef1 = erlang:monitor(process, whereis(h_proc_name())),
%% kill handler
send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
@@ -1161,7 +1268,7 @@ restart_after(Config) ->
HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
Pid0 = whereis(h_proc_name()),
MRef2 = erlang:monitor(process, Pid0),
%% kill handler
@@ -1194,7 +1301,7 @@ handler_requests_under_load(Config) ->
drop_mode_qlen => 1000,
flush_qlen => 2000,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
{info,[]},
{reset,[]},
@@ -1227,9 +1334,9 @@ send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
Result =
case Req of
change_config ->
- logger:set_handler_config(HName, logger_disk_log_h,
- #{overload_kill_enable =>
- false});
+ logger:update_handler_config(HName, logger_disk_log_h,
+ #{overload_kill_enable =>
+ false});
Func ->
logger_disk_log_h:Func(HName)
end,
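
The switch from logger:set_handler_config/3 to logger:update_handler_config/3 throughout this suite matches the semantics that the reconfig test above asserts: set_handler_config/3 replaces the whole config sub-map, so keys that are not given fall back to their defaults (only the write-once fields type, max_no_files, max_no_bytes and file are preserved), while update_handler_config/3 merges the given keys into the current values. A minimal sketch of the difference, using a hypothetical handler id my_h and log file name:

    ok = logger:add_handler(my_h, logger_disk_log_h, #{config => #{file => "my.log"}}),

    ok = logger:update_handler_config(my_h, config, #{drop_mode_qlen => 100}),
    ok = logger:update_handler_config(my_h, config, #{sync_mode_qlen => 5}),
    {ok,#{config := C1}} = logger:get_handler_config(my_h),
    100 = maps:get(drop_mode_qlen, C1),  %% kept: update/3 merges into the current config

    ok = logger:set_handler_config(my_h, config, #{sync_mode_qlen => 5}),
    {ok,#{config := C2}} = logger:get_handler_config(my_h),
    %% drop_mode_qlen is back at its default, since set/3 replaced the sub-map;
    %% only the write-once fields survived.
    false = (100 =:= maps:get(drop_mode_qlen, C2))
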
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index 3426567bbf..b6a09f4980 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -108,6 +108,7 @@ all() ->
add_remove_instance_file1,
add_remove_instance_file2,
default_formatter,
+ filter_config,
errors,
formatter_fail,
config_fail,
@@ -204,6 +205,20 @@ default_formatter(_Config) ->
match = re:run(Msg,"=NOTICE REPORT====.*\n"++M1,[{capture,none}]),
ok.
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([handler_pid,mode_tab],HConfig),
+
+ FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()},
+ #{config:=HConfig} =
+ logger_std_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
errors(Config) ->
Dir = ?config(priv_dir,Config),
Log = filename:join(Dir,?FUNCTION_NAME),
@@ -319,11 +334,10 @@ config_fail(_Config) ->
ok = logger:add_handler(?MODULE,logger_std_h,
#{filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{illegal_config_change,_,_}} =
+ {error,{illegal_config_change,#{config:=#{type:=_}},#{config:=#{type:=_}}}} =
logger:set_handler_config(?MODULE,config,
#{type=>{file,"file"}}),
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,id,bad),
+
{error,{invalid_levels,_}} =
logger:set_handler_config(?MODULE,config,
#{sync_mode_qlen=>100,
@@ -331,6 +345,15 @@ config_fail(_Config) ->
{error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} =
logger:set_handler_config(?MODULE, config,
#{filesync_rep_int => 2000}),
+
+ %% Read-only fields may (accidentally) be included in the change,
+ %% but they will not take effect.
+ {ok,C} = logger:get_handler_config(?MODULE),
+ ok = logger:set_handler_config(?MODULE,config,
+ #{handler_pid=>self(),
+ mode_tab=>erlang:make_ref()}),
+ {ok,C} = logger:get_handler_config(?MODULE),
+
ok.
config_fail(cleanup,_Config) ->
@@ -457,9 +480,26 @@ reconfig(Config) ->
overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = DefaultInfo =
logger_std_h:info(?MODULE),
+ {ok,
+ #{config:=
+ #{type := standard_io,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
+ DefaultHConf}}
+ = logger:get_handler_config(?MODULE),
+
ok = logger:set_handler_config(?MODULE, config,
#{sync_mode_qlen => 1,
drop_mode_qlen => 2,
@@ -485,7 +525,77 @@ reconfig(Config) ->
overload_kill_qlen := 100000,
overload_kill_mem_size := 10000000,
overload_kill_restart_after := infinity,
- filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE),
+ filesync_repeat_interval := no_repeat} = Info = logger_std_h:info(?MODULE),
+
+ {ok,#{config :=
+ #{type := standard_io,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ filesync_repeat_interval := no_repeat} = HConf}} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConf#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = DefaultHConf#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = DefaultHConf#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = DefaultHConf#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir,lists:concat([?FUNCTION_NAME,".log"])),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => {file,File}},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ {ok,#{config:=#{filesync_repeat_interval:=FSI}=FileHConfig}} =
+ logger:get_handler_config(?MODULE),
+ ok = logger:update_handler_config(?MODULE,config,
+ #{filesync_repeat_interval=>FSI+2000}),
+ {ok,#{config:=C5}} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = FileHConfig#{filesync_repeat_interval=>FSI+2000},
+
+ %% You are not allowed to actively set 'type' at runtime, since
+ %% this is a write-once field.
+ {error, {illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>standard_io}),
+ {ok,#{config:=C6}} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = C5,
+
+ %% ... but if you don't specify 'type', then set_handler_config shall
+ %% NOT reset it to its default value
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C7}} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = FileHConfig#{sync_mode_qlen=>1},
ok.
reconfig(cleanup, _Config) ->
@@ -561,8 +671,8 @@ sync(Config) ->
%% check that if there's no repeated filesync active,
%% a filesync is still performed when handler goes idle
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => no_repeat}),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
%% The following timer is to make sure the time from last log
%% ("second") to next ("third") is long enough, so the a flush is
@@ -592,12 +702,12 @@ sync(Config) ->
start_tracer([{logger_std_h,handle_cast,2}],
[OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => SyncInt}),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => SyncInt}),
SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
timer:sleep(WaitT),
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => no_repeat}),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
check_tracer(100),
ok.
sync(cleanup, _Config) ->
@@ -652,7 +762,7 @@ sync_failure(Config) ->
rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
SyncInt = 500,
- ok = rpc:call(Node, logger, set_handler_config,
+ ok = rpc:call(Node, logger, update_handler_config,
[?STANDARD_HANDLER, config,
#{filesync_repeat_interval => SyncInt}]),
Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]),
@@ -718,7 +828,7 @@ op_switch_to_sync_file(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% TRecvPid = start_op_trace(),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Lines = count_lines(Log),
@@ -747,7 +857,7 @@ op_switch_to_sync_tty(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
ok.
op_switch_to_sync_tty(cleanup, _Config) ->
@@ -770,7 +880,7 @@ op_switch_to_drop_file(Config) ->
flush_qlen =>
Procs*NumOfReqs*Bursts,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% It sometimes happens that the handler gets the
%% requests in a slow enough pace so that dropping
%% never occurs. Therefore, lets generate a number of
@@ -807,7 +917,7 @@ op_switch_to_drop_tty(Config) ->
flush_qlen =>
Procs*NumOfReqs+1,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
ok.
op_switch_to_drop_tty(cleanup, _Config) ->
@@ -832,7 +942,7 @@ op_switch_to_flush_file(Config) ->
drop_mode_qlen => 300,
flush_qlen => 300,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1500,
Procs = 10,
Bursts = 10,
@@ -879,7 +989,7 @@ op_switch_to_flush_tty(Config) ->
drop_mode_qlen => 100,
flush_qlen => 100,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1000,
Procs = 100,
send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
@@ -895,7 +1005,7 @@ limit_burst_disabled(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -915,7 +1025,7 @@ limit_burst_enabled_one(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -936,7 +1046,7 @@ limit_burst_enabled_period(Config) ->
burst_limit_window_time => BurstTWin,
drop_mode_qlen => 20000,
flush_qlen => 20001}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Windows = 3,
Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
@@ -956,7 +1066,7 @@ kill_disabled(Config) ->
HConfig#{config=>StdHConfig#{overload_kill_enable=>false,
overload_kill_qlen=>10,
overload_kill_mem_size=>100}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -977,7 +1087,7 @@ qlen_kill_new(Config) ->
overload_kill_qlen=>10,
overload_kill_mem_size=>Mem0+50000,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1011,7 +1121,7 @@ qlen_kill_std(_Config) ->
%% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
%% Log = filename:join(Dir, File),
%% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
- %% ok = rpc:call(Node, logger, set_handler_config,
+ %% ok = rpc:call(Node, logger, update_handler_config,
%% [?STANDARD_HANDLER, config,
%% #{overload_kill_enable=>true,
%% overload_kill_qlen=>10,
@@ -1028,7 +1138,7 @@ mem_kill_new(Config) ->
overload_kill_qlen=>50000,
overload_kill_mem_size=>Mem0+500,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1067,7 +1177,7 @@ restart_after(Config) ->
HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>infinity}},
- ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
MRef1 = erlang:monitor(process, whereis(h_proc_name())),
%% kill handler
send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
@@ -1082,14 +1192,15 @@ restart_after(Config) ->
ct:pal("Handler state = ~p", [Info1]),
ct:fail("Handler not dead! It should not have survived this!")
end,
-
+
{Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+
NewHConfig2 =
HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
Pid0 = whereis(h_proc_name()),
MRef2 = erlang:monitor(process, Pid0),
%% kill handler
@@ -1123,7 +1234,7 @@ handler_requests_under_load(Config) ->
drop_mode_qlen => 1000,
flush_qlen => 2000,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
{info,[]},
{reset,[]},
@@ -1155,9 +1266,9 @@ send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
Result =
case Req of
change_config ->
- logger:set_handler_config(HName, config,
- #{overload_kill_enable =>
- false});
+ logger:update_handler_config(HName, config,
+ #{overload_kill_enable =>
+ false});
Func ->
logger_std_h:Func(HName)
end,
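
The new filter_config test above exercises the handler callback that logger uses to hide purely internal, runtime-only fields (here handler_pid and mode_tab) from the configuration returned to API callers. As an illustration only, not the actual logger_std_h code, such a callback can be as small as:

    -module(my_handler).
    -export([filter_config/1]).

    %% Called with the full handler configuration map; returns the same map
    %% with the process-internal keys removed from the config sub-map.
    filter_config(#{config := HConfig} = Config) ->
        Config#{config => maps:without([handler_pid, mode_tab], HConfig)}.
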
diff --git a/lib/observer/src/Makefile b/lib/observer/src/Makefile
index ff2bcbdb99..f9f239db37 100644
--- a/lib/observer/src/Makefile
+++ b/lib/observer/src/Makefile
@@ -50,6 +50,7 @@ MODULES= \
cdv_mem_cb \
cdv_mod_cb \
cdv_multi_wx \
+ cdv_persistent_cb \
cdv_port_cb \
cdv_proc_cb \
cdv_sched_cb \
diff --git a/lib/observer/src/cdv_html_wx.erl b/lib/observer/src/cdv_html_wx.erl
index d9efa7fc2f..ffef83227c 100644
--- a/lib/observer/src/cdv_html_wx.erl
+++ b/lib/observer/src/cdv_html_wx.erl
@@ -33,13 +33,17 @@
{panel,
app, %% which tool is the user
expand_table,
- expand_wins=[]}).
+ expand_wins=[],
+ delayed_fetch,
+ trunc_warn=[]}).
start_link(ParentWin, Info) ->
wx_object:start_link(?MODULE, [ParentWin, Info], []).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+init([ParentWin, Callback]) when is_atom(Callback) ->
+ init(ParentWin, Callback);
init([ParentWin, {App, Fun}]) when is_function(Fun) ->
init([ParentWin, {App, Fun()}]);
init([ParentWin, {expand,HtmlText,Tab}]) ->
@@ -60,9 +64,29 @@ init(ParentWin, HtmlText, Tab, App) ->
wx_misc:endBusyCursor(),
{HtmlWin, #state{panel=HtmlWin,expand_table=Tab,app=App}}.
+init(ParentWin, Callback) ->
+ {HtmlWin, State} = init(ParentWin, "", undefined, cdv),
+ {HtmlWin, State#state{delayed_fetch=Callback}}.
+
%%%%%%%%%%%%%%%%%%%%%%% Callbacks %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+handle_info(active, #state{panel=HtmlWin,delayed_fetch=Callback}=State)
+ when Callback=/=undefined ->
+ observer_lib:display_progress_dialog(HtmlWin,
+ "Crashdump Viewer",
+ "Reading data"),
+ {{expand,HtmlText,Tab},TW} = Callback:get_info(),
+ observer_lib:sync_destroy_progress_dialog(),
+ wx_misc:beginBusyCursor(),
+ wxHtmlWindow:setPage(HtmlWin,HtmlText),
+ cdv_wx:set_status(TW),
+ wx_misc:endBusyCursor(),
+ {noreply, State#state{expand_table=Tab,
+ delayed_fetch=undefined,
+ trunc_warn=TW}};
+
handle_info(active, State) ->
+ cdv_wx:set_status(State#state.trunc_warn),
{noreply, State};
handle_info(Info, State) ->
diff --git a/lib/observer/src/cdv_persistent_cb.erl b/lib/observer/src/cdv_persistent_cb.erl
new file mode 100644
index 0000000000..d5da18f7fc
--- /dev/null
+++ b/lib/observer/src/cdv_persistent_cb.erl
@@ -0,0 +1,32 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+
+-module(cdv_persistent_cb).
+
+-export([get_info/0]).
+
+-include_lib("wx/include/wx.hrl").
+
+get_info() ->
+ Tab = ets:new(pt_expand,[set,public]),
+ {ok,PT,TW} = crashdump_viewer:persistent_terms(),
+ {{expand,
+ observer_html_lib:expandable_term("Persistent Terms",PT,Tab),
+ Tab},
+ TW}.
diff --git a/lib/observer/src/cdv_wx.erl b/lib/observer/src/cdv_wx.erl
index 78a897111c..1e9cef8952 100644
--- a/lib/observer/src/cdv_wx.erl
+++ b/lib/observer/src/cdv_wx.erl
@@ -51,6 +51,7 @@
-define(DIST_STR, "Nodes").
-define(MOD_STR, "Modules").
-define(MEM_STR, "Memory").
+-define(PERSISTENT_STR, "Persistent Terms").
-define(INT_STR, "Internal Tables").
%% Records
@@ -74,6 +75,7 @@
dist_panel,
mod_panel,
mem_panel,
+ persistent_panel,
int_panel,
active_tab
}).
@@ -193,6 +195,10 @@ setup(#state{frame=Frame, notebook=Notebook}=State) ->
%% Memory Panel
MemPanel = add_page(Notebook, ?MEM_STR, cdv_multi_wx, cdv_mem_cb),
+ %% Persistent Terms Panel
+ PersistentPanel = add_page(Notebook, ?PERSISTENT_STR,
+ cdv_html_wx, cdv_persistent_cb),
+
%% Memory Panel
IntPanel = add_page(Notebook, ?INT_STR, cdv_multi_wx, cdv_int_tab_cb),
@@ -215,6 +221,7 @@ setup(#state{frame=Frame, notebook=Notebook}=State) ->
dist_panel = DistPanel,
mod_panel = ModPanel,
mem_panel = MemPanel,
+ persistent_panel = PersistentPanel,
int_panel = IntPanel,
active_tab = GenPid
}}.
@@ -250,6 +257,7 @@ handle_event(#wx{id = ?wxID_OPEN,
State#state.dist_panel,
State#state.mod_panel,
State#state.mem_panel,
+ State#state.persistent_panel,
State#state.int_panel],
_ = [wx_object:call(Panel,new_dump) || Panel<-Panels],
wxNotebook:setSelection(State#state.notebook,0),
@@ -343,8 +351,8 @@ check_page_title(Notebook) ->
get_active_pid(#state{notebook=Notebook, gen_panel=Gen, pro_panel=Pro,
port_panel=Ports, ets_panel=Ets, timer_panel=Timers,
fun_panel=Funs, atom_panel=Atoms, dist_panel=Dist,
- mod_panel=Mods, mem_panel=Mem, int_panel=Int,
- sched_panel=Sched
+ mod_panel=Mods, mem_panel=Mem, persistent_panel=Persistent,
+ int_panel=Int, sched_panel=Sched
}) ->
Panel = case check_page_title(Notebook) of
?GEN_STR -> Gen;
@@ -358,6 +366,7 @@ get_active_pid(#state{notebook=Notebook, gen_panel=Gen, pro_panel=Pro,
?DIST_STR -> Dist;
?MOD_STR -> Mods;
?MEM_STR -> Mem;
+ ?PERSISTENT_STR -> Persistent;
?INT_STR -> Int
end,
wx_object:get_pid(Panel).
@@ -365,7 +374,7 @@ get_active_pid(#state{notebook=Notebook, gen_panel=Gen, pro_panel=Pro,
pid2panel(Pid, #state{gen_panel=Gen, pro_panel=Pro, port_panel=Ports,
ets_panel=Ets, timer_panel=Timers, fun_panel=Funs,
atom_panel=Atoms, dist_panel=Dist, mod_panel=Mods,
- mem_panel=Mem, int_panel=Int}) ->
+ mem_panel=Mem, persistent_panel=Persistent, int_panel=Int}) ->
case Pid of
Gen -> ?GEN_STR;
Pro -> ?PRO_STR;
@@ -377,6 +386,7 @@ pid2panel(Pid, #state{gen_panel=Gen, pro_panel=Pro, port_panel=Ports,
Dist -> ?DIST_STR;
Mods -> ?MOD_STR;
Mem -> ?MEM_STR;
+ Persistent -> ?PERSISTENT_STR;
Int -> ?INT_STR;
_ -> "unknown"
end.
diff --git a/lib/observer/src/crashdump_viewer.erl b/lib/observer/src/crashdump_viewer.erl
index 14b086ff58..0627c15b1c 100644
--- a/lib/observer/src/crashdump_viewer.erl
+++ b/lib/observer/src/crashdump_viewer.erl
@@ -74,6 +74,7 @@
loaded_modules/0,
loaded_mod_details/1,
memory/0,
+ persistent_terms/0,
allocated_areas/0,
allocator_info/0,
hash_tables/0,
@@ -139,6 +140,7 @@
-define(node,node).
-define(not_connected,not_connected).
-define(old_instr_data,old_instr_data).
+-define(persistent_terms,persistent_terms).
-define(port,port).
-define(proc,proc).
-define(proc_dictionary,proc_dictionary).
@@ -293,6 +295,8 @@ loaded_mod_details(Mod) ->
call({loaded_mod_details,Mod}).
memory() ->
call(memory).
+persistent_terms() ->
+ call(persistent_terms).
allocated_areas() ->
call(allocated_areas).
allocator_info() ->
@@ -471,6 +475,11 @@ handle_call(memory,_From,State=#state{file=File}) ->
Memory=memory(File),
TW = truncated_warning([?memory]),
{reply,{ok,Memory,TW},State};
+handle_call(persistent_terms,_From,State=#state{file=File,dump_vsn=DumpVsn}) ->
+ TW = truncated_warning([?persistent_terms,?literals]),
+ DecodeOpts = get_decode_opts(DumpVsn),
+ Terms = persistent_terms(File, DecodeOpts),
+ {reply,{ok,Terms,TW},State};
handle_call(allocated_areas,_From,State=#state{file=File}) ->
AllocatedAreas=allocated_areas(File),
TW = truncated_warning([?allocated_areas]),
@@ -1444,15 +1453,7 @@ maybe_other_node2(Channel) ->
expand_memory(Fd,Pid,DumpVsn) ->
DecodeOpts = get_decode_opts(DumpVsn),
put(fd,Fd),
- Dict0 = case get(?literals) of
- undefined ->
- Literals = read_literals(Fd,DecodeOpts),
- put(?literals,Literals),
- put(fd,Fd),
- Literals;
- Literals ->
- Literals
- end,
+ Dict0 = get_literals(Fd,DecodeOpts),
Dict = read_heap(Fd,Pid,DecodeOpts,Dict0),
Expanded = {read_stack_dump(Fd,Pid,DecodeOpts,Dict),
read_messages(Fd,Pid,DecodeOpts,Dict),
@@ -1468,6 +1469,18 @@ expand_memory(Fd,Pid,DumpVsn) ->
end,
{Expanded,IncompleteWarning}.
+get_literals(Fd,DecodeOpts) ->
+ case get(?literals) of
+ undefined ->
+ OldFd = put(fd,Fd),
+ Literals = read_literals(Fd,DecodeOpts),
+ put(fd,OldFd),
+ put(?literals,Literals),
+ Literals;
+ Literals ->
+ Literals
+ end.
+
read_literals(Fd,DecodeOpts) ->
case lookup_index(?literals,[]) of
[{_,Start}] ->
@@ -1594,31 +1607,92 @@ read_heap(Fd,Pid,DecodeOpts,Dict0) ->
Dict0
end.
-read_heap(DecodeOpts,Dict0) ->
- %% This function is never called if the dump is truncated in {?proc_heap,Pid}
- case get(fd) of
- end_of_heap ->
+read_heap(DecodeOpts, Dict0) ->
+ %% This function is never called if the dump is truncated in
+ %% {?proc_heap,Pid}.
+ %%
+ %% It is not always possible to reconstruct the heap terms
+ %% in a single pass, especially if maps are involved.
+ %% See crashdump_helper:literal_map/0 for an example.
+ %%
+ %% Therefore, we need two passes. In the first pass
+ %% we collect all lines without parsing them, and in the
+ %% second pass we parse them.
+ %%
+ %% The first pass follows.
+
+ Lines0 = read_heap_lines(),
+
+ %% Save a map of all unprocessed lines so that deref_ptr() can
+ %% access any line when there are references to terms not yet
+ %% built.
+
+ LineMap = maps:from_list(Lines0),
+ put(line_map, LineMap),
+
+ %% Refc binaries (tag "Yc") must be processed before any sub
+ %% binaries (tag "Ys") referencing them, so we make sure to
+ %% process all the refc binaries first.
+ %%
+ %% The other lines can be processed in any order, but processing
+ %% them in the reverse order compared to how they are printed in
+ %% the crash dump seems to minimize the number of references to
+ %% terms that have not yet been built. That happens to be the
+ %% order of the line list as returned by read_heap_lines/0.
+
+ RefcBins = [Refc || {_,<<"Yc",_/binary>>}=Refc <- Lines0],
+ Lines = RefcBins ++ Lines0,
+
+ %% Second pass.
+
+ init_progress("Processing terms", map_size(LineMap)),
+ Dict = parse_heap_terms(Lines, DecodeOpts, Dict0),
+ erase(line_map),
+ end_progress(),
+ Dict.
+
+read_heap_lines() ->
+ read_heap_lines_1(get(fd), []).
+
+read_heap_lines_1(Fd, Acc) ->
+ case bytes(Fd) of
+ "=" ++ _next_tag ->
end_progress(),
- Dict0;
- Fd ->
- case bytes(Fd) of
- "=" ++ _next_tag ->
- end_progress(),
- put(fd, end_of_heap),
- Dict0;
- Line ->
- update_progress(length(Line)+1),
- Dict = parse(Line,DecodeOpts,Dict0),
- read_heap(DecodeOpts,Dict)
- end
+ put(fd, end_of_heap),
+ Acc;
+ Line0 ->
+ update_progress(length(Line0)+1),
+ {Addr,":"++Line1} = get_hex(Line0),
+
+ %% Reduce the memory consumption by converting the
+ %% line to a binary. Measurements show that it may also
+ %% be beneficial for performance, because it makes the
+ %% garbage collections cheaper.
+
+ Line = list_to_binary(Line1),
+ read_heap_lines_1(Fd, [{Addr,Line}|Acc])
end.
-parse(Line0, DecodeOpts, Dict0) ->
- {Addr,":"++Line1} = get_hex(Line0),
- {_Term,Line,Dict} = parse_heap_term(Line1, Addr, DecodeOpts, Dict0),
- [] = skip_blanks(Line),
+parse_heap_terms([{Addr,Line0}|T], DecodeOpts, Dict0) ->
+ case gb_trees:is_defined(Addr, Dict0) of
+ true ->
+ %% Already parsed (by a recursive call from do_deref_ptr()
+ %% to parse_line()). Nothing to do.
+ parse_heap_terms(T, DecodeOpts, Dict0);
+ false ->
+ %% Parse this previously unparsed term.
+ Dict = parse_line(Addr, Line0, DecodeOpts, Dict0),
+ parse_heap_terms(T, DecodeOpts, Dict)
+ end;
+parse_heap_terms([], _DecodeOpts, Dict) ->
Dict.
+parse_line(Addr, Line0, DecodeOpts, Dict0) ->
+ update_progress(1),
+ Line1 = binary_to_list(Line0),
+ {_Term,Line,Dict} = parse_heap_term(Line1, Addr, DecodeOpts, Dict0),
+ [] = skip_blanks(Line), %Assertion.
+ Dict.
%%-----------------------------------------------------------------
%% Page with one port
@@ -2142,6 +2216,56 @@ get_atom(Atom) when is_binary(Atom) ->
{Atom,nq}. % not quoted
%%-----------------------------------------------------------------
+%% Page with list of all persistent terms
+persistent_terms(File, DecodeOpts) ->
+ case lookup_index(?persistent_terms) of
+ [{_Id,Start}] ->
+ Fd = open(File),
+ pos_bof(Fd,Start),
+ Terms = get_persistent_terms(Fd),
+ Dict = get_literals(Fd,DecodeOpts),
+ parse_persistent_terms(Terms,DecodeOpts,Dict);
+ _ ->
+ []
+ end.
+
+parse_persistent_terms([[Name0,Val0]|Terms],DecodeOpts,Dict) ->
+ {Name,_,_} = parse_term(binary_to_list(Name0),DecodeOpts,Dict),
+ {Val,_,_} = parse_term(binary_to_list(Val0),DecodeOpts,Dict),
+ [{Name,Val}|parse_persistent_terms(Terms,DecodeOpts,Dict)];
+parse_persistent_terms([],_,_) -> [].
+
+get_persistent_terms(Fd) ->
+ case get_chunk(Fd) of
+ {ok,Bin} ->
+ get_persistent_terms(Fd,Bin,[]);
+ eof ->
+ []
+ end.
+
+
+%% Persistent terms are written one per line in the crash dump.
+get_persistent_terms(Fd,Bin,PersistentTerms) ->
+ Bins = binary:split(Bin,<<"\n">>,[global]),
+ get_persistent_terms1(Fd,Bins,PersistentTerms).
+
+get_persistent_terms1(_Fd,[<<"=",_/binary>>|_],PersistentTerms) ->
+ PersistentTerms;
+get_persistent_terms1(Fd,[LastBin],PersistentTerms) ->
+ case get_chunk(Fd) of
+ {ok,Bin0} ->
+ get_persistent_terms(Fd,<<LastBin/binary,Bin0/binary>>,PersistentTerms);
+ eof ->
+ [get_persistent_term(LastBin)|PersistentTerms]
+ end;
+get_persistent_terms1(Fd,[Bin|Bins],PersistentTerms) ->
+ get_persistent_terms1(Fd,Bins,[get_persistent_term(Bin)|PersistentTerms]).
+
+get_persistent_term(Bin) ->
+ binary:split(Bin,<<"|">>).
+
+
+%%-----------------------------------------------------------------
%% Page with memory information
memory(File) ->
case lookup_index(?memory) of
@@ -2871,16 +2995,18 @@ parse_atom_translation_table(N, Line0, As) ->
deref_ptr(Ptr, Line, DecodeOpts, D) ->
- Lookup = fun(D0) ->
- gb_trees:lookup(Ptr, D0)
- end,
+ Lookup0 = fun(D0) ->
+ gb_trees:lookup(Ptr, D0)
+ end,
+ Lookup = wrap_line_map(Ptr, Lookup0),
do_deref_ptr(Lookup, Line, DecodeOpts, D).
deref_bin(Binp0, Offset, Sz, Line, DecodeOpts, D) ->
Binp = Binp0 bor DecodeOpts#dec_opts.bin_addr_adj,
- Lookup = fun(D0) ->
- lookup_binary(Binp, Offset, Sz, D0)
- end,
+ Lookup0 = fun(D0) ->
+ lookup_binary(Binp, Offset, Sz, D0)
+ end,
+ Lookup = wrap_line_map(Binp, Lookup0),
do_deref_ptr(Lookup, Line, DecodeOpts, D).
lookup_binary(Binp, Offset, Sz, D) ->
@@ -2899,26 +3025,36 @@ lookup_binary(Binp, Offset, Sz, D) ->
end
end.
+wrap_line_map(Ptr, Lookup) ->
+ wrap_line_map_1(get(line_map), Ptr, Lookup).
+
+wrap_line_map_1(#{}=LineMap, Ptr, Lookup) ->
+ fun(D) ->
+ case Lookup(D) of
+ {value,_}=Res ->
+ Res;
+ none ->
+ case LineMap of
+ #{Ptr:=Line} ->
+ {line,Ptr,Line};
+ #{} ->
+ none
+ end
+ end
+ end;
+wrap_line_map_1(undefined, _Ptr, Lookup) ->
+ Lookup.
+
do_deref_ptr(Lookup, Line, DecodeOpts, D0) ->
case Lookup(D0) of
{value,Term} ->
{Term,Line,D0};
none ->
- case get(fd) of
- end_of_heap ->
- put(incomplete_heap,true),
- {['#CDVIncompleteHeap'],Line,D0};
- Fd ->
- case bytes(Fd) of
- "="++_ ->
- put(fd, end_of_heap),
- do_deref_ptr(Lookup, Line, DecodeOpts, D0);
- L ->
- update_progress(length(L)+1),
- D = parse(L, DecodeOpts, D0),
- do_deref_ptr(Lookup, Line, DecodeOpts, D)
- end
- end
+ put(incomplete_heap, true),
+ {['#CDVIncompleteHeap'],Line,D0};
+ {line,Addr,NewLine} ->
+ D = parse_line(Addr, NewLine, DecodeOpts, D0),
+ do_deref_ptr(Lookup, Line, DecodeOpts, D)
end.
get_hex(L) ->
@@ -3119,6 +3255,7 @@ tag_to_atom("literals") -> ?literals;
tag_to_atom("loaded_modules") -> ?loaded_modules;
tag_to_atom("memory") -> ?memory;
tag_to_atom("mod") -> ?mod;
+tag_to_atom("persistent_terms") -> ?persistent_terms;
tag_to_atom("no_distribution") -> ?no_distribution;
tag_to_atom("node") -> ?node;
tag_to_atom("not_connected") -> ?not_connected;
diff --git a/lib/observer/src/observer.app.src b/lib/observer/src/observer.app.src
index d73293a5f9..d48b846ad2 100644
--- a/lib/observer/src/observer.app.src
+++ b/lib/observer/src/observer.app.src
@@ -34,6 +34,7 @@
cdv_mem_cb,
cdv_mod_cb,
cdv_multi_wx,
+ cdv_persistent_cb,
cdv_port_cb,
cdv_proc_cb,
cdv_table_wx,
diff --git a/lib/observer/src/observer_html_lib.erl b/lib/observer/src/observer_html_lib.erl
index 0c4e32af49..c67fa28c6d 100644
--- a/lib/observer/src/observer_html_lib.erl
+++ b/lib/observer/src/observer_html_lib.erl
@@ -62,7 +62,8 @@ expandable_term_body(Heading,[],_Tab) ->
"Dictionary" -> "No dictionary was found";
"ProcState" -> "Information could not be retrieved,"
" system messages may not be handled by this process.";
- "SaslLog" -> "No log entry was found"
+ "SaslLog" -> "No log entry was found";
+ "Persistent Terms" -> "No persistent terms were found"
end];
expandable_term_body(Heading,Expanded,Tab) ->
Attr = "BORDER=0 CELLPADDING=0 CELLSPACING=1 WIDTH=100%",
diff --git a/lib/observer/test/crashdump_helper.erl b/lib/observer/test/crashdump_helper.erl
index 145ff56b71..576d112154 100644
--- a/lib/observer/test/crashdump_helper.erl
+++ b/lib/observer/test/crashdump_helper.erl
@@ -21,7 +21,9 @@
-module(crashdump_helper).
-export([n1_proc/2,remote_proc/2,
dump_maps/0,create_maps/0,
- create_binaries/0,create_sub_binaries/1]).
+ create_binaries/0,create_sub_binaries/1,
+ dump_persistent_terms/0,
+ create_persistent_terms/0]).
-compile(r18).
-include_lib("common_test/include/ct.hrl").
@@ -142,4 +144,47 @@ create_maps() ->
Map3 = lists:foldl(fun(I, A) ->
A#{I=>I*I}
end, Map2, lists:seq(-10, 0)),
- #{a=>Map0,b=>Map1,c=>Map2,d=>Map3,e=>#{}}.
+ #{a=>Map0,b=>Map1,c=>Map2,d=>Map3,e=>#{},literal=>literal_map()}.
+
+literal_map() ->
+ %% A literal map such as the one below will produce a heap dump
+ %% like this:
+ %%
+ %% Address1:t4:H<Address3>,H<Address4>,H<Address5>,H<Address6>
+ %% Address2:Mf4:H<Address1>:I1,I2,I3,I4
+ %% Address3: ... % "one"
+ %% Address4: ... % "two"
+ %% Address5: ... % "three"
+ %% Address6: ... % "four"
+ %%
+ %% The map cannot be reconstructed in a single sequential pass.
+ %%
+ %% To reconstruct the map, first the string keys "one"
+ %% through "four" must be reconstructed, then the tuple at
+ %% Address1, then the map at Address2.
+
+ #{"one"=>1,"two"=>2,"three"=>3,"four"=>4}.
+
+%%%
+%%% Test dumping of persistent terms (from OTP 21.2).
+%%%
+
+dump_persistent_terms() ->
+ Parent = self(),
+ F = fun() ->
+ register(aaaaaaaa_persistent_terms, self()),
+ put(pts, create_persistent_terms()),
+ Parent ! {self(),done},
+ receive _ -> ok end
+ end,
+ Pid = spawn_link(F),
+ receive
+ {Pid,done} ->
+ {ok,Pid}
+ end.
+
+create_persistent_terms() ->
+ persistent_term:put({?MODULE,first}, {pid,42.0}),
+ persistent_term:put({?MODULE,second}, [1,2,3]),
+ persistent_term:get().
+
diff --git a/lib/observer/test/crashdump_viewer_SUITE.erl b/lib/observer/test/crashdump_viewer_SUITE.erl
index 864454cdff..ed53c46a0d 100644
--- a/lib/observer/test/crashdump_viewer_SUITE.erl
+++ b/lib/observer/test/crashdump_viewer_SUITE.erl
@@ -345,6 +345,7 @@ browse_file(File) ->
{ok,_AllocINfo,_AllocInfoTW} = crashdump_viewer:allocator_info(),
{ok,_HashTabs,_HashTabsTW} = crashdump_viewer:hash_tables(),
{ok,_IndexTabs,_IndexTabsTW} = crashdump_viewer:index_tables(),
+ {ok,_PTs,_PTsTW} = crashdump_viewer:persistent_terms(),
io:format(" info read",[]),
@@ -595,6 +596,23 @@ special(File,Procs) ->
Maps = proplists:get_value(maps,Dict),
io:format(" maps ok",[]),
ok;
+ ".persistent_terms" ->
+ %% I registered a process as aaaaaaaa_persistent_terms in
+ %% the dump to make sure it will be the first in the list
+ %% when sorted on names.
+ [#proc{pid=Pid0,name=Name}|_Rest] = lists:keysort(#proc.name,Procs),
+ "aaaaaaaa_persistent_terms" = Name,
+ Pid = pid_to_list(Pid0),
+ {ok,ProcDetails=#proc{},[]} = crashdump_viewer:proc_details(Pid),
+ io:format(" process details ok",[]),
+
+ #proc{dict=Dict} = ProcDetails,
+ %% io:format("~p\n", [Dict]),
+ Pts1 = crashdump_helper:create_persistent_terms(),
+ Pts2 = proplists:get_value(pts,Dict),
+ true = lists:sort(Pts1) =:= lists:sort(Pts2),
+ io:format(" persistent terms ok",[]),
+ ok;
_ ->
ok
end,
@@ -679,9 +697,11 @@ do_create_dumps(DataDir,Rel) ->
CD5 = dump_with_size_limit_reached(DataDir,Rel,"trunc_bytes"),
CD6 = dump_with_unicode_atoms(DataDir,Rel,"unicode"),
CD7 = dump_with_maps(DataDir,Rel,"maps"),
+ CD8 = dump_with_persistent_terms(DataDir,Rel,"persistent_terms"),
TruncDumpMod = truncate_dump_mod(CD1),
TruncatedDumpsBinary = truncate_dump_binary(CD1),
- {[CD1,CD2,CD3,CD4,CD5,CD6,CD7,TruncDumpMod|TruncatedDumpsBinary],
+ {[CD1,CD2,CD3,CD4,CD5,CD6,CD7,CD8,
+ TruncDumpMod|TruncatedDumpsBinary],
DosDump};
_ ->
{[CD1,CD2], DosDump}
@@ -850,6 +870,16 @@ dump_with_maps(DataDir,Rel,DumpName) ->
?t:stop_node(n1),
CD.
+dump_with_persistent_terms(DataDir,Rel,DumpName) ->
+ Opt = rel_opt(Rel),
+ Pz = "-pz \"" ++ filename:dirname(code:which(?MODULE)) ++ "\"",
+ PzOpt = [{args,Pz}],
+ {ok,N1} = ?t:start_node(n1,peer,Opt ++ PzOpt),
+ {ok,_Pid} = rpc:call(N1,crashdump_helper,dump_persistent_terms,[]),
+ CD = dump(N1,DataDir,Rel,DumpName),
+ ?t:stop_node(n1),
+ CD.
+
dump(Node,DataDir,Rel,DumpName) ->
Crashdump = filename:join(DataDir, dump_prefix(Rel)++DumpName),
rpc:call(Node,os,putenv,["ERL_CRASH_DUMP",Crashdump]),
diff --git a/lib/os_mon/c_src/cpu_sup.c b/lib/os_mon/c_src/cpu_sup.c
index 17ef48c26e..c96a5c9f7c 100644
--- a/lib/os_mon/c_src/cpu_sup.c
+++ b/lib/os_mon/c_src/cpu_sup.c
@@ -152,6 +152,8 @@ static void util_measure(unsigned int **result_vec, int *result_sz);
#if defined(__sun__)
static unsigned int misc_measure(char* name);
+#elif defined(__linux__)
+static unsigned int misc_measure(char cmd);
#endif
static void sendi(unsigned int data);
static void sendv(unsigned int data[], int ints);
@@ -231,6 +233,11 @@ int main(int argc, char** argv) {
case AVG1: sendi(misc_measure("avenrun_1min")); break;
case AVG5: sendi(misc_measure("avenrun_5min")); break;
case AVG15: sendi(misc_measure("avenrun_15min")); break;
+#elif defined(__linux__)
+ case NPROCS:
+ case AVG1:
+ case AVG5:
+ case AVG15: sendi(misc_measure(cmd)); break;
#elif defined(__OpenBSD__) || (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__) || defined(__DragonFly__)
case NPROCS: bsd_count_procs(); break;
case AVG1: bsd_loadavg(0); break;
@@ -238,7 +245,7 @@ int main(int argc, char** argv) {
case AVG15: bsd_loadavg(2); break;
#endif
#if defined(__sun__) || defined(__linux__) || (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__)
- case UTIL: util_measure(&rv,&sz); sendv(rv, sz); break;
+ case UTIL: util_measure(&rv,&sz); sendv(rv, sz); break;
#endif
case QUIT: free((void*)rv); return 0;
default: error("Bad command"); break;
@@ -329,6 +336,22 @@ static void bsd_count_procs(void) {
#if defined(__linux__)
+static unsigned int misc_measure(char cmd) {
+ struct sysinfo info;
+
+ if (sysinfo(&info))
+ error(strerror(errno));
+
+ switch (cmd) {
+ case AVG1: return (unsigned int)(info.loads[0] / 256);
+ case AVG5: return (unsigned int)(info.loads[1] / 256);
+ case AVG15: return (unsigned int)(info.loads[2] / 256);
+ case NPROCS: return info.procs;
+ }
+
+ return -1;
+}
+
static cpu_t *read_procstat(FILE *fp, cpu_t *cpu) {
char buffer[BUFFERSIZE];
@@ -357,8 +380,24 @@ static void util_measure(unsigned int **result_vec, int *result_sz) {
FILE *fp;
unsigned int *rv = NULL;
cpu_t cpu;
-
+
+ rv = *result_vec;
+ rv[0] = no_of_cpus;
+
if ( (fp = fopen(PROCSTAT,"r")) == NULL) {
+ if (errno == EACCES) { /* SELinux */
+ rv[1] = 1; /* just the cpu id */
+ ++rv; /* first value is number of cpus */
+ ++rv; /* second value is number of entries */
+ for (i = 0; i < no_of_cpus; ++i) {
+ rv[0] = CU_CPU_ID;
+ rv[1] = i;
+ rv += 1*2;
+ }
+ *result_sz = 2 + 2*1 * no_of_cpus;
+ return;
+ }
+
/* Check if procfs is mounted,
* otherwise:
* try and try again, bad procsfs.
@@ -367,20 +406,19 @@ static void util_measure(unsigned int **result_vec, int *result_sz) {
return;
}
- /*ignore read*/
+ /*ignore read*/
if (fgets(buffer, BUFFERSIZE, fp) == NULL) {
*result_sz = 0;
return;
}
- rv = *result_vec;
- rv[0] = no_of_cpus;
+
rv[1] = CU_VALUES;
++rv; /* first value is number of cpus */
++rv; /* second value is number of entries */
for (i = 0; i < no_of_cpus; ++i) {
read_procstat(fp, &cpu);
-
+
rv[ 0] = CU_CPU_ID; rv[ 1] = cpu.id;
rv[ 2] = CU_USER; rv[ 3] = cpu.user;
rv[ 4] = CU_NICE_USER; rv[ 5] = cpu.nice_user;
diff --git a/lib/os_mon/src/cpu_sup.erl b/lib/os_mon/src/cpu_sup.erl
index 81e049ef22..ba2d89313e 100644
--- a/lib/os_mon/src/cpu_sup.erl
+++ b/lib/os_mon/src/cpu_sup.erl
@@ -220,17 +220,21 @@ code_change(_OldVsn, State, _Extra) ->
%% internal functions
%%----------------------------------------------------------------------
-get_uint32_measurement(Request, #internal{os_type = {unix, linux}}) ->
- {ok,F} = file:open("/proc/loadavg",[read,raw]),
- {ok,D} = file:read_line(F),
- ok = file:close(F),
- {ok,[Load1,Load5,Load15,_PRun,PTotal],_} = io_lib:fread("~f ~f ~f ~d/~d", D),
- case Request of
- ?avg1 -> sunify(Load1);
- ?avg5 -> sunify(Load5);
- ?avg15 -> sunify(Load15);
- ?ping -> 4711;
- ?nprocs -> PTotal
+get_uint32_measurement(Request, #internal{port = P, os_type = {unix, linux}}) ->
+ case file:open("/proc/loadavg",[read,raw]) of
+ {ok,F} ->
+ {ok,D} = file:read_line(F),
+ ok = file:close(F),
+ {ok,[Load1,Load5,Load15,_PRun,PTotal],_} = io_lib:fread("~f ~f ~f ~d/~d", D),
+ case Request of
+ ?avg1 -> sunify(Load1);
+ ?avg5 -> sunify(Load5);
+ ?avg15 -> sunify(Load15);
+ ?ping -> 4711;
+ ?nprocs -> PTotal
+ end;
+ {error,_} ->
+ port_server_call(P, Request)
end;
get_uint32_measurement(Request, #internal{port = P, os_type = {unix, Sys}}) when
Sys == sunos;
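
Both the new sysinfo() fallback in cpu_sup.c and the /proc/loadavg path deliver load averages in cpu_sup's documented fixed-point format, that is the load multiplied by 256; on Linux, sysinfo() load values carry a 16-bit binary fraction, which is why misc_measure() divides info.loads[N] by 256 to land on the same scale. A small usage sketch for converting the values back to floats:

    %% cpu_sup:avg1/0, avg5/0 and avg15/0 return Load * 256 as an integer,
    %% so a return value of about 338 corresponds to a load average of 1.32.
    load_averages() ->
        #{avg1  => cpu_sup:avg1()  / 256,
          avg5  => cpu_sup:avg5()  / 256,
          avg15 => cpu_sup:avg15() / 256}.
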
diff --git a/lib/public_key/asn1/OTP-PKIX.asn1 b/lib/public_key/asn1/OTP-PKIX.asn1
index 37196bb9bf..9bcd99fba3 100644
--- a/lib/public_key/asn1/OTP-PKIX.asn1
+++ b/lib/public_key/asn1/OTP-PKIX.asn1
@@ -326,8 +326,13 @@ PublicKeyAlgorithm ::= SEQUENCE {
OPTIONAL }
SupportedSignatureAlgorithms SIGNATURE-ALGORITHM-CLASS ::= {
- dsa-with-sha1 | dsaWithSHA1 | md2-with-rsa-encryption |
- md5-with-rsa-encryption | sha1-with-rsa-encryption | sha-1with-rsa-encryption |
+ dsa-with-sha1 | dsaWithSHA1 |
+ dsa-with-sha224 |
+ dsa-with-sha256 |
+ md2-with-rsa-encryption |
+ md5-with-rsa-encryption |
+ sha1-with-rsa-encryption |
+ sha-1with-rsa-encryption |
sha224-with-rsa-encryption |
sha256-with-rsa-encryption |
sha384-with-rsa-encryption |
@@ -368,6 +373,21 @@ SupportedPublicKeyAlgorithms PUBLIC-KEY-ALGORITHM-CLASS ::= {
ID id-dsaWithSHA1
TYPE DSAParams }
+ dsa-with-sha224 SIGNATURE-ALGORITHM-CLASS ::= {
+ ID id-dsa-with-sha224
+ TYPE DSAParams }
+
+ dsa-with-sha256 SIGNATURE-ALGORITHM-CLASS ::= {
+ ID id-dsa-with-sha256
+ TYPE DSAParams }
+
+ id-dsa-with-sha224 OBJECT IDENTIFIER ::= {
+ joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+ csor(3) algorithms(4) id-dsa-with-sha2(3) 1 }
+
+ id-dsa-with-sha256 OBJECT IDENTIFIER ::= {
+ joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+ csor(3) algorithms(4) id-dsa-with-sha2(3) 2 }
--
-- RSA Keys and Signatures
--
diff --git a/lib/public_key/src/public_key.erl b/lib/public_key/src/public_key.erl
index 3f609ce6c6..dca1e0766e 100644
--- a/lib/public_key/src/public_key.erl
+++ b/lib/public_key/src/public_key.erl
@@ -609,6 +609,10 @@ pkix_sign_types(?'id-dsa-with-sha1') ->
{sha, dsa};
pkix_sign_types(?'id-dsaWithSHA1') ->
{sha, dsa};
+pkix_sign_types(?'id-dsa-with-sha224') ->
+ {sha224, dsa};
+pkix_sign_types(?'id-dsa-with-sha256') ->
+ {sha256, dsa};
pkix_sign_types(?'ecdsa-with-SHA1') ->
{sha, ecdsa};
pkix_sign_types(?'ecdsa-with-SHA256') ->
diff --git a/lib/public_key/test/public_key_SUITE.erl b/lib/public_key/test/public_key_SUITE.erl
index 1955e9e119..878489eb0f 100644
--- a/lib/public_key/test/public_key_SUITE.erl
+++ b/lib/public_key/test/public_key_SUITE.erl
@@ -44,7 +44,9 @@ all() ->
encrypt_decrypt,
{group, sign_verify},
pkix, pkix_countryname, pkix_emailaddress, pkix_path_validation,
- pkix_iso_rsa_oid, pkix_iso_dsa_oid, pkix_crl, general_name,
+ pkix_iso_rsa_oid, pkix_iso_dsa_oid,
+ pkix_dsa_sha2_oid,
+ pkix_crl, general_name,
pkix_verify_hostname_cn,
pkix_verify_hostname_subjAltName,
pkix_verify_hostname_subjAltName_IP,
@@ -1114,6 +1116,13 @@ pkix_iso_dsa_oid(Config) when is_list(Config) ->
{_, dsa} = public_key:pkix_sign_types(SigAlg#'SignatureAlgorithm'.algorithm).
%%--------------------------------------------------------------------
+pkix_dsa_sha2_oid() ->
+ [{doc, "Test support dsa_sha2 oid"}].
+pkix_dsa_sha2_oid(Config) when is_list(Config) ->
+ {sha224, dsa} = public_key:pkix_sign_types(?'id-dsa-with-sha224'),
+ {sha256, dsa} = public_key:pkix_sign_types(?'id-dsa-with-sha256').
+
+%%--------------------------------------------------------------------
pkix_crl() ->
[{doc, "test pkix_crl_* functions"}].
diff --git a/lib/sasl/src/sasl.app.src b/lib/sasl/src/sasl.app.src
index 688aff16f1..5d45af0b50 100644
--- a/lib/sasl/src/sasl.app.src
+++ b/lib/sasl/src/sasl.app.src
@@ -43,5 +43,5 @@
{env, []},
{mod, {sasl, []}},
{runtime_dependencies, ["tools-2.6.14","stdlib-3.4","kernel-5.3",
- "erts-9.0"]}]}.
+ "erts-@OTP-13468@"]}]}.
diff --git a/lib/sasl/src/systools_make.erl b/lib/sasl/src/systools_make.erl
index 6916107623..f085246924 100644
--- a/lib/sasl/src/systools_make.erl
+++ b/lib/sasl/src/systools_make.erl
@@ -1562,10 +1562,10 @@ mandatory_modules() ->
preloaded() ->
%% Sorted
- [erl_prim_loader,erl_tracer,erlang,
+ [atomics, counters, erl_prim_loader,erl_tracer,erlang,
erts_code_purger,erts_dirty_process_signal_handler,
erts_internal,erts_literal_area_collector,
- init,otp_ring0,prim_buffer,prim_eval,prim_file,
+ init,otp_ring0,persistent_term,prim_buffer,prim_eval,prim_file,
prim_inet,prim_zip,zlib].
%%______________________________________________________________________
diff --git a/lib/ssh/doc/src/Makefile b/lib/ssh/doc/src/Makefile
index 77fa356092..4e32dd9976 100644
--- a/lib/ssh/doc/src/Makefile
+++ b/lib/ssh/doc/src/Makefile
@@ -45,6 +45,7 @@ XML_REF3_FILES = \
ssh_connection.xml \
ssh_server_channel.xml \
ssh_server_key_api.xml \
+ ssh_file.xml \
ssh_sftp.xml \
ssh_sftpd.xml \
@@ -56,8 +57,8 @@ XML_CHAPTER_FILES = \
notes.xml \
introduction.xml \
using_ssh.xml \
+ terminology.xml \
configure_algos.xml
-# ssh_protocol.xml \
BOOK_FILES = book.xml
diff --git a/lib/ssh/doc/src/notes.xml b/lib/ssh/doc/src/notes.xml
index 7e77c6a457..42bdf667f8 100644
--- a/lib/ssh/doc/src/notes.xml
+++ b/lib/ssh/doc/src/notes.xml
@@ -230,6 +230,22 @@
</section>
</section>
+<section><title>Ssh 4.6.9.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Incompatibility with newer OpenSSH fixed. Previously,
+ OpenSSH versions 7.8 and later could cause Erlang SSH to exit.</p>
+ <p>
+ Own Id: OTP-15413</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Ssh 4.6.9.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
@@ -3869,4 +3885,3 @@
</section>
</chapter>
-
diff --git a/lib/ssh/doc/src/ref_man.xml b/lib/ssh/doc/src/ref_man.xml
index df37b0244f..60572b985b 100644
--- a/lib/ssh/doc/src/ref_man.xml
+++ b/lib/ssh/doc/src/ref_man.xml
@@ -40,6 +40,7 @@
<xi:include href="ssh_connection.xml"/>
<xi:include href="ssh_client_key_api.xml"/>
<xi:include href="ssh_server_key_api.xml"/>
+ <xi:include href="ssh_file.xml"/>
<xi:include href="ssh_sftp.xml"/>
<xi:include href="ssh_sftpd.xml"/>
</application>
diff --git a/lib/ssh/doc/src/specs.xml b/lib/ssh/doc/src/specs.xml
index acdbe2ddfd..a6517f3660 100644
--- a/lib/ssh/doc/src/specs.xml
+++ b/lib/ssh/doc/src/specs.xml
@@ -6,6 +6,7 @@
<xi:include href="../specs/specs_ssh_connection.xml"/>
<xi:include href="../specs/specs_ssh_server_channel.xml"/>
<xi:include href="../specs/specs_ssh_server_key_api.xml"/>
+ <xi:include href="../specs/specs_ssh_file.xml"/>
<xi:include href="../specs/specs_ssh_sftp.xml"/>
<xi:include href="../specs/specs_ssh_sftpd.xml"/>
</specs>
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index f238bf2ca8..8435fced11 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -99,8 +99,8 @@
</p>
<p>The paths could easily be changed by options:
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> and
- <seealso marker="#type-system_dir_daemon_option"><c>system_dir</c></seealso>.
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> and
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>.
</p>
<p>A completly different storage could be interfaced by writing call-back modules
using the behaviours
@@ -123,12 +123,12 @@
<item><c>ssh_host_ecdsa_key</c> and <c>ssh_host_ecdsa_key.pub</c></item>
</list>
<p>The host keys directory could be changed with the option
- <seealso marker="#type-system_dir_daemon_option"><c>system_dir</c></seealso>.</p>
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>.</p>
</item>
<item>Optional: one or more <i>User's public key</i> in case of <c>publickey</c> authorization.
Default is to store them concatenated in the file <c>.ssh/authorized_keys</c> in the user's home directory.
<p>The user keys directory could be changed with the option
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>.</p>
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>.</p>
</item>
</list>
</section>
@@ -138,7 +138,7 @@
<p>The keys and some other data are by default stored in files in the directory <c>.ssh</c>
in the user's home directory.</p>
<p>The directory could be changed with the option
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>.
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>.
</p>
<list>
<item>Optional: a list of <i>Host public key(s)</i> for previously connected hosts. This list
@@ -183,31 +183,6 @@
</datatype>
<datatype>
- <name name="pref_public_key_algs_client_option"/>
- <desc>
- <p>List of user (client) public key algorithms to try to use.</p>
- <p>The default value is the <c>public_key</c> entry in the list returned by
- <seealso marker="#default_algorithms/0">ssh:default_algorithms/0</seealso>.
- </p>
- <p>If there is no public key of a specified type available, the corresponding entry is ignored.
- Note that the available set is dependent on the underlying cryptolib and current user's public keys.
- </p>
- <p>See also the option <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>
- for specifying the path to the user's keys.
- </p>
- </desc>
- </datatype>
-
- <datatype>
- <name name="pubkey_passphrase_client_options"/>
- <desc>
- <p>If the user's DSA, RSA or ECDSA key is protected by a passphrase, it can be
- supplied with thoose options.
- </p>
- </desc>
- </datatype>
-
- <datatype>
<name name="host_accepting_client_options"/>
<name name="accept_hosts"/>
<name name="fp_digest_alg"/>
@@ -220,7 +195,7 @@
<p>This option guides the <c>connect</c> function on how to act when the connected server presents a Host
Key that the client has not seen before. The default is to ask the user with a question on stdio of whether to
accept or reject the new Host Key.
- See the option <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>
+ See the option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
for specifying the path to the file <c>known_hosts</c> where previously accepted Host Keys are recorded.
See also the option
<seealso marker="#type-key_cb_common_option">key_cb</seealso>
@@ -276,7 +251,7 @@
accept question the next time the same host is connected. If the option
<seealso marker="#type-key_cb_common_option"><c>key_cb</c></seealso>
is not present, the key is saved in the file "known_hosts". See option
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> for
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> for
the location of that file.
</p>
<p>If <c>false</c>, the key is not saved and the key will still be unknown
@@ -406,9 +381,20 @@
<datatype>
<name name="exec_daemon_option"/>
+ <name name="exec_spec"/>
+ <desc/>
+ </datatype>
+ <datatype>
+ <name name="exec_fun"/>
+ <desc/>
+ </datatype>
+ <datatype>
<name name="'exec_fun/1'"/>
<name name="'exec_fun/2'"/>
<name name="'exec_fun/3'"/>
+ <desc/>
+ </datatype>
+ <datatype>
<name name="exec_result"/>
<desc>
<p>This option changes how the daemon execute exec-requests from clients. The term in the return value
@@ -478,18 +464,6 @@
<name name="pwdfun_4"/>
<desc>
<taglist>
- <tag><marker id="type-system_dir_daemon_option"/><c>system_dir</c></tag>
- <item>
- <p>Sets the system directory, containing the host key files
- that identify the host keys for <c>ssh</c>. Defaults to
- <c>/etc/ssh</c>.</p>
- <p>For security reasons, this directory is normally accessible only to the root user.</p>
- <p>See also the option
- <seealso marker="#type-key_cb_common_option">key_cb</seealso>
- for the general way to handle keys.
- </p>
- </item>
-
<tag><c>auth_method_kb_interactive_data</c></tag>
<item>
<p>Sets the text strings that the daemon sends to the client for presentation to the user when
@@ -502,7 +476,7 @@
</p>
</item>
- <tag><c>user_passwords</c></tag>
+ <tag><marker id="option-user_passwords"/><c>user_passwords</c></tag>
<item>
<p>Provides passwords for password authentication. The passwords are used when someone tries
to connect to the server and public key user-authentication fails. The option provides
@@ -510,7 +484,7 @@
</p>
</item>
- <tag><c>password</c></tag>
+ <tag><marker id="option-password"/><c>password</c></tag>
<item>
<p>Provides a global password that authenticates any user.</p>
<warning>
@@ -519,7 +493,9 @@
</warning>
</item>
- <tag><c>pwdfun</c> with <c>pwdfun_4()</c></tag>
+ <tag><marker id="option-pwdfun"/><c>pwdfun</c> with
+ <seealso marker="#type-pwdfun_4"><c>pwdfun_4()</c></seealso>
+ </tag>
<item>
<p>Provides a function for password validation. This could used for calling an external system or handeling
passwords stored as hash values.
@@ -546,7 +522,9 @@
can be used for this. The return value <c>disconnect</c> is useful for this.</p>
</item>
- <tag><c>pwdfun</c> with <c>pwdfun_2()</c></tag>
+ <tag><c>pwdfun</c> with
+ <seealso marker="#type-pwdfun_2"><c>pwdfun_2()</c></seealso>
+ </tag>
<item>
<p>Provides a function for password validation. This function is called with user and password
as strings, and returns:</p>
@@ -725,21 +703,6 @@
</datatype>
<datatype>
- <name name="user_dir_common_option"/>
- <desc>
- <p>Sets the user directory. That is, the directory containing <c>ssh</c> configuration
- files for the user, such as
- <c>known_hosts</c>, <c>id_rsa</c>, <c>id_dsa</c>>, <c>id_ecdsa</c> and <c>authorized_key</c>.
- Defaults to the directory normally referred to as <c>~/.ssh</c>.
- </p>
- <p>See also the option
- <seealso marker="#type-key_cb_common_option">key_cb</seealso>
- for the general way to handle keys.
- </p>
- </desc>
- </datatype>
-
- <datatype>
<name name="profile_common_option"/>
<desc>
<p>Used together with <c>ip-address</c> and <c>port</c> to
@@ -795,7 +758,8 @@
</p>
<p>The <c>Opts</c> defaults to <c>[]</c> when only the <c>Module</c> is specified.
</p>
- <p>The default value of this option is <c>{ssh_file, []}</c>.
+ <p>The default value of this option is <c>{ssh_file, []}</c>. See also the manpage of
+ <seealso marker="ssh:ssh_file">ssh_file</seealso>.
</p>
<p>A call to the call-back function <c>F</c> will be</p>
<code>
@@ -804,13 +768,32 @@
<p>where <c>...</c> are arguments to <c>F</c> as in
<seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and/or
<seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>.
- The <c>UserOptions</c> are the options given to <c>ssh:connect</c>, <c>ssh:shell</c> or <c>ssh:daemon</c>.
+ The <c>UserOptions</c> are the options given to
+ <seealso marker="ssh:ssh#connect-3">ssh:connect</seealso>,
+ <seealso marker="ssh:ssh#shell-1">ssh:shell</seealso> or
+ <seealso marker="ssh:ssh#daemon-2">ssh:daemon</seealso>.
</p>
</desc>
</datatype>
<datatype>
+ <name name="pref_public_key_algs_common_option"/>
+ <desc>
+ <p>List of user (client) public key algorithms to try to use.</p>
+ <p>The default value is the <c>public_key</c> entry in the list returned by
+ <seealso marker="#default_algorithms/0">ssh:default_algorithms/0</seealso>.
+ </p>
+ <p>If there is no public key of a specified type available, the corresponding entry is ignored.
+      Note that the available set depends on the underlying cryptolib and the current user's public keys.
+ </p>
+ <p>See also the option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
+ for specifying the path to the user's keys.
+ </p>
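+      <p>As an illustration, a minimal sketch of a client that prefers the RSA SHA-2 algorithm
+      and falls back to plain <c>ssh-rsa</c> (the host name is invented):
+      </p>
+      <code>
+%% Invented host name; example algorithm list
+ssh:connect("host.example.com", 22,
+            [{pref_public_key_algs, ['rsa-sha2-256', 'ssh-rsa']}]).
+      </code>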
+ </desc>
+ </datatype>
+
+ <datatype>
<name name="disconnectfun_common_option"/>
<desc>
<p>Provides a fun to implement your own logging when the peer disconnects.</p>
diff --git a/lib/ssh/doc/src/ssh_app.xml b/lib/ssh/doc/src/ssh_app.xml
index e80bb1853d..eb804e67dc 100644
--- a/lib/ssh/doc/src/ssh_app.xml
+++ b/lib/ssh/doc/src/ssh_app.xml
@@ -74,13 +74,18 @@
<c>id_ecdsa_key</c>,
<c>known_hosts</c>, and <c>authorized_keys</c> in ~/.ssh,
and for the host key files in <c>/etc/ssh</c>. These locations can be changed
- by the options <c>user_dir</c> and <c>system_dir</c>.
+ by the options
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> and
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>.
</p>
<p>Public key handling can also be customized through a callback module that
implements the behaviors
<seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and
<seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>.
</p>
+ <p>See also the default callback module documentation in
+ <seealso marker="ssh_file">ssh_file</seealso>.
+ </p>
</section>
<section>
diff --git a/lib/ssh/doc/src/ssh_file.xml b/lib/ssh/doc/src/ssh_file.xml
new file mode 100644
index 0000000000..ae6ba2e1d9
--- /dev/null
+++ b/lib/ssh/doc/src/ssh_file.xml
@@ -0,0 +1,275 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ssh_file</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+ <module>ssh_file</module>
+ <modulesummary>Default callback module for the client's and server's database operations in the ssh application</modulesummary>
+ <description>
+ <p>This module is the default callback handler for the client's and the server's user and host "database" operations.
+ All data, for instance key pairs, are stored in files in the normal file system. This page documents the files, where they
+    All data, for instance key pairs, are stored in files in the normal file system. This page documents those files, where they
+    are stored, and the configuration options for this callback module.
+ <p>The intention is to be compatible with the
+ <url href="http://www.openssh.com">OpenSSH</url>
+ storage in files. Therefore it mimics directories and filenames of
+ <url href="http://www.openssh.com">OpenSSH</url>.
+ </p>
+
+    <p>The <c>ssh_file</c> module implements the <seealso marker="ssh:ssh_server_key_api">ssh_server_key_api</seealso> and
+    the <seealso marker="ssh:ssh_client_key_api">ssh_client_key_api</seealso> behaviours.
+    This makes it possible to write a custom interface that is backed by, for example, a database.
+ </p>
+    <p>Such an alternative callback module is selected with the option
+    <seealso marker="ssh:ssh#type-key_cb_common_option"><c>key_cb</c></seealso>
+    when starting a client or a server (with, for example,
+    <seealso marker="ssh:ssh#connect-3">ssh:connect</seealso>,
+    <seealso marker="ssh:ssh#daemon-2">ssh:daemon</seealso> or
+    <seealso marker="ssh:ssh#shell-1">ssh:shell</seealso>).
+ </p>
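+    <p>For illustration only, a minimal sketch that selects such a module from a client;
+    <c>my_db_keys</c> and its option list are invented names, and the module must implement
+    the two behaviours above:
+    </p>
+    <code>
+%% my_db_keys is an invented callback module
+ssh:connect("host.example.com", 22,
+            [{key_cb, {my_db_keys, [{db, "users"}]}}]).
+    </code>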
+
+ <note>
+ <p>The functions are <i>Callbacks</i> for the SSH app. They are not intended to be called from the user's code!
+ </p>
+ </note>
+ </description>
+
+ <section>
+ <title>Files, directories and who uses them</title>
+ <section>
+ <title>Daemons</title>
+    <p>Daemons use all files stored in the <seealso marker="#SYSDIR">SYSDIR</seealso> directory.
+ </p>
+    <p>Optionally, in case of <c>publickey</c> authorization, one or more of the remote user's public keys
+    in the <seealso marker="#USERDIR">USERDIR</seealso> directory are used.
+ See the files
+ <seealso marker="#USERDIR-authorized_keys"><c>USERDIR/authorized_keys</c></seealso> and
+ <seealso marker="#USERDIR-authorized_keys2"><c>USERDIR/authorized_keys2</c></seealso>.
+ </p>
+ </section>
+
+ <section>
+ <title>Clients</title>
+    <p>Clients use all files stored in the <seealso marker="#USERDIR">USERDIR</seealso> directory.
+ </p>
+ </section>
+
+ <section>
+ <title>Directory contents</title>
+ <taglist>
+ <tag><marker id="LOCALUSER"/>LOCALUSER</tag>
+ <item><p>The user name of the OS process running the Erlang virtual machine (emulator).</p>
+ </item>
+
+ <tag><marker id="SYSDIR"/>SYSDIR</tag>
+ <item><p>This is the directory holding the server's files:</p>
+ <list>
+ <item><marker id="SYSDIR-ssh_host_dsa_key"/><c>ssh_host_dsa_key</c> - private dss host key (optional)</item>
+ <item><marker id="SYSDIR-ssh_host_rsa_key"/><c>ssh_host_rsa_key</c> - private rsa host key (optional)</item>
+ <item><marker id="SYSDIR-ssh_host_ecdsa_key"/><c>ssh_host_ecdsa_key</c> - private ecdsa host key (optional)</item>
+ </list>
+ <p>At least one host key must be defined. The default value of SYSDIR is <marker id="#/etc/ssh"/><c>/etc/ssh</c>.
+ </p>
+ <p>For security reasons, this directory is normally accessible only to the root user.
+ </p>
+ <p>To change the SYSDIR, see the <seealso marker="#type-system_dir_daemon_option">system_dir</seealso> option.
+ </p>
+ </item>
+
+ <tag><marker id="USERDIR"/>USERDIR</tag>
+ <item><p>This is the directory holding the files:</p>
+ <list>
+ <item><marker id="USERDIR-authorized_keys"/><c>authorized_keys</c>
+        and, as a second alternative,
+ <marker id="USERDIR-authorized_keys2"/><c>authorized_keys2</c> -
+ the user's public keys are stored concatenated in one of those files.
+ </item>
+        <item><marker id="USERDIR-known_hosts"/><c>known_hosts</c> - host keys from previously visited hosts,
+        concatenated. The file is created and used by the client.</item>
+ <item><marker id="USERDIR-id_dsa"/><c>id_dsa</c> - private dss user key (optional)</item>
+ <item><marker id="USERDIR-id_rsa"/><c>id_rsa</c> - private rsa user key (optional)</item>
+ <item><marker id="USERDIR-id_ecdsa"/><c>id_ecdsa</c> - private ecdsa user key (optional)</item>
+ </list>
+ <p>The default value of USERDIR is <c>/home/</c><seealso marker="#LOCALUSER"><c>LOCALUSER</c></seealso><c>/.ssh</c>.
+ </p>
+      <p>To change the USERDIR, see the <seealso marker="#type-user_dir_common_option">user_dir</seealso> option.
+ </p>
+ </item>
+ </taglist>
+ </section>
+ </section>
+
+ <datatypes>
+ <datatype_title>Options for the default ssh_file callback module</datatype_title>
+ <datatype>
+ <name name="user_dir_common_option"/>
+ <desc>
+ <p>Sets the <seealso marker="#USERDIR">user directory</seealso>.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="user_dir_fun_common_option"/>
+ <name name="user2dir"/>
+ <desc>
+ <p>Sets the <seealso marker="#USERDIR">user directory</seealso> dynamically
+ by evaluating the <c>user2dir</c> function.
+ </p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="system_dir_daemon_option"/>
+ <desc>
+ <p>Sets the <seealso marker="#SYSDIR">system directory</seealso>.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="pubkey_passphrase_client_options"/>
+ <desc>
+ <p>If the user's DSA, RSA or ECDSA key is protected by a passphrase, it can be
+      supplied with these options.
+ </p>
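+      <p>For example, a minimal sketch of a client whose RSA key is passphrase protected
+      (the host name and the passphrase are invented):
+      </p>
+      <code>
+%% Invented host name and passphrase
+ssh:connect("host.example.com", 22,
+            [{rsa_pass_phrase, "Pazzw0rd"}]).
+      </code>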
+ </desc>
+ </datatype>
+
+ </datatypes>
+
+ <funcs>
+ <func>
+ <name>host_key(Algorithm, DaemonOptions) -> {ok, Key} | {error, Reason}</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the api description in
+ <seealso marker="ssh:ssh_server_key_api#Module:host_key-2">ssh_server_key_api, Module:host_key/2</seealso>.
+ </p>
+ <p><strong>Options</strong></p>
+ <list>
+ <item><seealso marker="#type-system_dir_daemon_option">system_dir</seealso></item>
+ <!-- item>dsa_pass_phrase</item -->
+ <!-- item>rsa_pass_phrase</item -->
+ <!-- item>ecdsa_pass_phrase</item -->
+ </list>
+ <p><strong>Files</strong></p>
+ <list>
+ <item><seealso marker="#SYSDIR-ssh_host_rsa_key"><c>SYSDIR/ssh_host_rsa_key</c></seealso></item>
+ <item><seealso marker="#SYSDIR-ssh_host_dsa_key"><c>SYSDIR/ssh_host_dsa_key</c></seealso></item>
+ <item><seealso marker="#SYSDIR-ssh_host_ecdsa_key"><c>SYSDIR/ssh_host_ecdsa_key</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>is_auth_key(PublicUserKey, User, DaemonOptions) -> Result</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the api description in
+ <seealso marker="ssh:ssh_server_key_api#Module:is_auth_key-3">ssh_server_key_api: Module:is_auth_key/3</seealso>.
+ </p>
+ <p><strong>Options</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_fun_common_option">user_dir_fun</seealso></item>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ </list>
+ <p><strong>Files</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-authorized_keys"><c>USERDIR/authorized_keys</c></seealso></item>
+ <item><seealso marker="#USERDIR-authorized_keys2"><c>USERDIR/authorized_keys2</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>add_host_key(HostNames, PublicHostKey, ConnectOptions) -> ok | {error, Reason}</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the api description in
+ <seealso marker="ssh:ssh_client_key_api#Module:add_host_key-3">ssh_client_key_api, Module:add_host_key/3</seealso>.
+ </p>
+ <p><strong>Option</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ </list>
+ <p><strong>File</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-known_hosts"><c>USERDIR/known_hosts</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>is_host_key(Key, Host, Algorithm, ConnectOptions) -> Result</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the api description in
+ <seealso marker="ssh:ssh_client_key_api#Module:is_host_key-4">ssh_client_key_api, Module:is_host_key/4</seealso>.
+ </p>
+ <p><strong>Option</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ </list>
+ <p><strong>File</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-known_hosts"><c>USERDIR/known_hosts</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>user_key(Algorithm, ConnectOptions) -> {ok, PrivateKey} | {error, Reason}</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the api description in
+ <seealso marker="ssh:ssh_client_key_api#Module:user_key-2">ssh_client_key_api, Module:user_key/2</seealso>.
+ </p>
+ <p><strong>Options</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ <item><seealso marker="#type-pubkey_passphrase_client_options">dsa_pass_phrase</seealso></item>
+ <item><seealso marker="#type-pubkey_passphrase_client_options">rsa_pass_phrase</seealso></item>
+ <item><seealso marker="#type-pubkey_passphrase_client_options">ecdsa_pass_phrase</seealso></item>
+ </list>
+ <p><strong>Files</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-id_dsa"><c>USERDIR/id_dsa</c></seealso></item>
+ <item><seealso marker="#USERDIR-id_rsa"><c>USERDIR/id_rsa</c></seealso></item>
+ <item><seealso marker="#USERDIR-id_ecdsa"><c>USERDIR/id_ecdsa</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
diff --git a/lib/ssh/doc/src/ssh_sftp.xml b/lib/ssh/doc/src/ssh_sftp.xml
index ea55126cb3..8c105147d6 100644
--- a/lib/ssh/doc/src/ssh_sftp.xml
+++ b/lib/ssh/doc/src/ssh_sftp.xml
@@ -425,7 +425,6 @@
<type>
<v>ChannelPid = pid()</v>
<v>Handle = term()</v>
- <v>Position = integer()</v>
<v>Len = integer()</v>
<v>Timeout = timeout()</v>
<v>Data = string() | binary()</v>
diff --git a/lib/ssh/doc/src/terminology.xml b/lib/ssh/doc/src/terminology.xml
new file mode 100644
index 0000000000..db1e08970d
--- /dev/null
+++ b/lib/ssh/doc/src/terminology.xml
@@ -0,0 +1,185 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2018</year>
+ <year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Terminology</title>
+ <prepared></prepared>
+ <docno></docno>
+ <approved></approved>
+ <date></date>
+ <rev></rev>
+ <file>terminology.xml</file>
+ </header>
+
+ <section>
+ <title>General Information</title>
+    <p>In the following, terms that may cause confusion are explained.
+ </p>
+ </section>
+
+ <section>
+ <title>The term "user"</title>
+    <p>A "user" is a term that everyone understands intuitively. However, the understanding of it may differ, which can
+      cause confusion.
+ </p>
+ <p>The term is used differently in <url href="http://www.openssh.com">OpenSSH</url> and SSH in Erlang/OTP.
+      The reason is the different environments and use cases, which are not immediately obvious.
+ </p>
+ <p>This chapter aims at explaining the differences and giving a rationale for why Erlang/OTP handles "user" as
+ it does.
+ </p>
+
+ <section>
+ <title>In OpenSSH</title>
+      <p>Many have been in contact with the command 'ssh' on a Linux machine (or similar) to remotely log in to
+ another machine. One types
+ </p>
+ <code>ssh host</code>
+ <p>to log in on the machine named <c>host</c>. The command prompts for your password on the remote <c>host</c> and
+      then you can read, write and execute according to the rights that your <i>user name</i> has on the remote <c>host</c>. There are
+      stronger variants with pre-distributed keys or certificates, but those are for now just details of the
+ authentication process.
+ </p>
+ <p>You could log in as the user <c>anotheruser</c> with
+ </p>
+ <code>ssh anotheruser@host</code>
+      <p>and you will then be able to act as <c>anotheruser</c> on the <c>host</c> if authorized correctly.
+ </p>
+      <p>So what does <i>"your user name has rights"</i> mean? In a UNIX/Linux/etc context it means exactly that:
+      the <i>user</i> can read, write and execute programs according to the OS rules.
+ In addition, the user has a home directory (<c>$HOME</c>) and there is a <c>$HOME/.ssh/</c> directory
+ with ssh-specific files.
+ </p>
+ <section>
+ <title>SSH password authentication</title>
+ <p>When SSH tries to log in to a host, the ssh protocol communicates the user name (as a string) and a password.
+ The remote ssh server checks that there is such a user defined and that the provided password is acceptable.
+ </p>
+ <p>If so, the user is authorized.
+ </p>
+ </section>
+ <section>
+ <title>SSH public key authentication</title>
+ <p>This is a stronger method where the ssh protocol brings the user name, the user's public key and some
+      cryptographic information, which we can ignore here.
+ </p>
+ <p>The ssh server on the remote host checks:
+ </p>
+ <list>
+ <item>That the <i>user</i> has a home directory,</item>
+	<item>that the home directory contains a .ssh/ directory, and</item>
+	<item>that the .ssh/ directory contains the just received public key in the <c>authorized_keys</c> file.</item>
+      </list>
+      <p>If so, the user is authorized.
+ </p>
+ </section>
+ <section>
+	<title>The SSH server on UNIX/Linux/etc after a successful authentication</title>
+	<p>After a successful incoming authentication, a new process runs as the just authenticated user.</p>
+	<p>The next step is to start a service according to the ssh request. In case of a shell request,
+	a new shell is started which handles the OS commands that arrive from the client (that is "you").
+	</p>
+	<p>In case of an sftp request, an sftp server is started with the user's rights. So it can read, write or delete
+	files if that user is allowed to.
+ </p>
+ </section>
+ </section>
+
+ <section>
+ <title>In Erlang/OTP SSH</title>
+ <p>For the Erlang/OTP SSH server the situation is different. The server executes in an Erlang process
+ in the Erlang emulator which in turn executes in an OS process. The emulator does not try to change its
+ user when authenticated over the SSH protocol.
+ So the remote user name is only for authentication purposes in the Erlang/OTP SSH application.
+ </p>
+ <section>
+ <title>Password authentication in Erlang SSH</title>
+      <p>The Erlang/OTP SSH server checks the user name and password in the following order (a minimal example follows the list):
+ </p>
+ <list type="ordered">
+ <item>If a
+ <seealso marker="ssh:ssh#option-pwdfun"><c>pwdfun</c></seealso>
+ is defined, that one is called and the returned boolean is the authentication result.
+ </item>
+ <item>Else, if the
+ <seealso marker="ssh:ssh#option-user_passwords"><c>user_passwords</c></seealso>
+	option is defined and the user name and the password match, the authentication is a success.
+	</item>
+	<item>Else, if the option
+	<seealso marker="ssh:ssh#option-password"><c>password</c></seealso>
+	is defined and matches the given password, the authentication is a success.
+ Note that the use of this option is not recommended in non-test code.
+ </item>
+ </list>
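+      <p>A minimal sketch, with an invented port, directory, user name and password, of a daemon
+      relying on the second alternative:
+      </p>
+      <code>
+%% Invented port, directory, user name and password
+{ok, _Daemon} =
+    ssh:daemon(2222,
+               [{system_dir, "/tmp/ssh_daemon"},   % must contain a host key
+                {user_passwords, [{"alice", "wonderland"}]}]).
+      </code>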
+ </section>
+ <section>
+ <title>Public key authentication in Erlang SSH</title>
+      <p>The user name, the public key and the cryptographic data (a signature) that are sent by the client are used as follows
+	(some steps are left out for clarity; a minimal example follows the list):
+ </p>
+ <list type="ordered">
+	<item>A callback module is selected using the option
+ <seealso marker="ssh:ssh#type-key_cb_common_option"><c>key_cb</c></seealso>.
+ </item>
+	<item>The callback module is used to check that the provided public key is one of the user's pre-stored keys.
+ In case of the default callback module, the files <c>authorized_keys</c> and <c>authorized_keys2</c>
+ are searched in a directory found in the following order:
+ <list>
+ <item>If the option
+ <seealso marker="ssh:ssh_file#type-user_dir_fun_common_option"><c>user_dir_fun</c></seealso>
+ is defined, that fun is called and the returned directory is used,
+ </item>
+	  <item>Else, if the option
+ <seealso marker="ssh:ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
+ is defined, that directory is used,
+ </item>
+ <item>Else the subdirectory <c>.ssh</c> in the home directory of the user executing
+ the OS process of the Erlang emulator is used.
+ </item>
+ </list>
+ If the provided public key is not found, the authentication fails.
+ </item>
+ <item>Finally, if the provided public key is found, the signature provided by the client is checked with
+ the public key.
+ </item>
+ </list>
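+      <p>As an illustration, a minimal sketch, with invented port and directories, of a daemon
+      that uses <c>user_dir_fun</c> (the first alternative above) to give each remote user a
+      separate key directory:
+      </p>
+      <code>
+%% Invented port and directories
+{ok, _Daemon} =
+    ssh:daemon(2222,
+               [{system_dir, "/tmp/ssh_daemon"},   % must contain a host key
+                {user_dir_fun,
+                 fun(RemoteUser) ->
+                         filename:join("/tmp/ssh_users", RemoteUser)
+                 end}]).
+      </code>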
+ </section>
+ <section>
+	<title>The Erlang/OTP SSH server after a successful authentication</title>
+	<p>After a successful authentication, an <i>Erlang process</i> handles the service request from the remote
+ ssh client. The rights of that process are those of the user of the OS process running the Erlang emulator.
+ </p>
+	<p>If a shell service request arrives at the server, an <i>Erlang shell</i> is opened in the server's emulator.
+	The rights in that shell are independent of the just authenticated user.
+ </p>
+ <p>In case of an sftp request, an sftp server is started with the rights of the user of the Erlang emulator's OS
+ process. So with sftp the authenticated user does not influence the rights.
+ </p>
+ <p>So after an authentication, the user name is not used anymore and has no influence.
+ </p>
+ </section>
+ </section>
+ </section>
+</chapter>
+
diff --git a/lib/ssh/doc/src/usersguide.xml b/lib/ssh/doc/src/usersguide.xml
index 38ffa48cde..8a4df208d8 100644
--- a/lib/ssh/doc/src/usersguide.xml
+++ b/lib/ssh/doc/src/usersguide.xml
@@ -36,5 +36,6 @@
</description>
<xi:include href="introduction.xml"/>
<xi:include href="using_ssh.xml"/>
+ <xi:include href="terminology.xml"/>
<xi:include href="configure_algos.xml"/>
</part>
diff --git a/lib/ssh/doc/src/using_ssh.xml b/lib/ssh/doc/src/using_ssh.xml
index 80662e9a70..4455d5ecc5 100644
--- a/lib/ssh/doc/src/using_ssh.xml
+++ b/lib/ssh/doc/src/using_ssh.xml
@@ -74,16 +74,17 @@
<marker id="Running an Erlang ssh Daemon"></marker>
<title>Running an Erlang ssh Daemon</title>
- <p>The <c>system_dir</c> option must be a directory containing a host
- key file and it defaults to <c>/etc/ssh</c>. For details, see Section
- Configuration Files in <seealso
- marker="SSH_app">ssh(6)</seealso>.
+ <p>The
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>
+ option must be a directory containing a host key file and it defaults to <c>/etc/ssh</c>.
+ For details, see Section Configuration Files in <seealso marker="SSH_app">ssh(6)</seealso>.
</p>
<note><p>Normally, the <c>/etc/ssh</c> directory is only readable by root.</p>
</note>
- <p>The option <c>user_dir</c> defaults to directory <c>users ~/.ssh</c>.</p>
+ <p>The option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
+      defaults to the user's <c>~/.ssh</c> directory.</p>
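+      <p>As a minimal sketch (the port and the directory names below are invented), both
+      directories can be overridden when the daemon is started:</p>
+      <code>
+%% Invented port and directory names
+ssh:daemon(2222, [{system_dir, "/tmp/my_system_dir"},
+                  {user_dir,   "/tmp/my_user_dir"}]).
+      </code>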
<p><em>Step 1.</em> To run the example without root privileges,
generate new keys and host keys:</p>
diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl
index 94b9f3a196..f645201c4f 100644
--- a/lib/ssh/src/ssh.hrl
+++ b/lib/ssh/src/ssh.hrl
@@ -173,7 +173,7 @@
-type common_options() :: [ common_option() ].
-type common_option() ::
- user_dir_common_option()
+ ssh_file:user_dir_common_option()
| profile_common_option()
| max_idle_time_common_option()
| key_cb_common_option()
@@ -182,6 +182,7 @@
| ssh_msg_debug_fun_common_option()
| rekey_limit_common_option()
| id_string_common_option()
+ | pref_public_key_algs_common_option()
| preferred_algorithms_common_option()
| modify_algorithms_common_option()
| auth_methods_common_option()
@@ -191,8 +192,6 @@
-define(COMMON_OPTION, common_option()).
-
--type user_dir_common_option() :: {user_dir, false | string()}.
-type profile_common_option() :: {profile, atom() }.
-type max_idle_time_common_option() :: {idle_time, timeout()}.
-type rekey_limit_common_option() :: {rekey_limit, Bytes::limit_bytes() |
@@ -211,6 +210,7 @@
{ssh_msg_debug_fun, fun((ssh:connection_ref(),AlwaysDisplay::boolean(),Msg::binary(),LanguageTag::binary()) -> any()) } .
-type id_string_common_option() :: {id_string, string() | random | {random,Nmin::pos_integer(),Nmax::pos_integer()} }.
+-type pref_public_key_algs_common_option() :: {pref_public_key_algs, [pubkey_alg()] } .
-type preferred_algorithms_common_option():: {preferred_algorithms, algs_list()}.
-type modify_algorithms_common_option() :: {modify_algorithms, modify_algs_list()}.
-type auth_methods_common_option() :: {auth_methods, string() }.
@@ -223,14 +223,13 @@
{transport, {atom(),atom(),atom()} }
| {vsn, {non_neg_integer(),non_neg_integer()} }
| {tstflg, list(term())}
- | {user_dir_fun, fun()}
+ | ssh_file:user_dir_fun_common_option()
| {max_random_length_padding, non_neg_integer()} .
-type client_option() ::
- pref_public_key_algs_client_option()
- | pubkey_passphrase_client_options()
+ ssh_file:pubkey_passphrase_client_options()
| host_accepting_client_options()
| authentication_client_options()
| diffie_hellman_group_exchange_client_option()
@@ -244,12 +243,6 @@
{keyboard_interact_fun, fun((term(),term(),term()) -> term())}
| opaque_common_options().
--type pref_public_key_algs_client_option() :: {pref_public_key_algs, [pubkey_alg()] } .
-
--type pubkey_passphrase_client_options() :: {dsa_pass_phrase, string()}
- | {rsa_pass_phrase, string()}
- | {ecdsa_pass_phrase, string()} .
-
-type host_accepting_client_options() ::
{silently_accept_hosts, accept_hosts()}
| {user_interaction, boolean()}
@@ -299,8 +292,9 @@
-type 'shell_fun/1'() :: fun((User::string()) -> pid()) .
-type 'shell_fun/2'() :: fun((User::string(), PeerAddr::inet:ip_address()) -> pid()).
--type exec_daemon_option() :: {exec, 'exec_fun/1'() | 'exec_fun/2'() | 'exec_fun/3'() }.
-
+-type exec_daemon_option() :: {exec, exec_spec()} .
+-type exec_spec() :: {direct, exec_fun()} .
+-type exec_fun() :: 'exec_fun/1'() | 'exec_fun/2'() | 'exec_fun/3'().
-type 'exec_fun/1'() :: fun((Cmd::string()) -> exec_result()) .
-type 'exec_fun/2'() :: fun((Cmd::string(), User::string()) -> exec_result()) .
-type 'exec_fun/3'() :: fun((Cmd::string(), User::string(), ClientAddr::ip_port()) -> exec_result()) .
@@ -311,7 +305,7 @@
-type send_ext_info_daemon_option() :: {send_ext_info, boolean()} .
-type authentication_daemon_options() ::
- {system_dir, string()}
+ ssh_file:system_dir_daemon_option()
| {auth_method_kb_interactive_data, prompt_texts() }
| {user_passwords, [{UserName::string(),Pwd::string()}]}
| {password, string()}
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 4b41c10cbb..30eafc2f2a 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -983,6 +983,10 @@ handle_event(_, #ssh_msg_userauth_info_request{}, {userauth_keyboard_interactive
%%% ######## {connected, client|server} ####
+%% Skip ext_info messages in connected state (for example from OpenSSH >= 7.7)
+handle_event(_, #ssh_msg_ext_info{}, {connected,_Role}, D) ->
+ {keep_state, D};
+
handle_event(_, {#ssh_msg_kexinit{},_}, {connected,Role}, D0) ->
{KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D0#data.ssh_params),
D = D0#data{ssh_params = Ssh,
diff --git a/lib/ssh/src/ssh_file.erl b/lib/ssh/src/ssh_file.erl
index 832952ed52..669b0f9be2 100644
--- a/lib/ssh/src/ssh_file.erl
+++ b/lib/ssh/src/ssh_file.erl
@@ -39,6 +39,23 @@
is_auth_key/3]).
+-export_type([system_dir_daemon_option/0,
+ user_dir_common_option/0,
+ user_dir_fun_common_option/0,
+ pubkey_passphrase_client_options/0
+ ]).
+
+-type system_dir_daemon_option() :: {system_dir, string()}.
+-type user_dir_common_option() :: {user_dir, string()}.
+-type user_dir_fun_common_option() :: {user_dir_fun, user2dir()}.
+-type user2dir() :: fun((RemoteUserName::string()) -> UserDir :: string()) .
+
+-type pubkey_passphrase_client_options() :: {dsa_pass_phrase, string()}
+ | {rsa_pass_phrase, string()}
+ | {ecdsa_pass_phrase, string()} .
+
+
+
-define(PERM_700, 8#700).
-define(PERM_644, 8#644).
diff --git a/lib/ssh/src/ssh_options.erl b/lib/ssh/src/ssh_options.erl
index bc9f2156bc..bc9b0b6eda 100644
--- a/lib/ssh/src/ssh_options.erl
+++ b/lib/ssh/src/ssh_options.erl
@@ -452,12 +452,6 @@ default(client) ->
class => user_options
},
- {pref_public_key_algs, def} =>
- #{default => ssh_transport:default_algorithms(public_key),
- chk => fun check_pref_public_key_algs/1,
- class => user_options
- },
-
{dh_gex_limits, def} =>
#{default => {1024, 6144, 8192}, % FIXME: Is this true nowadays?
chk => fun({Min,I,Max}) ->
@@ -523,6 +517,12 @@ default(common) ->
class => user_options
},
+ {pref_public_key_algs, def} =>
+ #{default => ssh_transport:default_algorithms(public_key),
+ chk => fun check_pref_public_key_algs/1,
+ class => user_options
+ },
+
{preferred_algorithms, def} =>
#{default => ssh:default_algorithms(),
chk => fun check_preferred_algorithms/1,
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index c5b0704925..7424c9bcaf 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -759,8 +759,7 @@ ext_info_message(#ssh{role=server,
send_ext_info=true,
opts = Opts} = Ssh0) ->
AlgsList = lists:map(fun erlang:atom_to_list/1,
- proplists:get_value(public_key,
- ?GET_OPT(preferred_algorithms, Opts))),
+ ?GET_OPT(pref_public_key_algs, Opts)),
Msg = #ssh_msg_ext_info{nr_extensions = 1,
data = [{"server-sig-algs", string:join(AlgsList,",")}]
},
diff --git a/lib/ssh/test/.gitignore b/lib/ssh/test/.gitignore
new file mode 100644
index 0000000000..c9d5f086b3
--- /dev/null
+++ b/lib/ssh/test/.gitignore
@@ -0,0 +1,5 @@
+
+
+property_test/ssh_eqc_client_server_dirs/system
+property_test/ssh_eqc_client_server_dirs/user
+
diff --git a/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl b/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl
index 6d0d8f5d99..f4b521356f 100644
--- a/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl
+++ b/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl
@@ -58,6 +58,7 @@
%%% Properties:
prop_seq(Config) ->
+ error_logger:tty(false),
{ok,Pid} = ssh_eqc_event_handler:add_report_handler(),
{_, _, Port} = init_daemon(Config),
numtests(1000,
@@ -66,16 +67,25 @@ prop_seq(Config) ->
send_bad_sequence(Port, Delay, Pid),
not any_relevant_error_report(Pid)
catch
- C:E -> io:format('~p:~p~n',[C,E]),
+ C:E:S -> ct:log("~p:~p~n~p",[C,E,S]),
false
end
)).
send_bad_sequence(Port, Delay, Pid) ->
- {ok,S} = gen_tcp:connect("localhost",Port,[]),
- gen_tcp:send(S,"Illegal info-string\r\n"),
- ssh_test_lib:sleep_microsec(Delay),
- gen_tcp:close(S).
+ send_bad_sequence(Port, Delay, Pid, 10).
+
+send_bad_sequence(Port, Delay, Pid, N) ->
+ case gen_tcp:connect("localhost",Port,[]) of
+ {ok,S} ->
+ gen_tcp:send(S,"Illegal info-string\r\n"),
+ ssh_test_lib:sleep_microsec(Delay),
+ gen_tcp:close(S);
+
+ {error,econnreset} when N>0 ->
+ timer:sleep(1),
+ send_bad_sequence(Port, Delay, Pid, N-1)
+ end.
any_relevant_error_report(Pid) ->
{ok, Reports} = ssh_eqc_event_handler:get_reports(Pid),
diff --git a/lib/ssh/test/property_test/ssh_eqc_client_server.erl b/lib/ssh/test/property_test/ssh_eqc_client_server.erl
index 39d0b4e410..acb0faa0c7 100644
--- a/lib/ssh/test/property_test/ssh_eqc_client_server.erl
+++ b/lib/ssh/test/property_test/ssh_eqc_client_server.erl
@@ -22,25 +22,27 @@
-module(ssh_eqc_client_server).
-compile(export_all).
+
+-proptest([proper]).
--include_lib("common_test/include/ct.hrl").
-
--ifdef(PROPER).
-%% Proper is not supported.
--else.
--ifdef(TRIQ).
-%% Proper is not supported.
+-ifndef(PROPER).
-else.
+%% Only use proper
+%%
+%% Previously only EQC was supported, but the changes to support PROPER are not
+%% just a wrapper. Since we do not have access to eqc we can't test the changes,
+%% so eqc is disabled.
+%% However, with access to eqc it ought to be quite easy to re-enable eqc by
+%% studying the diff.
+-include_lib("proper/include/proper.hrl").
+-define(MOD_eqc,proper).
+
+-include_lib("common_test/include/ct.hrl").
%% Limit the testing time on CI server... this needs to be improved in % from total budget.
-define(TESTINGTIME(Prop), eqc:testing_time(30,Prop)).
-
--include_lib("eqc/include/eqc.hrl").
--include_lib("eqc/include/eqc_statem.hrl").
--eqc_group_commands(true).
-
-define(SSH_DIR,"ssh_eqc_client_server_dirs").
-define(sec, *1000).
@@ -51,10 +53,6 @@
port
}).
--record(conn,{ref,
- srvr_ref
- }).
-
-record(chan, {ref,
conn_ref,
subsystem,
@@ -65,7 +63,7 @@
initialized = false,
servers = [], % [#srvr{}]
clients = [],
- connections = [], % [#conn{}]
+ connections = [],
channels = [], % [#chan{}]
data_dir
}).
@@ -80,9 +78,8 @@
-define(SUBSYSTEMS, ["echo1", "echo2", "echo3", "echo4"]).
--define(SERVER_ADDRESS, { {127,1,0,choose(1,254)}, % IP
- choose(1024,65535) % Port
- }).
+-define(SERVER_ADDRESS, {127,0,0,1}). % Server listening IP. Darwin, Solaris & FreeBSD
+                                      % dislike all others in 127.0.0.0/24
-define(SERVER_EXTRA_OPTIONS, [{parallel_login,bool()}] ).
@@ -104,10 +101,12 @@
%% To be called as eqc:quickcheck( ssh_eqc_client_server:prop_seq() ).
prop_seq() ->
- ?TESTINGTIME(do_prop_seq(?SSH_DIR)).
+ error_logger:tty(false),
+ ?TESTINGTIME(do_prop_seq(?SSH_DIR)).
%% To be called from a common_test test suite
prop_seq(CT_Config) ->
+ error_logger:tty(false),
do_prop_seq(full_path(?SSH_DIR, CT_Config)).
@@ -124,10 +123,12 @@ full_path(SSHdir, CT_Config) ->
SSHdir).
%%%----
prop_parallel() ->
+ error_logger:tty(false),
?TESTINGTIME(do_prop_parallel(?SSH_DIR)).
%% To be called from a common_test test suite
prop_parallel(CT_Config) ->
+ error_logger:tty(false),
do_prop_parallel(full_path(?SSH_DIR, CT_Config)).
do_prop_parallel(DataDir) ->
@@ -139,22 +140,22 @@ do_prop_parallel(DataDir) ->
end).
%%%----
-prop_parallel_multi() ->
- ?TESTINGTIME(do_prop_parallel_multi(?SSH_DIR)).
-
-%% To be called from a common_test test suite
-prop_parallel_multi(CT_Config) ->
- do_prop_parallel_multi(full_path(?SSH_DIR, CT_Config)).
-
-do_prop_parallel_multi(DataDir) ->
- setup_rsa(DataDir),
- ?FORALL(Repetitions,?SHRINK(1,[10]),
- ?FORALL(Cmds,parallel_commands(?MODULE),
- ?ALWAYS(Repetitions,
- begin
- {H,Sf,Result} = run_parallel_commands(?MODULE,Cmds,[{data_dir,DataDir}]),
- present_result(?MODULE, Cmds, {H,Sf,Result}, Result==ok)
- end))).
+%% prop_parallel_multi() ->
+%% ?TESTINGTIME(do_prop_parallel_multi(?SSH_DIR)).
+
+%% %% To be called from a common_test test suite
+%% prop_parallel_multi(CT_Config) ->
+%% do_prop_parallel_multi(full_path(?SSH_DIR, CT_Config)).
+
+%% do_prop_parallel_multi(DataDir) ->
+%% setup_rsa(DataDir),
+%% ?FORALL(Repetitions,?SHRINK(1,[10]),
+%% ?FORALL(Cmds,parallel_commands(?MODULE),
+%% ?ALWAYS(Repetitions,
+%% begin
+%% {H,Sf,Result} = run_parallel_commands(?MODULE,Cmds,[{data_dir,DataDir}]),
+%% present_result(?MODULE, Cmds, {H,Sf,Result}, Result==ok)
+%% end))).
%%%================================================================
%%% State machine spec
@@ -169,13 +170,50 @@ initial_state(DataDir) ->
ssh:start().
%%%----------------
-weight(S, ssh_send) -> 5*length([C || C<-S#state.channels, has_subsyst(C)]);
-weight(S, ssh_start_subsyst) -> 3*length([C || C<-S#state.channels, no_subsyst(C)]);
+weight(S, ssh_send) -> 20*length([C || C<-S#state.channels, has_subsyst(C)]);
+weight(S, ssh_start_subsyst) -> 10*length([C || C<-S#state.channels, no_subsyst(C)]);
weight(S, ssh_close_channel) -> 2*length([C || C<-S#state.channels, has_subsyst(C)]);
-weight(S, ssh_open_channel) -> length(S#state.connections);
+weight(S, ssh_open_channel) -> 2*length(S#state.connections);
weight(_S, _) -> 1.
%%%----------------
+fns() -> [initial_state,
+ ssh_server,
+ ssh_client,
+ ssh_open_connection,
+ ssh_close_connection,
+ ssh_open_channel,
+ ssh_close_channel,
+ ssh_start_subsyst,
+ ssh_send
+ ].
+
+call_f(Name, Sfx) ->
+ case get({Name,Sfx}) of
+ undefined -> F = list_to_atom(lists:concat([Name,"_",Sfx])),
+ put({Name,Sfx}, F),
+ F;
+ F when is_atom(F) -> F
+ end.
+
+-define(call(Name, What, Args), apply(?MODULE, call_f(Name,What), Args)).
+
+symbolic_call(S,Name) -> {call, ?MODULE, Name, ?call(Name,args,[S])}.
+
+may_generate(S, F) -> ?call(F,pre,[S]).
+
+command(S) ->
+ frequency([{weight(S,F), symbolic_call(S,F)} || F <- fns(),
+ may_generate(S, F)]
+ ).
+
+precondition(S, {call,_M,F,As}) -> try ?call(F, pre, [S,As])
+ catch _:undef -> try ?call(F,pre,[S]) catch _:undef -> true end
+ end.
+next_state(S, Res, {call,_M,F,As}) -> try ?call(F, next, [S,Res,As]) catch _:undef -> S end.
+postcondition(S, {call,_M,F,As}, Res) -> try ?call(F, post, [S,As,Res]) catch _:undef -> true end.
+
+%%%----------------
%%% Initialize
initial_state_pre(S) -> not S#state.initialized.
@@ -200,24 +238,34 @@ ssh_server_pre(S) -> S#state.initialized andalso
ssh_server_args(_) -> [?SERVER_ADDRESS, {var,data_dir}, ?SERVER_EXTRA_OPTIONS].
-ssh_server({IP,Port}, DataDir, ExtraOptions) ->
- ok(ssh:daemon(IP, Port,
- [
- {system_dir, system_dir(DataDir)},
- {user_dir, user_dir(DataDir)},
- {subsystems, [{SS, {ssh_eqc_subsys, [SS]}} || SS <- ?SUBSYSTEMS]}
- | ExtraOptions
- ])).
-
-ssh_server_post(_S, _Args, {error,eaddrinuse}) -> true;
-ssh_server_post(_S, _Args, Result) -> is_ok(Result).
-
-ssh_server_next(S, {error,eaddrinuse}, _) -> S;
-ssh_server_next(S, Result, [{IP,Port},_,_]) ->
- S#state{servers=[#srvr{ref = Result,
- address = IP,
- port = Port}
- | S#state.servers]}.
+ssh_server(IP0, DataDir, ExtraOptions) ->
+ case ssh:daemon(IP0, 0,
+ [
+ {system_dir, system_dir(DataDir)},
+ {user_dir, user_dir(DataDir)},
+ {subsystems, [{SS, {ssh_eqc_subsys, [SS]}} || SS <- ?SUBSYSTEMS]}
+ | ExtraOptions
+ ]) of
+ {ok,DaemonRef} ->
+ case ssh:daemon_info(DaemonRef) of
+ {ok, Props} ->
+ Port = proplists:get_value(port,Props),
+ IP = proplists:get_value(ip,Props),
+ #srvr{ref = DaemonRef,
+ address = IP,
+ port = Port};
+ Other ->
+ Other
+ end;
+ Other ->
+ Other
+ end.
+
+ssh_server_post(_S, _Args, #srvr{port=Port}) -> (0 < Port) andalso (Port < 65536);
+ssh_server_post(_S, _Args, _) -> false.
+
+ssh_server_next(S, Srvr, _) ->
+ S#state{servers=[Srvr | S#state.servers]}.
%%%----------------
%%% Start a new client
@@ -271,8 +319,7 @@ ssh_open_connection(#srvr{address=Ip, port=Port}, DataDir) ->
ssh_open_connection_post(_S, _Args, Result) -> is_ok(Result).
-ssh_open_connection_next(S, ConnRef, [#srvr{ref=SrvrRef},_]) ->
- S#state{connections=[#conn{ref=ConnRef, srvr_ref=SrvrRef}|S#state.connections]}.
+ssh_open_connection_next(S, ConnRef, [_,_]) -> S#state{connections=[ConnRef|S#state.connections]}.
%%%----------------
%%% Stop a new connection
@@ -282,12 +329,12 @@ ssh_close_connection_pre(S) -> S#state.connections /= [].
ssh_close_connection_args(S) -> [oneof(S#state.connections)].
-ssh_close_connection(#conn{ref=ConnectionRef}) -> ssh:close(ConnectionRef).
+ssh_close_connection(ConnectionRef) -> ssh:close(ConnectionRef).
-ssh_close_connection_next(S, _, [Conn=#conn{ref=ConnRef}]) ->
- S#state{connections = S#state.connections--[Conn],
- channels = [C || C <- S#state.channels,
- C#chan.conn_ref /= ConnRef]
+ssh_close_connection_next(S, _, [ConnRef]) ->
+ S#state{connections = S#state.connections--[ConnRef],
+ channels = [C || C <- S#state.channels,
+ C#chan.conn_ref /= ConnRef]
}.
%%%----------------
@@ -299,14 +346,14 @@ ssh_open_channel_pre(S) -> S#state.connections /= [].
ssh_open_channel_args(S) -> [oneof(S#state.connections)].
%%% For re-arrangement in parallel tests.
-ssh_open_channel_pre(S,[C]) -> lists:member(C,S#state.connections).
+ssh_open_channel_pre(S,[C]) when is_record(S,state) -> lists:member(C,S#state.connections).
-ssh_open_channel(#conn{ref=ConnectionRef}) ->
+ssh_open_channel(ConnectionRef) ->
ok(ssh_connection:session_channel(ConnectionRef, 20?sec)).
ssh_open_channel_post(_S, _Args, Result) -> is_ok(Result).
-ssh_open_channel_next(S, ChannelRef, [#conn{ref=ConnRef}]) ->
+ssh_open_channel_next(S, ChannelRef, [ConnRef]) ->
S#state{channels=[#chan{ref=ChannelRef,
conn_ref=ConnRef}
| S#state.channels]}.
@@ -326,9 +373,7 @@ ssh_close_channel_next(S, _, [C]) ->
S#state{channels = [Ci || Ci <- S#state.channels,
sig(C) /= sig(Ci)]}.
-
sig(C) -> {C#chan.ref, C#chan.conn_ref}.
-
%%%----------------
%%% Start a sub system on a channel
@@ -361,9 +406,10 @@ ssh_start_subsyst_next(S, _Result, [C,SS,Pid|_]) ->
ssh_send_pre(S) -> lists:any(fun has_subsyst/1, S#state.channels).
-ssh_send_args(S) -> [oneof(lists:filter(fun has_subsyst/1, S#state.channels)),
- choose(0,1),
- message()].
+ssh_send_args(S) ->
+ [oneof(lists:filter(fun has_subsyst/1, S#state.channels)),
+ choose(0,1),
+ message()].
%% For re-arrangement in parallel tests.
ssh_send_pre(S, [C|_]) -> lists:member(C, S#state.channels).
@@ -388,17 +434,17 @@ ssh_send(C=#chan{conn_ref=ConnectionRef, ref=ChannelRef, client_pid=Pid}, Type,
end).
ssh_send_blocking(_S, _Args) ->
- true.
+ true.
ssh_send_post(_S, [C,_,Msg], Response) when is_binary(Response) ->
- Expected = ssh_eqc_subsys:response(modify_msg(C,Msg), C#chan.subsystem),
+ Expected = ssh_eqc_subsys:response(modify_msg(C,Msg), C#chan.subsystem),
case Response of
Expected -> true;
_ -> {send_failed, size(Response), size(Expected)}
end;
ssh_send_post(_S, _Args, Response) ->
- {error,Response}.
+ {error,Response}.
modify_msg(_, <<>>) -> <<>>;
@@ -440,7 +486,11 @@ present_result(_Module, Cmds, _Triple, true) ->
true)))));
present_result(Module, Cmds, Triple, false) ->
- pretty_commands(Module, Cmds, Triple, [{show_states,true}], false).
+ pretty_comands(Module, Cmds, Triple, [{show_states,true}], false),
+ false. % Proper dislikes non-boolean results while eqc treats non-true as false.
+
+pretty_comands(Module, Cmds, Triple, Opts, Bool) ->
+ ct:log("Module = ~p,~n Cmds = ~p,~n Triple = ~p,~n Opts = ~p,~n Bool = ~p",[Module, Cmds, Triple, Opts, Bool]).
@@ -476,23 +526,35 @@ traverse_commands(Fseq, Fpar, {Seq, ParLs}) -> lists:append([Fseq(Seq)|Fpar(ParL
print_frequencies() -> print_frequencies(10).
print_frequencies(Ngroups) -> fun([]) -> io:format('Empty list!~n',[]);
- (L ) -> print_frequencies(L,Ngroups,0,element(1,lists:last(L)))
+ (L ) ->
+ try
+ M = lists:last(L),
+ Max = if is_integer(M) -> M;
+ is_tuple(M) -> element(1,L)
+                                                is_tuple(M) -> element(1,M)
+ print_frequencies(L,Ngroups,0,Max)
+ catch
+ C:E:S ->
+ ct:pal("~p:~p ~p:~p~n~p~n~p",[?MODULE,?LINE,C,E,S,L])
+ end
end.
+
print_frequencies(Ngroups, MaxValue) -> fun(L) -> print_frequencies(L,Ngroups,0,MaxValue) end.
print_frequencies(L, N, Min, Max) when N>Max -> print_frequencies(L++[{N,0}], N, Min, N);
-print_frequencies(L, N, Min, Max) ->
-%%io:format('L=~p~n',[L]),
+print_frequencies(L, N, Min, Max0) ->
try
+ Interval = round((Max0-Min)/N),
+ Max = Max0 + (Max0 rem Interval),
IntervalUpperLimits =
lists:reverse(
- [Max | tl(lists:reverse(lists:seq(Min,Max,round((Max-Min)/N))))]
+ [Max | tl(lists:reverse(lists:seq(Min,Max,Interval)))]
),
{Acc0,_} = lists:mapfoldl(fun(Upper,Lower) ->
{{{Lower,Upper},0}, Upper+1}
end, hd(IntervalUpperLimits), tl(IntervalUpperLimits)),
- Fs0 = get_frequencies(L, Acc0),
+ Fs0 = get_frequencies(L, Acc0),
SumVal = lists:sum([V||{_,V}<-Fs0]),
Fs = with_percentage(Fs0, SumVal),
Mean = mean(L),
@@ -517,7 +579,6 @@ print_frequencies(L, N, Min, Max) ->
|| {Interval={Rlow,Rhigh},Val,Percent} <- Fs],
io:format('~*c ~*c~n',[2*Npos_range,32,Npos_value+2,$-]),
io:format('~*c ~*w~n',[2*Npos_range,32,Npos_value,SumVal])
- %%,io:format('L=~p~n',[L])
catch
C:E ->
io:format('*** Faild printing (~p:~p) for~n~p~n',[C,E,L])
@@ -527,6 +588,8 @@ get_frequencies([{I,Num}|T], [{{Lower,Upper},Cnt}|Acc]) when Lower=<I,I=<Upper -
get_frequencies(T, [{{Lower,Upper},Cnt+Num}|Acc]);
get_frequencies(L=[{I,_Num}|_], [Ah={{_Lower,Upper},_Cnt}|Acc]) when I>Upper ->
[Ah | get_frequencies(L,Acc)];
+get_frequencies([I|T], Acc) when is_integer(I) ->
+ get_frequencies([{I,1}|T], Acc);
get_frequencies([], Acc) ->
Acc.
@@ -616,4 +679,3 @@ erase_dir(Dir) ->
file:del_dir(Dir).
-endif.
--endif.
diff --git a/lib/ssh/test/ssh_compat_SUITE.erl b/lib/ssh/test/ssh_compat_SUITE.erl
index 1c607bebe8..af85ef7aee 100644
--- a/lib/ssh/test/ssh_compat_SUITE.erl
+++ b/lib/ssh/test/ssh_compat_SUITE.erl
@@ -648,6 +648,7 @@ setup_remote_priv_and_local_auth_keys(KeyAlg, IP, Port, UserDir, Config) ->
{silently_accept_hosts,true},
{user_interaction,false}
]),
+ rm_id_in_remote_dir(Ch, ".ssh"),
_ = ssh_sftp:make_dir(Ch, ".ssh"),
DstFile = filename:join(".ssh", dst_filename(user,KeyAlg)),
ok = ssh_sftp:write_file(Ch, DstFile, Priv),
@@ -658,6 +659,18 @@ setup_remote_priv_and_local_auth_keys(KeyAlg, IP, Port, UserDir, Config) ->
ok = ssh:close(Cc),
UserDir.
+rm_id_in_remote_dir(Ch, Dir) ->
+ case ssh_sftp:list_dir(Ch, Dir) of
+ {error,_Error} ->
+ ok;
+ {ok,FileNames} ->
+ lists:foreach(fun("id_"++_ = F) ->
+ ok = ssh_sftp:delete(Ch, filename:join(Dir,F));
+ (_) ->
+ leave
+ end, FileNames)
+ end.
+
user_priv_pub_keys(Config, KeyAlg) -> priv_pub_keys("users_keys", user, Config, KeyAlg).
host_priv_pub_keys(Config, KeyAlg) -> priv_pub_keys("host_keys", host, Config, KeyAlg).
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all b/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
index 0dcf8cb570..c2e77fcc79 100755
--- a/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
+++ b/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
@@ -18,6 +18,12 @@ SSH_SSL_VERSIONS=(\
openssh 7.6p1 openssl 1.0.2n \
\
openssh 7.6p1 libressl 2.6.4 \
+ \
+ openssh 7.7p1 openssl 1.0.2p \
+ openssh 7.8p1 openssl 1.0.2p \
+ openssh 7.9p1 openssl 1.0.2p \
+ \
+ openssh 7.9p1 libressl 2.6.4 \
)
if [ "x$1" == "x-b" ]
diff --git a/lib/ssh/test/ssh_property_test_SUITE.erl b/lib/ssh/test/ssh_property_test_SUITE.erl
index 3318b86d39..9aaac898a0 100644
--- a/lib/ssh/test/ssh_property_test_SUITE.erl
+++ b/lib/ssh/test/ssh_property_test_SUITE.erl
@@ -46,8 +46,9 @@ groups() ->
[{messages, [], [decode,
decode_encode]},
{client_server, [], [client_server_sequential,
- client_server_parallel,
- client_server_parallel_multi]}
+ client_server_parallel
+ %% client_server_parallel_multi
+ ]}
].
@@ -62,7 +63,7 @@ end_per_suite(Config) ->
%%% if we run proper.
init_per_group(client_server, Config) ->
case proplists:get_value(property_test_tool,Config) of
- eqc -> Config;
+ proper -> Config;
X -> {skip, lists:concat([X," is not supported"])}
end;
init_per_group(_, Config) ->
diff --git a/lib/ssl/doc/src/notes.xml b/lib/ssl/doc/src/notes.xml
index 673431ed0a..46fd8ab180 100644
--- a/lib/ssl/doc/src/notes.xml
+++ b/lib/ssl/doc/src/notes.xml
@@ -333,6 +333,38 @@
</section>
+<section><title>SSL 8.2.6.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Add engine support for RSA key exchange</p>
+ <p>
+ Own Id: OTP-15420</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>SSL 8.2.6.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+	    Extend the check for undelivered data at closing; it could under
+	    some circumstances fail to deliver all data that was
+	    actually received.</p>
+ <p>
+ Own Id: OTP-15412</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>SSL 8.2.6.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -3163,5 +3195,3 @@
</section>
</section>
</chapter>
-
-
diff --git a/lib/ssl/src/dtls_record.erl b/lib/ssl/src/dtls_record.erl
index 9eb0d8e2d7..b7346d3ec8 100644
--- a/lib/ssl/src/dtls_record.erl
+++ b/lib/ssl/src/dtls_record.erl
@@ -499,23 +499,22 @@ encode_dtls_cipher_text(Type, {MajVer, MinVer}, Fragment,
WriteState#{sequence_number => Seq + 1}}.
encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
+ cipher_state := CipherS0,
epoch := Epoch,
sequence_number := Seq,
- cipher_state := CipherS0,
security_parameters :=
#security_parameters{
cipher_type = ?AEAD,
- bulk_cipher_algorithm =
- BulkCipherAlgo,
+ bulk_cipher_algorithm = BCAlg,
compression_algorithm = CompAlg}
} = WriteState0) ->
{Comp, CompS1} = ssl_record:compress(CompAlg, Data, CompS0),
- AAD = calc_aad(Type, Version, Epoch, Seq),
+ AAD = start_additional_data(Type, Version, Epoch, Seq),
+ CipherS = ssl_record:nonce_seed(BCAlg, <<?UINT16(Epoch), ?UINT48(Seq)>>, CipherS0),
+ WriteState = WriteState0#{compression_state => CompS1,
+ cipher_state => CipherS},
TLSVersion = dtls_v1:corresponding_tls_version(Version),
- {CipherFragment, CipherS1} =
- ssl_cipher:cipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, Comp, TLSVersion),
- {CipherFragment, WriteState0#{compression_state => CompS1,
- cipher_state => CipherS1}};
+ ssl_record:cipher_aead(TLSVersion, Comp, WriteState, AAD);
encode_plain_text(Type, Version, Fragment, #{compression_state := CompS0,
epoch := Epoch,
sequence_number := Seq,
@@ -547,9 +546,10 @@ decode_cipher_text(#ssl_tls{type = Type, version = Version,
BulkCipherAlgo,
compression_algorithm = CompAlg}} = ReadState0,
ConnnectionStates0) ->
- AAD = calc_aad(Type, Version, Epoch, Seq),
+ AAD = start_additional_data(Type, Version, Epoch, Seq),
+ CipherS1 = ssl_record:nonce_seed(BulkCipherAlgo, <<?UINT16(Epoch), ?UINT48(Seq)>>, CipherS0),
TLSVersion = dtls_v1:corresponding_tls_version(Version),
- case ssl_cipher:decipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, CipherFragment, TLSVersion) of
+ case ssl_record:decipher_aead(BulkCipherAlgo, CipherS1, AAD, CipherFragment, TLSVersion) of
{PlainFragment, CipherState} ->
{Plain, CompressionS1} = ssl_record:uncompress(CompAlg,
PlainFragment, CompressionS0),
@@ -600,7 +600,7 @@ mac_hash({Major, Minor}, MacAlg, MacSecret, Epoch, SeqNo, Type, Length, Fragment
Fragment],
dtls_v1:hmac_hash(MacAlg, MacSecret, Value).
-calc_aad(Type, {MajVer, MinVer}, Epoch, SeqNo) ->
+start_additional_data(Type, {MajVer, MinVer}, Epoch, SeqNo) ->
<<?UINT16(Epoch), ?UINT48(SeqNo), ?BYTE(Type), ?BYTE(MajVer), ?BYTE(MinVer)>>.
%%--------------------------------------------------------------------
diff --git a/lib/ssl/src/inet_tls_dist.erl b/lib/ssl/src/inet_tls_dist.erl
index ca059603ae..a4f8bb7562 100644
--- a/lib/ssl/src/inet_tls_dist.erl
+++ b/lib/ssl/src/inet_tls_dist.erl
@@ -567,7 +567,7 @@ gen_close(Driver, Socket) ->
get_address_resolver(EpmdModule, Driver) ->
case erlang:function_exported(EpmdModule, address_please, 3) of
true -> {EpmdModule, address_please};
- _ -> {Driver, getaddr}
+ _ -> {erl_epmd, address_please}
end.
%% ------------------------------------------------------------
diff --git a/lib/ssl/src/ssl_cipher.erl b/lib/ssl/src/ssl_cipher.erl
index b23129dcdd..54c04c13e5 100644
--- a/lib/ssl/src/ssl_cipher.erl
+++ b/lib/ssl/src/ssl_cipher.erl
@@ -34,7 +34,7 @@
-include_lib("public_key/include/public_key.hrl").
-export([security_parameters/2, security_parameters/3,
- cipher_init/3, decipher/6, cipher/5, decipher_aead/6, cipher_aead/6,
+ cipher_init/3, nonce_seed/2, decipher/6, cipher/5, aead_encrypt/5, aead_decrypt/6,
suites/1, all_suites/1, crypto_support_filters/0,
chacha_suites/1, anonymous_suites/1, psk_suites/1, psk_suites_anon/1,
srp_suites/0, srp_suites_anon/0,
@@ -48,6 +48,8 @@
-type cipher_enum() :: integer().
+-export_type([cipher_enum/0]).
+
%%--------------------------------------------------------------------
-spec security_parameters(ssl_cipher_format:cipher_suite(), #security_parameters{}) ->
#security_parameters{}.
@@ -91,10 +93,15 @@ cipher_init(?RC4, IV, Key) ->
#cipher_state{iv = IV, key = Key, state = State};
cipher_init(?AES_GCM, IV, Key) ->
<<Nonce:64>> = random_bytes(8),
- #cipher_state{iv = IV, key = Key, nonce = Nonce};
+ #cipher_state{iv = IV, key = Key, nonce = Nonce, tag_len = 16};
+cipher_init(?CHACHA20_POLY1305, IV, Key) ->
+ #cipher_state{iv = IV, key = Key, tag_len = 16};
cipher_init(_BCA, IV, Key) ->
#cipher_state{iv = IV, key = Key}.
+nonce_seed(Seed, CipherState) ->
+ CipherState#cipher_state{nonce = Seed}.
+
%%--------------------------------------------------------------------
-spec cipher(cipher_enum(), #cipher_state{}, binary(), iodata(), ssl_record:ssl_version()) ->
{binary(), #cipher_state{}}.
@@ -126,32 +133,16 @@ cipher(?AES_CBC, CipherState, Mac, Fragment, Version) ->
crypto:block_encrypt(aes_cbc256, Key, IV, T)
end, block_size(aes_128_cbc), CipherState, Mac, Fragment, Version).
-%%--------------------------------------------------------------------
--spec cipher_aead(cipher_enum(), #cipher_state{}, integer(), binary(), iodata(), ssl_record:ssl_version()) ->
- {binary(), #cipher_state{}}.
-%%
-%% Description: Encrypts the data and protects associated data (AAD) using chipher
-%% described by cipher_enum() and updating the cipher state
-%% Use for suites that use authenticated encryption with associated data (AEAD)
-%%-------------------------------------------------------------------
-cipher_aead(?AES_GCM, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_cipher(aes_gcm, CipherState, SeqNo, AAD, Fragment, Version);
-cipher_aead(?CHACHA20_POLY1305, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_cipher(chacha20_poly1305, CipherState, SeqNo, AAD, Fragment, Version).
-
-aead_cipher(chacha20_poly1305, #cipher_state{key=Key} = CipherState, SeqNo, AAD0, Fragment, _Version) ->
- CipherLen = erlang:iolist_size(Fragment),
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- Nonce = ?uint64(SeqNo),
- {Content, CipherTag} = crypto:block_encrypt(chacha20_poly1305, Key, Nonce, {AAD, Fragment}),
- {<<Content/binary, CipherTag/binary>>, CipherState};
-aead_cipher(Type, #cipher_state{key=Key, iv = IV0, nonce = Nonce} = CipherState, _SeqNo, AAD0, Fragment, _Version) ->
- CipherLen = erlang:iolist_size(Fragment),
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- <<Salt:4/bytes, _/binary>> = IV0,
- IV = <<Salt/binary, Nonce:64/integer>>,
- {Content, CipherTag} = crypto:block_encrypt(Type, Key, IV, {AAD, Fragment}),
- {<<Nonce:64/integer, Content/binary, CipherTag/binary>>, CipherState#cipher_state{nonce = Nonce + 1}}.
+aead_encrypt(Type, Key, Nonce, Fragment, AdditionalData) ->
+ crypto:block_encrypt(aead_type(Type), Key, Nonce, {AdditionalData, Fragment}).
+
+aead_decrypt(Type, Key, Nonce, CipherText, CipherTag, AdditionalData) ->
+ crypto:block_decrypt(aead_type(Type), Key, Nonce, {AdditionalData, CipherText, CipherTag}).
+
+aead_type(?AES_GCM) ->
+ aes_gcm;
+aead_type(?CHACHA20_POLY1305) ->
+ chacha20_poly1305.
build_cipher_block(BlockSz, Mac, Fragment) ->
TotSz = byte_size(Mac) + erlang:iolist_size(Fragment) + 1,
@@ -218,19 +209,6 @@ decipher(?AES_CBC, HashSz, CipherState, Fragment, Version, PaddingCheck) ->
crypto:block_decrypt(aes_cbc256, Key, IV, T)
end, CipherState, HashSz, Fragment, Version, PaddingCheck).
-%%--------------------------------------------------------------------
--spec decipher_aead(cipher_enum(), #cipher_state{}, integer(), binary(), binary(), ssl_record:ssl_version()) ->
- {binary(), #cipher_state{}} | #alert{}.
-%%
-%% Description: Decrypts the data and checks the associated data (AAD) MAC using
-%% cipher described by cipher_enum() and updating the cipher state.
-%% Use for suites that use authenticated encryption with associated data (AEAD)
-%%-------------------------------------------------------------------
-decipher_aead(?AES_GCM, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_decipher(aes_gcm, CipherState, SeqNo, AAD, Fragment, Version);
-decipher_aead(?CHACHA20_POLY1305, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_decipher(chacha20_poly1305, CipherState, SeqNo, AAD, Fragment, Version).
-
block_decipher(Fun, #cipher_state{key=Key, iv=IV} = CipherState0,
HashSz, Fragment, Version, PaddingCheck) ->
try
@@ -261,34 +239,6 @@ block_decipher(Fun, #cipher_state{key=Key, iv=IV} = CipherState0,
?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
end.
-aead_ciphertext_to_state(chacha20_poly1305, SeqNo, _IV, AAD0, Fragment, _Version) ->
- CipherLen = size(Fragment) - 16,
- <<CipherText:CipherLen/bytes, CipherTag:16/bytes>> = Fragment,
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- Nonce = ?uint64(SeqNo),
- {Nonce, AAD, CipherText, CipherTag};
-aead_ciphertext_to_state(_, _SeqNo, <<Salt:4/bytes, _/binary>>, AAD0, Fragment, _Version) ->
- CipherLen = size(Fragment) - 24,
- <<ExplicitNonce:8/bytes, CipherText:CipherLen/bytes, CipherTag:16/bytes>> = Fragment,
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- Nonce = <<Salt/binary, ExplicitNonce/binary>>,
- {Nonce, AAD, CipherText, CipherTag}.
-
-aead_decipher(Type, #cipher_state{key = Key, iv = IV} = CipherState,
- SeqNo, AAD0, Fragment, Version) ->
- try
- {Nonce, AAD, CipherText, CipherTag} = aead_ciphertext_to_state(Type, SeqNo, IV, AAD0, Fragment, Version),
- case crypto:block_decrypt(Type, Key, Nonce, {AAD, CipherText, CipherTag}) of
- Content when is_binary(Content) ->
- {Content, CipherState};
- _ ->
- ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
- end
- catch
- _:_ ->
- ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
- end.
-
%%--------------------------------------------------------------------
-spec suites(ssl_record:ssl_version()) -> [ssl_cipher_format:cipher_suite()].
%%
@@ -982,7 +932,7 @@ filter_suites_pubkey(ec, Ciphers, _, OtpCert) ->
ec_ecdhe_suites(Ciphers)),
filter_keyuse_suites(keyAgreement, Uses, CiphersSuites, ec_ecdh_suites(Ciphers)).
-filter_suites_signature(rsa, Ciphers, {3, N}) when N >= 3 ->
+filter_suites_signature(_, Ciphers, {3, N}) when N >= 3 ->
Ciphers;
filter_suites_signature(rsa, Ciphers, Version) ->
(Ciphers -- ecdsa_signed_suites(Ciphers, Version)) -- dsa_signed_suites(Ciphers, Version);
diff --git a/lib/ssl/src/ssl_cipher.hrl b/lib/ssl/src/ssl_cipher.hrl
index ba6a98b92a..2371e8bd32 100644
--- a/lib/ssl/src/ssl_cipher.hrl
+++ b/lib/ssl/src/ssl_cipher.hrl
@@ -48,7 +48,8 @@
iv,
key,
state,
- nonce
+ nonce,
+ tag_len
}).
%%% TLS_NULL_WITH_NULL_NULL is specified and is the initial state of a
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index 9f876add6c..acd9f14f7b 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -621,8 +621,10 @@ init({call, From}, {start, {Opts, EmOpts}, Timeout},
catch throw:Error ->
stop_and_reply(normal, {reply, From, {error, Error}}, State0)
end;
-init({call, From}, Msg, State, Connection) ->
+init({call, From}, {new_user, _} = Msg, State, Connection) ->
handle_call(Msg, From, ?FUNCTION_NAME, State, Connection);
+init({call, From}, _Msg, _State, _Connection) ->
+ {keep_state_and_data, [{reply, From, {error, notsup_on_transport_accept_socket}}]};
init(_Type, _Event, _State, _Connection) ->
{keep_state_and_data, [postpone]}.
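Editor's sketch (placeholder option values, not part of this patch): after this change, the only calls intended to work on a socket returned by ssl:transport_accept/1 are the handshake and the ownership transfer; anything else is answered with {error, notsup_on_transport_accept_socket}:

    {ok, Listen}   = ssl:listen(0, [{certfile, "cert.pem"}, {keyfile, "key.pem"}]),
    {ok, Accepted} = ssl:transport_accept(Listen),
    ok             = ssl:controlling_process(Accepted, self()),
    {ok, _TlsSock} = ssl:handshake(Accepted, infinity),
    %% e.g. ssl:connection_information(Accepted) issued before the handshake
    %% now returns {error, notsup_on_transport_accept_socket}.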
diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl
index dc89fb0029..14df1d2e02 100644
--- a/lib/ssl/src/ssl_handshake.erl
+++ b/lib/ssl/src/ssl_handshake.erl
@@ -925,6 +925,13 @@ premaster_secret(EncSecret, #'RSAPrivateKey'{} = RSAPrivateKey) ->
catch
_:_ ->
throw(?ALERT_REC(?FATAL, ?DECRYPT_ERROR))
+ end;
+premaster_secret(EncSecret, #{algorithm := rsa} = Engine) ->
+ try crypto:private_decrypt(rsa, EncSecret, maps:remove(algorithm, Engine),
+ [{rsa_pad, rsa_pkcs1_padding}])
+ catch
+ _:_ ->
+ throw(?ALERT_REC(?FATAL, ?DECRYPT_ERROR))
end.
%%====================================================================
%% Extensions handling
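A hedged sketch of the engine-held key form the new premaster_secret/2 clause accepts; EngineRef and the key id are placeholders for a reference obtained through the crypto engine API (e.g. crypto:engine_load/3):

    EngineKey = #{algorithm => rsa,
                  engine    => EngineRef,
                  key_id    => <<"tls_server_key">>},
    %% Same call shape as the clause above: the algorithm tag is stripped and
    %% the rest of the map is passed to crypto as the private key reference.
    PreMasterSecret = crypto:private_decrypt(rsa, EncSecret,
                                             maps:remove(algorithm, EngineKey),
                                             [{rsa_pad, rsa_pkcs1_padding}]).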
diff --git a/lib/ssl/src/ssl_pem_cache.erl b/lib/ssl/src/ssl_pem_cache.erl
index b7d23ef01e..41bca2f7b5 100644
--- a/lib/ssl/src/ssl_pem_cache.erl
+++ b/lib/ssl/src/ssl_pem_cache.erl
@@ -45,7 +45,7 @@
-record(state, {
pem_cache,
- last_pem_check :: erlang:timestamp(),
+ last_pem_check :: integer(),
clear :: integer()
}).
@@ -134,8 +134,9 @@ init([Name]) ->
PemCache = ssl_pkix_db:create_pem_cache(Name),
Interval = pem_check_interval(),
erlang:send_after(Interval, self(), clear_pem_cache),
+ erlang:system_time(second),
{ok, #state{pem_cache = PemCache,
- last_pem_check = os:timestamp(),
+ last_pem_check = erlang:convert_time_unit(os:system_time(), native, second),
clear = Interval
}}.
@@ -183,7 +184,7 @@ handle_cast({invalidate_pem, File}, #state{pem_cache = Db} = State) ->
handle_info(clear_pem_cache, #state{pem_cache = PemCache,
clear = Interval,
last_pem_check = CheckPoint} = State) ->
- NewCheckPoint = os:timestamp(),
+ NewCheckPoint = erlang:convert_time_unit(os:system_time(), native, second),
start_pem_cache_validator(PemCache, CheckPoint),
erlang:send_after(Interval, self(), clear_pem_cache),
{noreply, State#state{last_pem_check = NewCheckPoint}};
@@ -229,24 +230,14 @@ init_pem_cache_validator([CacheName, PemCache, CheckPoint]) ->
CheckPoint, PemCache).
pem_cache_validate({File, _}, CheckPoint) ->
- case file:read_file_info(File, []) of
- {ok, #file_info{mtime = Time}} ->
- case is_before_checkpoint(Time, CheckPoint) of
- true ->
- ok;
- false ->
- invalidate_pem(File)
- end;
+ case file:read_file_info(File, [{time, posix}]) of
+ {ok, #file_info{mtime = Time}} when Time < CheckPoint ->
+ ok;
_ ->
invalidate_pem(File)
end,
CheckPoint.
-is_before_checkpoint(Time, CheckPoint) ->
- calendar:datetime_to_gregorian_seconds(
- calendar:now_to_datetime(CheckPoint)) -
- calendar:datetime_to_gregorian_seconds(Time) > 0.
-
pem_check_interval() ->
case application:get_env(ssl, ssl_pem_cache_clean) of
{ok, Interval} when is_integer(Interval) ->
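A small sketch of the POSIX-second comparison the validator now performs (Path and CheckPoint are placeholders; the #file_info{} record comes from kernel/include/file.hrl):

    -include_lib("kernel/include/file.hrl").

    checkpoint() ->
        erlang:convert_time_unit(os:system_time(), native, second).

    unchanged_since(Path, CheckPoint) ->
        case file:read_file_info(Path, [{time, posix}]) of
            {ok, #file_info{mtime = MTime}} when MTime < CheckPoint -> true;
            _ -> false        %% modified since the checkpoint, or unreadable
        end.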
diff --git a/lib/ssl/src/ssl_record.erl b/lib/ssl/src/ssl_record.erl
index 659e1485ac..b9d1320ef3 100644
--- a/lib/ssl/src/ssl_record.erl
+++ b/lib/ssl/src/ssl_record.erl
@@ -45,7 +45,7 @@
-export([compress/3, uncompress/3, compressions/0]).
%% Payload encryption/decryption
--export([cipher/4, decipher/4, cipher_aead/4, is_correct_mac/2]).
+-export([cipher/4, decipher/4, cipher_aead/4, decipher_aead/5, is_correct_mac/2, nonce_seed/3]).
-export_type([ssl_version/0, ssl_atom_version/0, connection_states/0, connection_state/0]).
@@ -306,22 +306,20 @@ cipher(Version, Fragment,
{CipherFragment, CipherS1} =
ssl_cipher:cipher(BulkCipherAlgo, CipherS0, MacHash, Fragment, Version),
{CipherFragment, WriteState0#{cipher_state => CipherS1}}.
-%% %%--------------------------------------------------------------------
-%% -spec cipher_aead(ssl_version(), iodata(), connection_state(), MacHash::binary()) ->
-%% {CipherFragment::binary(), connection_state()}.
-%% %%
-%% %% Description: Payload encryption
+%%--------------------------------------------------------------------
+-spec cipher_aead(ssl_version(), iodata(), connection_state(), AAD::binary()) ->
+ {CipherFragment::binary(), connection_state()}.
+
+%% Description: Payload encryption
%% %%--------------------------------------------------------------------
cipher_aead(Version, Fragment,
#{cipher_state := CipherS0,
- sequence_number := SeqNo,
security_parameters :=
#security_parameters{bulk_cipher_algorithm =
BulkCipherAlgo}
} = WriteState0, AAD) ->
-
{CipherFragment, CipherS1} =
- ssl_cipher:cipher_aead(BulkCipherAlgo, CipherS0, SeqNo, AAD, Fragment, Version),
+ cipher_aead(BulkCipherAlgo, CipherS0, AAD, Fragment, Version),
{CipherFragment, WriteState0#{cipher_state => CipherS1}}.
%%--------------------------------------------------------------------
@@ -344,10 +342,39 @@ decipher(Version, CipherFragment,
#alert{} = Alert ->
Alert
end.
+%%--------------------------------------------------------------------
+-spec decipher_aead(ssl_cipher:cipher_enum(), #cipher_state{},
+ binary(), binary(), ssl_record:ssl_version()) ->
+ {binary(), #cipher_state{}} | #alert{}.
+%%
+%% Description: Decrypts the data and checks the associated data (AAD) MAC using
+%% the cipher described by cipher_enum(), updating the cipher state.
+%% Used for suites that use authenticated encryption with associated data (AEAD).
+%%-------------------------------------------------------------------
+decipher_aead(Type, #cipher_state{key = Key} = CipherState, AAD0, CipherFragment, _) ->
+ try
+ Nonce = decrypt_nonce(Type, CipherState, CipherFragment),
+ {AAD, CipherText, CipherTag} = aead_ciphertext_split(Type, CipherState, CipherFragment, AAD0),
+ case ssl_cipher:aead_decrypt(Type, Key, Nonce, CipherText, CipherTag, AAD) of
+ Content when is_binary(Content) ->
+ {Content, CipherState};
+ _ ->
+ ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
+ end
+ catch
+ _:_ ->
+ ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
+ end.
+
+nonce_seed(?CHACHA20_POLY1305, Seed, CipherState) ->
+ ssl_cipher:nonce_seed(Seed, CipherState);
+nonce_seed(_,_, CipherState) ->
+ CipherState.
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
+
empty_connection_state(ConnectionEnd, BeastMitigation) ->
SecParams = empty_security_params(ConnectionEnd),
#{security_parameters => SecParams,
@@ -400,3 +427,37 @@ initial_security_params(ConnectionEnd) ->
compression_algorithm = ?NULL},
ssl_cipher:security_parameters(?TLS_NULL_WITH_NULL_NULL, SecParams).
+cipher_aead(?CHACHA20_POLY1305 = Type, #cipher_state{key=Key} = CipherState, AAD0, Fragment, _Version) ->
+ AAD = end_additional_data(AAD0, erlang:iolist_size(Fragment)),
+ Nonce = encrypt_nonce(Type, CipherState),
+ {Content, CipherTag} = ssl_cipher:aead_encrypt(Type, Key, Nonce, Fragment, AAD),
+ {<<Content/binary, CipherTag/binary>>, CipherState};
+cipher_aead(Type, #cipher_state{key=Key, nonce = ExplicitNonce} = CipherState, AAD0, Fragment, _Version) ->
+ AAD = end_additional_data(AAD0, erlang:iolist_size(Fragment)),
+ Nonce = encrypt_nonce(Type, CipherState),
+ {Content, CipherTag} = ssl_cipher:aead_encrypt(Type, Key, Nonce, Fragment, AAD),
+ {<<ExplicitNonce:64/integer, Content/binary, CipherTag/binary>>, CipherState#cipher_state{nonce = ExplicitNonce + 1}}.
+
+encrypt_nonce(?CHACHA20_POLY1305, #cipher_state{nonce = Nonce, iv = IV}) ->
+ crypto:exor(<<?UINT32(0), Nonce/binary>>, IV);
+encrypt_nonce(?AES_GCM, #cipher_state{iv = IV, nonce = ExplicitNonce}) ->
+ <<Salt:4/bytes, _/binary>> = IV,
+ <<Salt/binary, ExplicitNonce:64/integer>>.
+
+decrypt_nonce(?CHACHA20_POLY1305, #cipher_state{nonce = Nonce, iv = IV}, _) ->
+ crypto:exor(<<Nonce:96/unsigned-big-integer>>, IV);
+decrypt_nonce(?AES_GCM, #cipher_state{iv = <<Salt:4/bytes, _/binary>>}, <<ExplicitNonce:8/bytes, _/binary>>) ->
+ <<Salt/binary, ExplicitNonce/binary>>.
+
+aead_ciphertext_split(?CHACHA20_POLY1305, #cipher_state{tag_len = Len}, CipherTextFragment, AAD) ->
+ CipherLen = size(CipherTextFragment) - Len,
+ <<CipherText:CipherLen/bytes, CipherTag:Len/bytes>> = CipherTextFragment,
+ {end_additional_data(AAD, CipherLen), CipherText, CipherTag};
+aead_ciphertext_split(?AES_GCM, #cipher_state{tag_len = Len}, CipherTextFragment, AAD) ->
+ CipherLen = size(CipherTextFragment) - (Len + 8), %% 8 is length of explicit Nonce
+ << _:8/bytes, CipherText:CipherLen/bytes, CipherTag:Len/bytes>> = CipherTextFragment,
+ {end_additional_data(AAD, CipherLen), CipherText, CipherTag}.
+
+end_additional_data(AAD, Len) ->
+ <<AAD/binary, ?UINT16(Len)>>.
+
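For orientation, a minimal sketch (placeholder variables; same crypto:block_decrypt/4 call the replaced code relied on) of how the TLS 1.2 AES-GCM record nonce and final AAD are assembled, mirroring decrypt_nonce/3 and end_additional_data/2 above:

    <<Salt:4/bytes, _/binary>> = IV,                      %% 4-byte implicit salt from the key block
    <<ExplicitNonce:8/bytes, Rest/binary>> = Fragment,    %% 8-byte explicit nonce sent on the wire
    TextLen = byte_size(Rest) - 16,                       %% 16-byte tag for these GCM suites
    <<CipherText:TextLen/bytes, CipherTag:16/bytes>> = Rest,
    Nonce = <<Salt/binary, ExplicitNonce/binary>>,
    AAD   = <<AAD0/binary, TextLen:16>>,                  %% length appended last, cf. end_additional_data/2
    crypto:block_decrypt(aes_gcm, Key, Nonce, {AAD, CipherText, CipherTag}).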
diff --git a/lib/ssl/src/tls_connection.erl b/lib/ssl/src/tls_connection.erl
index adb4f6d9ea..4dfb50967d 100644
--- a/lib/ssl/src/tls_connection.erl
+++ b/lib/ssl/src/tls_connection.erl
@@ -783,6 +783,7 @@ handle_info({CloseTag, Socket}, StateName,
#state{socket = Socket, close_tag = CloseTag,
socket_options = #socket_options{active = Active},
protocol_buffers = #protocol_buffers{tls_cipher_texts = CTs},
+ user_data_buffer = Buffer,
negotiated_version = Version} = State) ->
%% Note that as of TLS 1.1,
@@ -790,7 +791,7 @@ handle_info({CloseTag, Socket}, StateName,
%% session not be resumed. This is a change from TLS 1.0 to conform
%% with widespread implementation practice.
- case (Active == false) andalso (CTs =/= []) of
+ case (Active == false) andalso ((CTs =/= []) or (Buffer =/= <<>>)) of
false ->
case Version of
{1, N} when N >= 1 ->
diff --git a/lib/ssl/src/tls_record.erl b/lib/ssl/src/tls_record.erl
index f1aca8c801..ce7edc9dcd 100644
--- a/lib/ssl/src/tls_record.erl
+++ b/lib/ssl/src/tls_record.erl
@@ -176,14 +176,15 @@ decode_cipher_text(#ssl_tls{type = Type, version = Version,
BulkCipherAlgo,
compression_algorithm = CompAlg}
} = ReadState0} = ConnnectionStates0, _) ->
- AAD = calc_aad(Type, Version, ReadState0),
- case ssl_cipher:decipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, CipherFragment, Version) of
- {PlainFragment, CipherS1} ->
+ AAD = start_additional_data(Type, Version, ReadState0),
+ CipherS1 = ssl_record:nonce_seed(BulkCipherAlgo, <<?UINT64(Seq)>>, CipherS0),
+ case ssl_record:decipher_aead(BulkCipherAlgo, CipherS1, AAD, CipherFragment, Version) of
+ {PlainFragment, CipherState} ->
{Plain, CompressionS1} = ssl_record:uncompress(CompAlg,
PlainFragment, CompressionS0),
ConnnectionStates = ConnnectionStates0#{
current_read => ReadState0#{
- cipher_state => CipherS1,
+ cipher_state => CipherState,
sequence_number => Seq + 1,
compression_state => CompressionS1}},
{CipherText#ssl_tls{fragment = Plain}, ConnnectionStates};
@@ -453,15 +454,20 @@ encode_iolist(Type, Data, Version, ConnectionStates0) ->
{lists:reverse(EncodedMsg), ConnectionStates}.
%%--------------------------------------------------------------------
do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
- security_parameters :=
+ cipher_state := CipherS0,
+ sequence_number := Seq,
+ security_parameters :=
#security_parameters{
cipher_type = ?AEAD,
+ bulk_cipher_algorithm = BCAlg,
compression_algorithm = CompAlg}
} = WriteState0) ->
{Comp, CompS1} = ssl_record:compress(CompAlg, Data, CompS0),
- WriteState1 = WriteState0#{compression_state => CompS1},
- AAD = calc_aad(Type, Version, WriteState1),
- ssl_record:cipher_aead(Version, Comp, WriteState1, AAD);
+ CipherS = ssl_record:nonce_seed(BCAlg, <<?UINT64(Seq)>>, CipherS0),
+ WriteState = WriteState0#{compression_state => CompS1,
+ cipher_state => CipherS},
+ AAD = start_additional_data(Type, Version, WriteState),
+ ssl_record:cipher_aead(Version, Comp, WriteState, AAD);
do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
security_parameters :=
#security_parameters{compression_algorithm = CompAlg}
@@ -473,7 +479,7 @@ do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
do_encode_plain_text(_,_,_,CS) ->
exit({cs, CS}).
%%--------------------------------------------------------------------
-calc_aad(Type, {MajVer, MinVer},
+start_additional_data(Type, {MajVer, MinVer},
#{sequence_number := SeqNo}) ->
<<?UINT64(SeqNo), ?BYTE(Type), ?BYTE(MajVer), ?BYTE(MinVer)>>.
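For reference, start_additional_data/3 above emits the fixed 11-byte prefix of the TLS 1.2 AEAD additional data (RFC 5246: seq_num || type || version); the 2-byte length is appended later by end_additional_data/2 in ssl_record.erl once the fragment size is known. A sketch with placeholder values:

    AADStart = <<SeqNo:64, Type:8, MajVer:8, MinVer:8>>,
    AAD      = <<AADStart/binary, FragmentLen:16>>.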
diff --git a/lib/ssl/src/tls_sender.erl b/lib/ssl/src/tls_sender.erl
index 8d1938cee7..a245ee2465 100644
--- a/lib/ssl/src/tls_sender.erl
+++ b/lib/ssl/src/tls_sender.erl
@@ -319,7 +319,7 @@ handle_info({'DOWN', Monitor, _, _, _}, _,
#data{connection_monitor = Monitor} = StateData) ->
{stop, normal, StateData};
handle_info(_,_,_) ->
- {keep_state_and_data}.
+ keep_state_and_data.
send_tls_alert(Alert, #data{negotiated_version = Version,
socket = Socket,
diff --git a/lib/ssl/test/ssl_ECC_SUITE.erl b/lib/ssl/test/ssl_ECC_SUITE.erl
index c93f066825..a5309e866b 100644
--- a/lib/ssl/test/ssl_ECC_SUITE.erl
+++ b/lib/ssl/test/ssl_ECC_SUITE.erl
@@ -395,10 +395,25 @@ client_ecdhe_rsa_server_ecdhe_ecdsa_client_custom(Config) ->
end.
mix_sign(Config) ->
- {COpts0, SOpts0} = ssl_test_lib:make_mix_cert(Config),
+ mix_sign_rsa_peer(Config),
+ mix_sign_ecdsa_peer(Config).
+
+mix_sign_ecdsa_peer(Config) ->
+ {COpts0, SOpts0} = ssl_test_lib:make_mix_cert([{mix, peer_ecc} |Config]),
COpts = ssl_test_lib:ssl_options(COpts0, Config),
SOpts = ssl_test_lib:ssl_options(SOpts0, Config),
ECDHE_ECDSA =
ssl:filter_cipher_suites(ssl:cipher_suites(default, 'tlsv1.2'),
[{key_exchange, fun(ecdhe_ecdsa) -> true; (_) -> false end}]),
ssl_test_lib:basic_test(COpts, [{ciphers, ECDHE_ECDSA} | SOpts], Config).
+
+
+mix_sign_rsa_peer(Config) ->
+ {COpts0, SOpts0} = ssl_test_lib:make_mix_cert([{mix, peer_rsa} |Config]),
+ COpts = ssl_test_lib:ssl_options(COpts0, Config),
+ SOpts = ssl_test_lib:ssl_options(SOpts0, Config),
+ ECDHE_RSA =
+ ssl:filter_cipher_suites(ssl:cipher_suites(default, 'tlsv1.2'),
+ [{key_exchange, fun(ecdhe_rsa) -> true; (_) -> false end}]),
+ ssl_test_lib:basic_test(COpts, [{ciphers, ECDHE_RSA} | SOpts], Config).
+
diff --git a/lib/ssl/test/ssl_basic_SUITE.erl b/lib/ssl/test/ssl_basic_SUITE.erl
index 4585ea7306..6f668f0c00 100644
--- a/lib/ssl/test/ssl_basic_SUITE.erl
+++ b/lib/ssl/test/ssl_basic_SUITE.erl
@@ -244,7 +244,9 @@ error_handling_tests()->
recv_active_once,
recv_error_handling,
call_in_error_state,
- close_in_error_state
+ close_in_error_state,
+ abuse_transport_accept_socket,
+ controlling_process_transport_accept_socket
].
error_handling_tests_tls()->
@@ -4054,7 +4056,51 @@ close_in_error_state(Config) when is_list(Config) ->
Other ->
ct:fail(Other)
end.
+%%--------------------------------------------------------------------
+abuse_transport_accept_socket() ->
+ [{doc,"Only ssl:handshake and ssl:controlling_process is allowed for transport_accept:sockets"}].
+abuse_transport_accept_socket(Config) when is_list(Config) ->
+ ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
+ ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server_transport_abuse_socket([{node, ServerNode},
+ {port, 0},
+ {from, self()},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {ssl_test_lib, no_result, []}},
+ {options, ClientOpts}]),
+ ssl_test_lib:check_result(Server, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+%%--------------------------------------------------------------------
+controlling_process_transport_accept_socket() ->
+ [{doc,"Only ssl:handshake and ssl:controlling_process is allowed for transport_accept:sockets"}].
+controlling_process_transport_accept_socket(Config) when is_list(Config) ->
+ ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
+ ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server_transport_control([{node, ServerNode},
+ {port, 0},
+ {from, self()},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ _Client = ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {options, ClientOpts}]),
+ ssl_test_lib:check_result(Server, ok),
+ ssl_test_lib:close(Server).
+
+%%--------------------------------------------------------------------
run_error_server_close([Pid | Opts]) ->
{ok, Listen} = ssl:listen(0, Opts),
{ok,{_, Port}} = ssl:sockname(Listen),
diff --git a/lib/ssl/test/ssl_bench_SUITE.erl b/lib/ssl/test/ssl_bench_SUITE.erl
index 3fe6338d69..13097b08b6 100644
--- a/lib/ssl/test/ssl_bench_SUITE.erl
+++ b/lib/ssl/test/ssl_bench_SUITE.erl
@@ -44,6 +44,7 @@ init_per_suite(Config) ->
nonode@nohost ->
{skipped, "Node not distributed"};
_ ->
+ ssl_test_lib:clean_start(),
[{server_node, ssl_bench_test_lib:setup(perf_server)}|Config]
end.
diff --git a/lib/ssl/test/ssl_bench_test_lib.erl b/lib/ssl/test/ssl_bench_test_lib.erl
index e5cbb911bd..47bcd41608 100644
--- a/lib/ssl/test/ssl_bench_test_lib.erl
+++ b/lib/ssl/test/ssl_bench_test_lib.erl
@@ -58,13 +58,13 @@ setup(Name) ->
Path = code:get_path(),
true = rpc:call(Node, code, set_path, [Path]),
ok = rpc:call(Node, ?MODULE, setup_server, [node()]),
- io:format("Client (~p) using ~s~n",[node(), code:which(ssl)]),
+ io:format("Client (~p) using ~ts~n",[node(), code:which(ssl)]),
(Node =:= node()) andalso restrict_schedulers(client),
Node.
setup_server(ClientNode) ->
(ClientNode =:= node()) andalso restrict_schedulers(server),
- io:format("Server (~p) using ~s~n",[node(), code:which(ssl)]),
+ io:format("Server (~p) using ~ts~n",[node(), code:which(ssl)]),
ok.
restrict_schedulers(Type) ->
diff --git a/lib/ssl/test/ssl_engine_SUITE.erl b/lib/ssl/test/ssl_engine_SUITE.erl
index 1423c99dc2..e6c82d3eb5 100644
--- a/lib/ssl/test/ssl_engine_SUITE.erl
+++ b/lib/ssl/test/ssl_engine_SUITE.erl
@@ -90,12 +90,14 @@ end_per_testcase(_TestCase, Config) ->
private_key(Config) when is_list(Config) ->
ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), "client_engine"]),
ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), "server_engine"]),
+ Ext = x509_test:extensions([{key_usage, [digitalSignature, keyEncipherment]}]),
#{server_config := ServerConf,
client_config := ClientConf} = GenCertData =
public_key:pkix_test_data(#{server_chain =>
#{root => [{key, ssl_test_lib:hardcode_rsa_key(1)}],
intermediates => [[{key, ssl_test_lib:hardcode_rsa_key(2)}]],
- peer => [{key, ssl_test_lib:hardcode_rsa_key(3)}
+ peer => [{extensions, Ext},
+ {key, ssl_test_lib:hardcode_rsa_key(3)}
]},
client_chain =>
#{root => [{key, ssl_test_lib:hardcode_rsa_key(4)}],
@@ -131,6 +133,12 @@ private_key(Config) when is_list(Config) ->
%% Test with engine
test_tls_connection(EngineServerConf, EngineClientConf, Config),
+ %% Test with engine and rsa keyexchange
+ RSASuites = all_kex_rsa_suites([{tls_version, 'tlsv1.2'} | Config]),
+
+ test_tls_connection([{ciphers, RSASuites}, {versions, ['tlsv1.2']} | EngineServerConf],
+ [{ciphers, RSASuites}, {versions, ['tlsv1.2']} | EngineClientConf], Config),
+
%% Test with engine and present file arguments
test_tls_connection(EngineFileServerConf, EngineFileClientConf, Config),
@@ -160,3 +168,8 @@ test_tls_connection(ServerConf, ClientConf, Config) ->
ssl_test_lib:check_result(Server, ok, Client, ok),
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
+
+all_kex_rsa_suites(Config) ->
+ Version = proplists:get_value(tls_version, Config),
+ All = ssl:cipher_suites(all, Version),
+ ssl:filter_cipher_suites(All,[{key_exchange, fun(rsa) -> true;(_) -> false end}]).
diff --git a/lib/ssl/test/ssl_test_lib.erl b/lib/ssl/test/ssl_test_lib.erl
index 39a5bcaad6..8a2f0824fb 100644
--- a/lib/ssl/test/ssl_test_lib.erl
+++ b/lib/ssl/test/ssl_test_lib.erl
@@ -196,6 +196,55 @@ connect(ListenSocket, Node, _, _, Timeout, Opts, _) ->
rpc:call(Node, ssl, ssl_accept, [AcceptSocket, Opts, Timeout]),
AcceptSocket.
+
+start_server_transport_abuse_socket(Args) ->
+ Result = spawn_link(?MODULE, transport_accept_abuse, [Args]),
+ receive
+ {listen, up} ->
+ Result
+ end.
+
+start_server_transport_control(Args) ->
+ Result = spawn_link(?MODULE, transport_switch_control, [Args]),
+ receive
+ {listen, up} ->
+ Result
+ end.
+
+
+transport_accept_abuse(Opts) ->
+ Node = proplists:get_value(node, Opts),
+ Port = proplists:get_value(port, Opts),
+ Options = proplists:get_value(options, Opts),
+ Pid = proplists:get_value(from, Opts),
+ Transport = proplists:get_value(transport, Opts, ssl),
+ ct:log("~p:~p~nssl:listen(~p, ~p)~n", [?MODULE,?LINE, Port, Options]),
+ {ok, ListenSocket} = rpc:call(Node, Transport, listen, [Port, Options]),
+ Pid ! {listen, up},
+ send_selected_port(Pid, Port, ListenSocket),
+ {ok, AcceptSocket} = rpc:call(Node, ssl, transport_accept,
+ [ListenSocket]),
+ {error, _} = rpc:call(Node, ssl, connection_information, [AcceptSocket]),
+ _ = rpc:call(Node, ssl, handshake, [AcceptSocket, infinity]),
+ Pid ! {self(), ok}.
+
+
+transport_switch_control(Opts) ->
+ Node = proplists:get_value(node, Opts),
+ Port = proplists:get_value(port, Opts),
+ Options = proplists:get_value(options, Opts),
+ Pid = proplists:get_value(from, Opts),
+ Transport = proplists:get_value(transport, Opts, ssl),
+ ct:log("~p:~p~nssl:listen(~p, ~p)~n", [?MODULE,?LINE, Port, Options]),
+ {ok, ListenSocket} = rpc:call(Node, Transport, listen, [Port, Options]),
+ Pid ! {listen, up},
+ send_selected_port(Pid, Port, ListenSocket),
+ {ok, AcceptSocket} = rpc:call(Node, ssl, transport_accept,
+ [ListenSocket]),
+ ok = rpc:call(Node, ssl, controlling_process, [AcceptSocket, self()]),
+ Pid ! {self(), ok}.
+
+
remove_close_msg(0) ->
ok;
remove_close_msg(ReconnectTimes) ->
@@ -693,20 +742,12 @@ make_mix_cert(Config) ->
Ext = x509_test:extensions([{key_usage, [digitalSignature]}]),
Digest = {digest, appropriate_sha(crypto:supports())},
CurveOid = hd(tls_v1:ecc_curves(0)),
- ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix"]),
- ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix"]),
- ClientChain = [[Digest, {key, {namedCurve, CurveOid}}],
- [Digest, {key, hardcode_rsa_key(1)}],
- [Digest, {key, {namedCurve, CurveOid}}, {extensions, Ext}]
- ],
- ServerChain = [[Digest, {key, {namedCurve, CurveOid}}],
- [Digest, {key, hardcode_rsa_key(2)}],
- [Digest, {key, {namedCurve, CurveOid}},{extensions, Ext}]
- ],
+ Mix = proplists:get_value(mix, Config, peer_ecc),
ClientChainType =ServerChainType = mix,
+ {ClientChain, ServerChain} = mix(Mix, Digest, CurveOid, Ext),
CertChainConf = gen_conf(ClientChainType, ServerChainType, ClientChain, ServerChain),
- ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), atom_to_list(ClientChainType)]),
- ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), atom_to_list(ServerChainType)]),
+ ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix" ++ atom_to_list(Mix)]),
+ ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix" ++ atom_to_list(Mix)]),
GenCertData = public_key:pkix_test_data(CertChainConf),
[{server_config, ServerConf},
{client_config, ClientConf}] =
@@ -715,6 +756,28 @@ make_mix_cert(Config) ->
[{reuseaddr, true}, {verify, verify_peer} | ServerConf]
}.
+mix(peer_ecc, Digest, CurveOid, Ext) ->
+ ClientChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(1)}],
+ [Digest, {key, {namedCurve, CurveOid}}, {extensions, Ext}]
+ ],
+ ServerChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(2)}],
+ [Digest, {key, {namedCurve, CurveOid}},{extensions, Ext}]
+ ],
+ {ClientChain, ServerChain};
+
+mix(peer_rsa, Digest, CurveOid, Ext) ->
+ ClientChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(1)}, {extensions, Ext}]
+ ],
+ ServerChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(2)},{extensions, Ext}]
+ ],
+ {ClientChain, ServerChain}.
+
make_ecdsa_cert(Config) ->
CryptoSupport = crypto:supports(),
case proplists:get_bool(ecdsa, proplists:get_value(public_keys, CryptoSupport)) of
diff --git a/lib/stdlib/doc/src/beam_lib.xml b/lib/stdlib/doc/src/beam_lib.xml
index 26d0724aaf..213170df7f 100644
--- a/lib/stdlib/doc/src/beam_lib.xml
+++ b/lib/stdlib/doc/src/beam_lib.xml
@@ -180,8 +180,8 @@ io:fwrite("~s~n", [erl_prettypr:format(erl_syntax:form_list(AC))]).</code>
<name name="beam"/>
<desc>
<p>Each of the functions described below accept either the
- module name, the filename, or a binary containing the BEAM
- module.</p>
+ filename (as a string) or a binary containing the BEAM
+ module.</p>
</desc>
</datatype>
<datatype>
diff --git a/lib/stdlib/doc/src/gen_event.xml b/lib/stdlib/doc/src/gen_event.xml
index f793ec7fdf..fc34e51216 100644
--- a/lib/stdlib/doc/src/gen_event.xml
+++ b/lib/stdlib/doc/src/gen_event.xml
@@ -775,7 +775,7 @@ gen_event:stop -----> Module:terminate/2
<p>This callback is optional, so callback modules need not
export it. The <c>gen_event</c> module provides a default
implementation of this function that logs about the unexpected
- <c>Info</c> message, drops it and returns <c>{noreply, State}</c>.</p>
+ <c>Info</c> message, drops it and returns <c>{ok, State}</c>.</p>
</note>
<p>This function is called for each installed event handler when
an event manager receives any other message than an event or
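A minimal sketch of a handler callback written to match the documented default behaviour (hypothetical handler module; any logging call would do):

    handle_info(Info, State) ->
        error_logger:warning_msg("unexpected info: ~p~n", [Info]),
        {ok, State}.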
diff --git a/lib/stdlib/doc/src/lists.xml b/lib/stdlib/doc/src/lists.xml
index c3d5d7e07a..e4215a5336 100644
--- a/lib/stdlib/doc/src/lists.xml
+++ b/lib/stdlib/doc/src/lists.xml
@@ -850,14 +850,6 @@ splitwith(Pred, List) ->
> <input>lists:subtract("123212", "212").</input>
"312".</pre>
<p><c>lists:subtract(A, B)</c> is equivalent to <c>A -- B</c>.</p>
- <warning>
- <p>The complexity of <c>lists:subtract(A, B)</c> is proportional to
- <c>length(A)*length(B)</c>, meaning that it is very slow if both
- <c>A</c> and <c>B</c> are long lists. (If both lists are long, it
- is a much better choice to use ordered lists and
- <seealso marker="ordsets#subtract/2">
- <c>ordsets:subtract/2</c></seealso>.</p>
- </warning>
</desc>
</func>
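For context, the -- operator removes one occurrence from the left operand per element of the right operand, and lists:subtract/2 is the same operation; a few illustrative expressions:

    "312"   = "123212" -- "212",
    [2,1,3] = [1,2,1,3] -- [1],
    true    = ([1,2,3] -- [2]) =:= lists:subtract([1,2,3], [2]).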
diff --git a/lib/stdlib/doc/src/notes.xml b/lib/stdlib/doc/src/notes.xml
index d800885b16..039f087708 100644
--- a/lib/stdlib/doc/src/notes.xml
+++ b/lib/stdlib/doc/src/notes.xml
@@ -504,6 +504,21 @@
</section>
+<section><title>STDLIB 3.4.5.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>List subtraction (The <c>--</c> operator) will now
+ yield properly on large inputs.</p>
+ <p>
+ Own Id: OTP-15371</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>STDLIB 3.4.5</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -1658,6 +1673,21 @@
</section>
+<section><title>STDLIB 2.8.0.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>List subtraction (The <c>--</c> operator) will now
+ yield properly on large inputs.</p>
+ <p>
+ Own Id: OTP-15371</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>STDLIB 2.8</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -7827,4 +7857,3 @@
</section>
</section>
</chapter>
-
diff --git a/lib/stdlib/src/beam_lib.erl b/lib/stdlib/src/beam_lib.erl
index 01181b1097..3386cfcbe6 100644
--- a/lib/stdlib/src/beam_lib.erl
+++ b/lib/stdlib/src/beam_lib.erl
@@ -53,7 +53,7 @@
%%-------------------------------------------------------------------------
--type beam() :: module() | file:filename() | binary().
+-type beam() :: file:filename() | binary().
-type debug_info() :: {DbgiVersion :: atom(), Backend :: module(), Data :: term()} | 'no_debug_info'.
-type forms() :: [erl_parse:abstract_form() | erl_parse:form_info()].
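A quick sketch of the two forms the narrowed beam() type still covers (the .beam path is a placeholder):

    Path = "ebin/mymod.beam",
    {ok, {mymod, [{exports, _Exports}]}} = beam_lib:chunks(Path, [exports]),
    {ok, Bin} = file:read_file(Path),
    {ok, {mymod, _}} = beam_lib:chunks(Bin, [exports]).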
diff --git a/lib/stdlib/test/gen_fsm_SUITE.erl b/lib/stdlib/test/gen_fsm_SUITE.erl
index 41ee3246f5..a8264e5a84 100644
--- a/lib/stdlib/test/gen_fsm_SUITE.erl
+++ b/lib/stdlib/test/gen_fsm_SUITE.erl
@@ -124,8 +124,10 @@ start2(Config) when is_list(Config) ->
{ok, Pid0} = gen_fsm:start(gen_fsm_SUITE, [], []),
ok = do_func_test(Pid0),
ok = do_sync_func_test(Pid0),
+ MRef = monitor(process,Pid0),
shutdown_stopped =
gen_fsm:sync_send_all_state_event(Pid0, stop_shutdown),
+ receive {'DOWN',MRef,_,_,shutdown} -> ok end,
{'EXIT', {noproc,_}} =
(catch gen_fsm:sync_send_event(Pid0, hej)),
diff --git a/lib/stdlib/test/lists_SUITE.erl b/lib/stdlib/test/lists_SUITE.erl
index 837ab4e97e..984b51e7ae 100644
--- a/lib/stdlib/test/lists_SUITE.erl
+++ b/lib/stdlib/test/lists_SUITE.erl
@@ -2597,6 +2597,20 @@ subtract(Config) when is_list(Config) ->
{'EXIT',_} = (catch sub([a|b], [])),
{'EXIT',_} = (catch sub([a|b], [a])),
+ %% Trapping, both crashing and otherwise.
+ [sub_trapping(N) || N <- lists:seq(0, 18)],
+
+ %% The current implementation chooses which algorithm to use based on
+ %% certain thresholds, and we need proper coverage for all corner cases.
+ [sub_thresholds(N) || N <- lists:seq(0, 32)],
+
+ %% Trapping, both crashing and otherwise.
+ [sub_trapping(N) || N <- lists:seq(0, 18)],
+
+ %% The current implementation chooses which algorithm to use based on
+ %% certain thresholds, and we need proper coverage for all corner cases.
+ [sub_thresholds(N) || N <- lists:seq(0, 32)],
+
ok.
sub_non_matching(A, B) ->
@@ -2606,6 +2620,41 @@ sub(A, B) ->
Res = A -- B,
Res = lists:subtract(A, B).
+sub_trapping(N) ->
+ List = lists:duplicate(N + (1 bsl N), gurka),
+ ImproperList = List ++ crash,
+
+ {'EXIT',_} = (catch sub_trapping_1(ImproperList, [])),
+ {'EXIT',_} = (catch sub_trapping_1(List, ImproperList)),
+
+ List = List -- lists:duplicate(N + (1 bsl N), gaffel),
+ ok = sub_trapping_1(List, []).
+
+sub_trapping_1([], _) -> ok;
+sub_trapping_1(L, R) -> sub_trapping_1(L -- R, [gurka | R]).
+
+sub_thresholds(N) ->
+ %% This needs to be long enough to cause trapping.
+ OtherLen = 1 bsl 18,
+ Other = lists:seq(0, OtherLen - 1),
+
+ Disjoint = lists:seq(-N, -1),
+ Subset = lists:seq(1, N),
+
+ %% LHS is disjoint from RHS, so all elements must be retained.
+ Disjoint = Disjoint -- Other,
+
+ %% LHS is covered by RHS, so all elements must be removed.
+ [] = Subset -- Other,
+
+ %% RHS is disjoint from LHS, so all elements must be retained.
+ Other = Other -- Disjoint,
+
+ %% RHS is covered by LHS, so N elements must be removed.
+ N = OtherLen - length(Other -- Subset),
+
+ ok.
+
%% Test lists:droplast/1
droplast(Config) when is_list(Config) ->
[] = lists:droplast([x]),
diff --git a/lib/stdlib/test/sys_SUITE.erl b/lib/stdlib/test/sys_SUITE.erl
index 3278eb0eb0..fcc4419569 100644
--- a/lib/stdlib/test/sys_SUITE.erl
+++ b/lib/stdlib/test/sys_SUITE.erl
@@ -219,7 +219,7 @@ spec_proc(Mod) ->
{Mod,system_get_state},{throw,fail}},_}} ->
ok
end,
- ok = sys:terminate(Mod, normal),
+ ok = sync_terminate(Mod),
{ok,_} = Mod:start_link(4),
ok = case catch sys:replace_state(Mod, fun(_) -> {} end) of
{} ->
@@ -228,7 +228,7 @@ spec_proc(Mod) ->
{Mod,system_replace_state},{throw,fail}},_}} ->
ok
end,
- ok = sys:terminate(Mod, normal),
+ ok = sync_terminate(Mod),
{ok,_} = Mod:start_link(4),
StateFun = fun(_) -> error(fail) end,
ok = case catch sys:replace_state(Mod, StateFun) of
@@ -240,7 +240,18 @@ spec_proc(Mod) ->
{'EXIT',{{callback_failed,StateFun,{error,fail}},_}} ->
ok
end,
- ok = sys:terminate(Mod, normal).
+ ok = sync_terminate(Mod).
+
+sync_terminate(Mod) ->
+ P = whereis(Mod),
+ MRef = erlang:monitor(process,P),
+ ok = sys:terminate(Mod, normal),
+ receive
+ {'DOWN',MRef,_,_,normal} ->
+ ok
+ end,
+ undefined = whereis(Mod),
+ ok.
%%%%%%%%%%%%%%%%%%%%
%% Dummy server
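A generic, self-contained sketch of the monitor-before-stop synchronization these test fixes (here and in gen_fsm_SUITE above) rely on; the spawned process stands in for the server under test:

    Pid  = spawn(fun() -> receive stop -> ok end end),
    MRef = erlang:monitor(process, Pid),
    Pid ! stop,
    receive {'DOWN', MRef, process, Pid, normal} -> ok end,
    undefined = erlang:process_info(Pid).   %% the process is guaranteed gone by now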