Diffstat (limited to 'lib')
-rw-r--r--  lib/asn1/c_src/asn1_erl_nif.c | 2
-rw-r--r--  lib/asn1/test/asn1_SUITE.erl | 16
-rw-r--r--  lib/compiler/doc/src/compile.xml | 7
-rw-r--r--  lib/compiler/doc/src/notes.xml | 15
-rw-r--r--  lib/compiler/src/beam_jump.erl | 15
-rw-r--r--  lib/compiler/src/beam_utils.erl | 9
-rw-r--r--  lib/compiler/src/compile.erl | 14
-rw-r--r--  lib/compiler/src/sys_core_fold.erl | 20
-rw-r--r--  lib/compiler/test/beam_jump_SUITE.erl | 19
-rw-r--r--  lib/compiler/test/beam_utils_SUITE.erl | 23
-rw-r--r--  lib/compiler/test/bs_match_SUITE.erl | 19
-rw-r--r--  lib/compiler/test/compile_SUITE.erl | 28
-rw-r--r--  lib/compiler/test/trycatch_SUITE.erl | 26
-rw-r--r--  lib/compiler/vsn.mk | 2
-rw-r--r--  lib/crypto/c_src/crypto.c | 295
-rw-r--r--  lib/crypto/c_src/crypto_callback.c | 6
-rw-r--r--  lib/crypto/c_src/otp_test_engine.c | 178
-rw-r--r--  lib/crypto/doc/src/algorithm_details.xml | 106
-rw-r--r--  lib/crypto/doc/src/crypto.xml | 11
-rw-r--r--  lib/crypto/src/crypto.erl | 44
-rw-r--r--  lib/crypto/test/crypto_SUITE.erl | 619
-rw-r--r--  lib/crypto/test/engine_SUITE.erl | 147
-rw-r--r--  lib/eldap/doc/src/notes.xml | 33
-rw-r--r--  lib/et/src/et_wx_viewer.erl | 37
-rw-r--r--  lib/hipe/main/hipe.erl | 2
-rw-r--r--  lib/inets/src/http_client/httpc_request.erl | 23
-rw-r--r--  lib/inets/test/httpc_SUITE.erl | 85
-rw-r--r--  lib/kernel/doc/src/inet.xml | 194
-rw-r--r--  lib/kernel/doc/src/logger.xml | 97
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml | 35
-rw-r--r--  lib/kernel/doc/src/logger_disk_log_h.xml | 10
-rw-r--r--  lib/kernel/doc/src/logger_formatter.xml | 2
-rw-r--r--  lib/kernel/doc/src/logger_std_h.xml | 4
-rw-r--r--  lib/kernel/doc/src/notes.xml | 16
-rw-r--r--  lib/kernel/src/inet.erl | 82
-rw-r--r--  lib/kernel/src/inet_int.hrl | 1
-rw-r--r--  lib/kernel/src/inet_tcp_dist.erl | 2
-rw-r--r--  lib/kernel/src/kernel.appup.src | 45
-rw-r--r--  lib/kernel/src/logger.erl | 35
-rw-r--r--  lib/kernel/src/logger_config.erl | 4
-rw-r--r--  lib/kernel/src/logger_disk_log_h.erl | 720
-rw-r--r--  lib/kernel/src/logger_h_common.erl | 658
-rw-r--r--  lib/kernel/src/logger_h_common.hrl | 16
-rw-r--r--  lib/kernel/src/logger_server.erl | 146
-rw-r--r--  lib/kernel/src/logger_simple_h.erl | 7
-rw-r--r--  lib/kernel/src/logger_std_h.erl | 784
-rw-r--r--  lib/kernel/src/net_kernel.erl | 3
-rw-r--r--  lib/kernel/test/gen_tcp_misc_SUITE.erl | 167
-rw-r--r--  lib/kernel/test/inet_SUITE.erl | 172
-rw-r--r--  lib/kernel/test/inet_sockopt_SUITE.erl | 9
-rw-r--r--  lib/kernel/test/logger_SUITE.erl | 12
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl | 297
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl | 256
-rw-r--r--  lib/kernel/test/sendfile_SUITE.erl | 26
-rw-r--r--  lib/kernel/test/seq_trace_SUITE.erl | 39
-rw-r--r--  lib/kernel/vsn.mk | 2
-rw-r--r--  lib/observer/src/Makefile | 1
-rw-r--r--  lib/observer/src/cdv_html_wx.erl | 26
-rw-r--r--  lib/observer/src/cdv_persistent_cb.erl | 32
-rw-r--r--  lib/observer/src/cdv_wx.erl | 16
-rw-r--r--  lib/observer/src/crashdump_viewer.erl | 313
-rw-r--r--  lib/observer/src/observer.app.src | 1
-rw-r--r--  lib/observer/src/observer_html_lib.erl | 3
-rw-r--r--  lib/observer/test/crashdump_helper.erl | 67
-rw-r--r--  lib/observer/test/crashdump_viewer_SUITE.erl | 39
-rw-r--r--  lib/os_mon/c_src/cpu_sup.c | 50
-rw-r--r--  lib/os_mon/src/cpu_sup.erl | 26
-rw-r--r--  lib/public_key/asn1/OTP-PKIX.asn1 | 24
-rw-r--r--  lib/public_key/doc/src/notes.xml | 16
-rw-r--r--  lib/public_key/doc/src/public_key.xml | 15
-rw-r--r--  lib/public_key/src/pubkey_pem.erl | 11
-rw-r--r--  lib/public_key/src/pubkey_ssh.erl | 99
-rw-r--r--  lib/public_key/src/public_key.erl | 23
-rw-r--r--  lib/public_key/test/public_key_SUITE.erl | 11
-rw-r--r--  lib/public_key/vsn.mk | 2
-rw-r--r--  lib/reltool/src/reltool_utils.erl | 13
-rw-r--r--  lib/reltool/test/reltool_server_SUITE.erl | 16
-rw-r--r--  lib/sasl/src/sasl.app.src | 2
-rw-r--r--  lib/sasl/src/sasl.appup.src | 8
-rw-r--r--  lib/sasl/src/systools_make.erl | 4
-rw-r--r--  lib/ssh/doc/src/Makefile | 3
-rw-r--r--  lib/ssh/doc/src/notes.xml | 17
-rw-r--r--  lib/ssh/doc/src/ref_man.xml | 1
-rw-r--r--  lib/ssh/doc/src/specs.xml | 1
-rw-r--r--  lib/ssh/doc/src/ssh.xml | 113
-rw-r--r--  lib/ssh/doc/src/ssh_app.xml | 15
-rw-r--r--  lib/ssh/doc/src/ssh_file.xml | 285
-rw-r--r--  lib/ssh/doc/src/ssh_sftp.xml | 1
-rw-r--r--  lib/ssh/doc/src/terminology.xml | 185
-rw-r--r--  lib/ssh/doc/src/usersguide.xml | 1
-rw-r--r--  lib/ssh/doc/src/using_ssh.xml | 11
-rw-r--r--  lib/ssh/src/ssh.hrl | 36
-rw-r--r--  lib/ssh/src/ssh_auth.erl | 112
-rw-r--r--  lib/ssh/src/ssh_connection_handler.erl | 65
-rw-r--r--  lib/ssh/src/ssh_file.erl | 31
-rw-r--r--  lib/ssh/src/ssh_message.erl | 8
-rw-r--r--  lib/ssh/src/ssh_options.erl | 24
-rw-r--r--  lib/ssh/src/ssh_transport.erl | 76
-rw-r--r--  lib/ssh/test/.gitignore | 5
-rw-r--r--  lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl | 20
-rw-r--r--  lib/ssh/test/property_test/ssh_eqc_client_server.erl | 230
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE.erl | 7
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519 | 7
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519.pub | 1
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448 | 10
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448.pub | 1
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key | 7
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key | 10
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE.erl | 26
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/id_ed25519 | 7
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/id_ed25519.pub | 1
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/id_ed448 | 10
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/id_ed448.pub | 1
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key | 7
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key | 10
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE.erl | 21
-rwxr-xr-x  lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all | 6
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key | 7
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key | 10
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519 | 7
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519.pub | 1
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448 | 10
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448.pub | 1
-rw-r--r--  lib/ssh/test/ssh_engine_SUITE.erl | 11
-rw-r--r--  lib/ssh/test/ssh_options_SUITE.erl | 39
-rw-r--r--  lib/ssh/test/ssh_property_test_SUITE.erl | 7
-rw-r--r--  lib/ssh/test/ssh_test_lib.erl | 25
-rw-r--r--  lib/ssl/doc/src/notes.xml | 34
-rw-r--r--  lib/ssl/doc/src/ssl_app.xml | 14
-rw-r--r--  lib/ssl/src/dtls_connection.erl | 144
-rw-r--r--  lib/ssl/src/dtls_record.erl | 22
-rw-r--r--  lib/ssl/src/inet_tls_dist.erl | 2
-rw-r--r--  lib/ssl/src/ssl_cipher.erl | 90
-rw-r--r--  lib/ssl/src/ssl_cipher.hrl | 3
-rw-r--r--  lib/ssl/src/ssl_connection.erl | 255
-rw-r--r--  lib/ssl/src/ssl_handshake.erl | 7
-rw-r--r--  lib/ssl/src/ssl_internal.hrl | 1
-rw-r--r--  lib/ssl/src/ssl_pem_cache.erl | 23
-rw-r--r--  lib/ssl/src/ssl_record.erl | 79
-rw-r--r--  lib/ssl/src/tls_connection.erl | 153
-rw-r--r--  lib/ssl/src/tls_record.erl | 51
-rw-r--r--  lib/ssl/src/tls_sender.erl | 100
-rw-r--r--  lib/ssl/test/ssl_ECC_SUITE.erl | 17
-rw-r--r--  lib/ssl/test/ssl_basic_SUITE.erl | 75
-rw-r--r--  lib/ssl/test/ssl_bench_SUITE.erl | 1
-rw-r--r--  lib/ssl/test/ssl_bench_test_lib.erl | 4
-rw-r--r--  lib/ssl/test/ssl_dist_bench_SUITE.erl | 224
-rw-r--r--  lib/ssl/test/ssl_engine_SUITE.erl | 30
-rw-r--r--  lib/ssl/test/ssl_packet_SUITE.erl | 2
-rw-r--r--  lib/ssl/test/ssl_payload_SUITE.erl | 654
-rw-r--r--  lib/ssl/test/ssl_test_lib.erl | 94
-rw-r--r--  lib/stdlib/doc/src/assert_hrl.xml | 2
-rw-r--r--  lib/stdlib/doc/src/beam_lib.xml | 4
-rw-r--r--  lib/stdlib/doc/src/calendar.xml | 6
-rw-r--r--  lib/stdlib/doc/src/epp.xml | 8
-rw-r--r--  lib/stdlib/doc/src/gen_event.xml | 2
-rw-r--r--  lib/stdlib/doc/src/lists.xml | 8
-rw-r--r--  lib/stdlib/doc/src/notes.xml | 31
-rw-r--r--  lib/stdlib/src/beam_lib.erl | 2
-rw-r--r--  lib/stdlib/src/calendar.erl | 11
-rw-r--r--  lib/stdlib/src/epp.erl | 13
-rw-r--r--  lib/stdlib/src/erl_lint.erl | 3
-rw-r--r--  lib/stdlib/src/ms_transform.erl | 1
-rw-r--r--  lib/stdlib/src/stdlib.appup.src | 6
-rw-r--r--  lib/stdlib/test/calendar_SUITE.erl | 68
-rw-r--r--  lib/stdlib/test/epp_SUITE.erl | 16
-rw-r--r--  lib/stdlib/test/epp_SUITE_data/source_name.erl | 27
-rw-r--r--  lib/stdlib/test/erl_lint_SUITE.erl | 20
-rw-r--r--  lib/stdlib/test/ets_SUITE.erl | 22
-rw-r--r--  lib/stdlib/test/gen_fsm_SUITE.erl | 2
-rw-r--r--  lib/stdlib/test/lists_SUITE.erl | 49
-rw-r--r--  lib/stdlib/test/rand_SUITE.erl | 3
-rw-r--r--  lib/stdlib/test/sys_SUITE.erl | 17
-rw-r--r--  lib/stdlib/test/unicode_util_SUITE.erl | 27
-rwxr-xr-x  lib/stdlib/uc_spec/gen_unicode_mod.escript | 2
-rw-r--r--  lib/tools/emacs/Makefile | 1
-rw-r--r--  lib/tools/emacs/erlang-edoc.el | 1
-rw-r--r--  lib/tools/emacs/erlang-eunit.el | 10
-rw-r--r--  lib/tools/emacs/erlang-pkg.el | 3
-rw-r--r--  lib/tools/emacs/erlang-skels.el | 2
-rw-r--r--  lib/tools/emacs/erlang-test.el | 77
-rw-r--r--  lib/tools/emacs/erlang.el | 58
-rw-r--r--  lib/tools/emacs/erlang_appwiz.el | 13
-rw-r--r--  lib/tools/src/eprof.erl | 52
-rw-r--r--  lib/tools/test/emacs_SUITE.erl | 175
-rw-r--r--  lib/wx/api_gen/wx_gen_cpp.erl | 9
-rw-r--r--  lib/wx/c_src/gen/wxe_funcs.cpp | 10
-rw-r--r--  lib/wx/c_src/wxe_impl.cpp | 4
-rw-r--r--  lib/wx/c_src/wxe_impl.h | 2
195 files changed, 7265 insertions, 3537 deletions
diff --git a/lib/asn1/c_src/asn1_erl_nif.c b/lib/asn1/c_src/asn1_erl_nif.c
index 797be6d4f8..da43af3405 100644
--- a/lib/asn1/c_src/asn1_erl_nif.c
+++ b/lib/asn1/c_src/asn1_erl_nif.c
@@ -999,7 +999,7 @@ static int ber_decode_value(ErlNifEnv* env, ERL_NIF_TERM *value, unsigned char *
while (*ib_index < end_index) {
if ((maybe_ret = ber_decode(env, &term, in_buf, ib_index,
- *ib_index + len)) <= ASN1_ERROR
+ end_index )) <= ASN1_ERROR
)
return maybe_ret;
curr_head = enif_make_list_cell(env, term, curr_head);
diff --git a/lib/asn1/test/asn1_SUITE.erl b/lib/asn1/test/asn1_SUITE.erl
index 5506923341..ab78678110 100644
--- a/lib/asn1/test/asn1_SUITE.erl
+++ b/lib/asn1/test/asn1_SUITE.erl
@@ -63,7 +63,8 @@ groups() ->
constraint_equivalence]},
{ber, Parallel,
- [ber_choiceinseq,
+ [ber_decode_invalid_length,
+ ber_choiceinseq,
% Uses 'SOpttest'
ber_optional,
tagdefault_automatic]},
@@ -665,6 +666,19 @@ module_test(M0, Config, Rule, Opts) ->
end
end.
+ber_decode_invalid_length(_Config) ->
+ Bin = <<48,129,157,48,0,2,1,2,164,0,48,129,154,49,24,48,22,6,
+ 3,85,4,10,19,15,69,120,97,109,112,108,101,32,67,111,
+ 109,112,97,110,121,49,29,48,27,6,9,42,134,72,134,247,
+ 13,1,9,1,22,14,99,97,64,101,120,97,109,112,108,101,46,
+ 99,111,109,49,13,48,11,6,3,85,4,7,19,4,79,117,108,117,
+ 49,26,48,24,6,3,85,4,8,19,17,80,111,104,106,111,105,
+ 115,45,80,111,104,106,97,110,109,97,97,49,11,48,9,6,3,
+ 85,4,6,19,2,70,73,49,19,48,17,6,3,85,4,3,19,10,69,120,
+ 97,109,112,108,101,32,67,65,49,11,48,16,6,3,85,4,11,
+ 19,9,84,101>>,
+ {'EXIT',{error,{asn1,{invalid_value,12}}}} = (catch asn1rt_nif:decode_ber_tlv(Bin)),
+ ok.
ber_choiceinseq(Config) ->
test(Config, fun ber_choiceinseq/3, [ber]).
diff --git a/lib/compiler/doc/src/compile.xml b/lib/compiler/doc/src/compile.xml
index 1a71c83521..cfbd4c7fda 100644
--- a/lib/compiler/doc/src/compile.xml
+++ b/lib/compiler/doc/src/compile.xml
@@ -203,7 +203,8 @@
<tag><c>deterministic</c></tag>
<item>
<p>Omit the <c>options</c> and <c>source</c> tuples in
- the list returned by <c>Module:module_info(compile)</c>.
+ the list returned by <c>Module:module_info(compile)</c>, and
+ reduce the paths in stack traces to the module name alone.
This option will make it easier to achieve reproducible builds.
</p>
</item>
@@ -347,8 +348,8 @@ module.beam: module.erl \
<tag><c>{source,FileName}</c></tag>
<item>
- <p>Sets the value of the source, as returned by
- <c>module_info(compile)</c>.</p>
+ <p>Overrides the source file name as presented in
+ <c>module_info(compile)</c> and stack traces.</p>
</item>
<tag><c>{outdir,Dir}</c></tag>
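(Editor's illustration, not part of the patch: a minimal Erlang sketch of how the two options documented above are passed to the compiler. The module name simple and file name simple.erl are assumed.)

    %% With 'deterministic', module_info(compile) omits the 'options' and
    %% 'source' tuples, and source paths in stack traces are reduced to
    %% the base name of the file.
    {ok, simple} = compile:file("simple.erl", [deterministic]).

    %% {source,FileName} overrides the recorded source name explicitly.
    {ok, simple} = compile:file("simple.erl", [{source, "simple.erl"}]).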
diff --git a/lib/compiler/doc/src/notes.xml b/lib/compiler/doc/src/notes.xml
index 5024310788..e0e5bc832b 100644
--- a/lib/compiler/doc/src/notes.xml
+++ b/lib/compiler/doc/src/notes.xml
@@ -32,6 +32,21 @@
<p>This document describes the changes made to the Compiler
application.</p>
+<section><title>Compiler 7.2.7</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fixed a bug where incorrect code was generated
+ following a binary match guard.</p>
+ <p>
+ Own Id: OTP-15353 Aux Id: ERL-753 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Compiler 7.2.6</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/compiler/src/beam_jump.erl b/lib/compiler/src/beam_jump.erl
index 9eee56d604..22974da398 100644
--- a/lib/compiler/src/beam_jump.erl
+++ b/lib/compiler/src/beam_jump.erl
@@ -128,7 +128,7 @@
%%% on the program state.
%%%
--import(lists, [reverse/1,reverse/2,foldl/3]).
+-import(lists, [dropwhile/2,reverse/1,reverse/2,foldl/3]).
-type instruction() :: beam_utils:instruction().
@@ -411,14 +411,19 @@ opt_useless_loads([{test,_,{f,L},_}=I|Is], L, St) ->
opt_useless_loads(Is, _L, St) ->
{Is,St}.
-opt_useless_block_loads([{set,[Dst],_,_}=I|Is], L, Index) ->
- BlockJump = [{block,Is},{jump,{f,L}}],
+opt_useless_block_loads([{set,[Dst],_,_}=I|Is0], L, Index) ->
+ BlockJump = [{block,Is0},{jump,{f,L}}],
case beam_utils:is_killed(Dst, BlockJump, Index) of
true ->
- %% The register is killed and not used, we can remove the load
+ %% The register is killed and not used, we can remove the load.
+ %% Remove any `put` instructions in case we just
+ %% removed a `put_tuple` instruction.
+ Is = dropwhile(fun({set,_,_,put}) -> true;
+ (_) -> false
+ end, Is0),
opt_useless_block_loads(Is, L, Index);
false ->
- [I|opt_useless_block_loads(Is, L, Index)]
+ [I|opt_useless_block_loads(Is0, L, Index)]
end;
opt_useless_block_loads([I|Is], L, Index) ->
[I|opt_useless_block_loads(Is, L, Index)];
diff --git a/lib/compiler/src/beam_utils.erl b/lib/compiler/src/beam_utils.erl
index 5580d2f123..6b2ab5a2a4 100644
--- a/lib/compiler/src/beam_utils.erl
+++ b/lib/compiler/src/beam_utils.erl
@@ -745,8 +745,11 @@ check_liveness_block_2(R, {gc_bif,Op,{f,Lbl}}, Ss, St) ->
check_liveness_block_3(R, Lbl, {Op,length(Ss)}, St);
check_liveness_block_2(R, {bif,Op,{f,Lbl}}, Ss, St) ->
Arity = length(Ss),
+
+ %% Note that is_function/2 is a type test but is not safe.
case erl_internal:comp_op(Op, Arity) orelse
- erl_internal:new_type_test(Op, Arity) of
+ (erl_internal:new_type_test(Op, Arity) andalso
+ erl_bifs:is_safe(erlang, Op, Arity)) of
true ->
{killed,St};
false ->
@@ -1115,6 +1118,10 @@ defs([{bs_init,{f,L},_,Live,_,Dst}=I|Is], Regs0, D) ->
end,
Regs = def_regs([Dst], Regs1),
[I|defs(Is, Regs, update_regs(L, Regs, D))];
+defs([{test,bs_start_match2,{f,L},Live,_,Dst}=I|Is], _Regs, D) ->
+ Regs0 = init_def_regs(Live),
+ Regs = def_regs([Dst], Regs0),
+ [I|defs(Is, Regs, update_regs(L, Regs0, D))];
defs([{bs_put,{f,L},_,_}=I|Is], Regs, D) ->
[I|defs(Is, Regs, update_regs(L, Regs, D))];
defs([build_stacktrace=I|Is], _Regs, D) ->
diff --git a/lib/compiler/src/compile.erl b/lib/compiler/src/compile.erl
index 562d57a6ef..6510571441 100644
--- a/lib/compiler/src/compile.erl
+++ b/lib/compiler/src/compile.erl
@@ -931,11 +931,17 @@ parse_module(_Code, St0) ->
end.
do_parse_module(DefEncoding, #compile{ifile=File,options=Opts,dir=Dir}=St) ->
+ SourceName0 = proplists:get_value(source, Opts, File),
+ SourceName = case member(deterministic, Opts) of
+ true -> filename:basename(SourceName0);
+ false -> SourceName0
+ end,
R = epp:parse_file(File,
- [{includes,[".",Dir|inc_paths(Opts)]},
- {macros,pre_defs(Opts)},
- {default_encoding,DefEncoding},
- extra]),
+ [{includes,[".",Dir|inc_paths(Opts)]},
+ {source_name, SourceName},
+ {macros,pre_defs(Opts)},
+ {default_encoding,DefEncoding},
+ extra]),
case R of
{ok,Forms,Extra} ->
Encoding = proplists:get_value(encoding, Extra),
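(Editor's illustration, not part of the patch: the {source_name, Name} option that the hunk above passes to epp:parse_file/2 can also be used directly. The paths below are assumptions.)

    %% Parse a module, but record "simple.erl" instead of the full path
    %% in the generated -file attributes.
    {ok, Forms} = epp:parse_file("/tmp/build/src/simple.erl",
                                 [{source_name, "simple.erl"},
                                  {includes, ["include"]}]).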
diff --git a/lib/compiler/src/sys_core_fold.erl b/lib/compiler/src/sys_core_fold.erl
index 3a65b40fa5..1681d97efb 100644
--- a/lib/compiler/src/sys_core_fold.erl
+++ b/lib/compiler/src/sys_core_fold.erl
@@ -2635,12 +2635,20 @@ opt_build_stacktrace(#c_let{vars=[#c_var{name=Cooked}],
#c_call{module=#c_literal{val=erlang},
name=#c_literal{val=raise},
args=[Class,Exp,#c_var{name=Cooked}]} ->
- %% The stacktrace is only used in a call to erlang:raise/3.
- %% There is no need to build the stacktrace. Replace the
- %% call to erlang:raise/3 with the the raw_raise/3 instruction,
- %% which will use a raw stacktrace.
- #c_primop{name=#c_literal{val=raw_raise},
- args=[Class,Exp,RawStk]};
+ case core_lib:is_var_used(Cooked, #c_cons{hd=Class,tl=Exp}) of
+ true ->
+ %% Not safe. The stacktrace is used in the class or
+ %% reason.
+ Let;
+ false ->
+ %% The stacktrace is only used in the last
+ %% argument for erlang:raise/3. There is no need
+ %% to build the stacktrace. Replace the call to
+ %% erlang:raise/3 with the the raw_raise/3
+ %% instruction, which will use a raw stacktrace.
+ #c_primop{name=#c_literal{val=raw_raise},
+ args=[Class,Exp,RawStk]}
+ end;
#c_let{vars=[#c_var{name=V}],arg=Arg,body=B0} when V =/= Cooked ->
case core_lib:is_var_used(Cooked, Arg) of
false ->
diff --git a/lib/compiler/test/beam_jump_SUITE.erl b/lib/compiler/test/beam_jump_SUITE.erl
index c61e4ab65c..faedc0c1f1 100644
--- a/lib/compiler/test/beam_jump_SUITE.erl
+++ b/lib/compiler/test/beam_jump_SUITE.erl
@@ -21,7 +21,8 @@
-export([all/0,suite/0,groups/0,init_per_suite/1,end_per_suite/1,
init_per_group/2,end_per_group/2,
- undefined_label/1,ambiguous_catch_try_state/1]).
+ undefined_label/1,ambiguous_catch_try_state/1,
+ build_tuple/1]).
suite() ->
[{ct_hooks,[ts_install_cth]}].
@@ -32,7 +33,8 @@ all() ->
groups() ->
[{p,[parallel],
[undefined_label,
- ambiguous_catch_try_state
+ ambiguous_catch_try_state,
+ build_tuple
]}].
init_per_suite(Config) ->
@@ -72,3 +74,16 @@ river() -> song.
checks(Wanted) ->
%% Must be one line to cause the unsafe optimization.
{catch case river() of sheet -> begin +Wanted, if "da" -> Wanted end end end, catch case river() of sheet -> begin + Wanted, if "da" -> Wanted end end end}.
+
+-record(message2, {id, p1}).
+-record(message3, {id, p1, p2}).
+
+build_tuple(_Config) ->
+ {'EXIT',{{badrecord,message3},_}} = (catch do_build_tuple(#message2{})),
+ ok.
+
+do_build_tuple(Message) ->
+ if is_record(Message, message2) ->
+ Res = {res, rand:uniform(100)},
+ {Message#message3.id, Res}
+ end.
diff --git a/lib/compiler/test/beam_utils_SUITE.erl b/lib/compiler/test/beam_utils_SUITE.erl
index ac19305d69..ff0f72d519 100644
--- a/lib/compiler/test/beam_utils_SUITE.erl
+++ b/lib/compiler/test/beam_utils_SUITE.erl
@@ -26,7 +26,7 @@
select/1,y_catch/1,otp_8949_b/1,liveopt/1,coverage/1,
y_registers/1,user_predef/1,scan_f/1,cafu/1,
receive_label/1,read_size_file_version/1,not_used/1,
- is_used_fr/1]).
+ is_used_fr/1,unsafe_is_function/1]).
-export([id/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -53,7 +53,8 @@ groups() ->
cafu,
read_size_file_version,
not_used,
- is_used_fr
+ is_used_fr,
+ unsafe_is_function
]}].
init_per_suite(Config) ->
@@ -570,6 +571,24 @@ is_used_fr(X, Y) ->
end,
X ! 1.
+%% ERL-778.
+unsafe_is_function(Config) ->
+ {undefined,any} = unsafe_is_function(undefined, any),
+ {ok,any} = unsafe_is_function(fun() -> ok end, any),
+ {'EXIT',{{case_clause,_},_}} = (catch unsafe_is_function(fun(_) -> ok end, any)),
+ ok.
+
+unsafe_is_function(F, M) ->
+ %% There would be an internal consistency failure:
+ %% Instruction: {bif,is_function,{f,0},[{x,0},{integer,0}],{x,2}}
+ %% Error: {uninitialized_reg,{y,0}}:
+
+ NewValue = case is_function(F, 0) of
+ true -> F();
+ false when F =:= undefined -> undefined
+ end,
+ {NewValue,M}.
+
%% The identity function.
id(I) -> I.
diff --git a/lib/compiler/test/bs_match_SUITE.erl b/lib/compiler/test/bs_match_SUITE.erl
index e97dbac8a6..a751f6fda5 100644
--- a/lib/compiler/test/bs_match_SUITE.erl
+++ b/lib/compiler/test/bs_match_SUITE.erl
@@ -40,7 +40,8 @@
map_and_binary/1,unsafe_branch_caching/1,
bad_literals/1,good_literals/1,constant_propagation/1,
parse_xml/1,get_payload/1,escape/1,num_slots_different/1,
- beam_bsm/1,guard/1,is_ascii/1,non_opt_eq/1,erl_689/1]).
+ beam_bsm/1,guard/1,is_ascii/1,non_opt_eq/1,erl_689/1,
+ bs_start_match2_defs/1]).
-export([coverage_id/1,coverage_external_ignore/2]).
@@ -72,7 +73,8 @@ groups() ->
map_and_binary,unsafe_branch_caching,
bad_literals,good_literals,constant_propagation,parse_xml,
get_payload,escape,num_slots_different,
- beam_bsm,guard,is_ascii,non_opt_eq,erl_689]}].
+ beam_bsm,guard,is_ascii,non_opt_eq,erl_689,
+ bs_start_match2_defs]}].
init_per_suite(Config) ->
@@ -1749,6 +1751,19 @@ do_erl_689_2b(_, <<Length, Data/binary>>) ->
{{Y, M, D}, Rest}
end.
+%% ERL-753
+
+bs_start_match2_defs(_Config) ->
+ {<<"http://127.0.0.1:1234/vsaas/hello">>} = api_url(<<"hello">>, dummy),
+ {"https://127.0.0.1:4321/vsaas/hello"} = api_url({https, "hello"}, dummy).
+
+api_url(URL, Auth) ->
+ Header = [],
+ case URL of
+ <<_/binary>> -> {<<"http://127.0.0.1:1234/vsaas/",URL/binary>>};
+ {https, [_|_] = URL1} -> {"https://127.0.0.1:4321/vsaas/"++URL1}
+ end.
+
check(F, R) ->
R = F().
diff --git a/lib/compiler/test/compile_SUITE.erl b/lib/compiler/test/compile_SUITE.erl
index 1ecae06128..6b230710b3 100644
--- a/lib/compiler/test/compile_SUITE.erl
+++ b/lib/compiler/test/compile_SUITE.erl
@@ -36,7 +36,7 @@
core_roundtrip/1, asm/1, optimized_guards/1,
sys_pre_attributes/1, dialyzer/1,
warnings/1, pre_load_check/1, env_compiler_options/1,
- bc_options/1, deterministic_include/1
+ bc_options/1, deterministic_include/1, deterministic_paths/1
]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -53,7 +53,7 @@ all() ->
cover, env, core_pp, core_roundtrip, asm, optimized_guards,
sys_pre_attributes, dialyzer, warnings, pre_load_check,
env_compiler_options, custom_debug_info, bc_options,
- custom_compile_info, deterministic_include].
+ custom_compile_info, deterministic_include, deterministic_paths].
groups() ->
[].
@@ -1531,6 +1531,30 @@ deterministic_include(Config) when is_list(Config) ->
ok.
+deterministic_paths(Config) when is_list(Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+
+ %% Files without +deterministic should differ if they were compiled from a
+ %% different directory.
+ true = deterministic_paths_1(DataDir, "simple", []),
+
+ %% ... but files with +deterministic shouldn't.
+ false = deterministic_paths_1(DataDir, "simple", [deterministic]),
+
+ ok.
+
+deterministic_paths_1(DataDir, Name, Opts) ->
+ Simple = filename:join(DataDir, "simple"),
+ {ok, Cwd} = file:get_cwd(),
+ try
+ {ok,_,A} = compile:file(Simple, [binary | Opts]),
+ ok = file:set_cwd(DataDir),
+ {ok,_,B} = compile:file(Name, [binary | Opts]),
+ A =/= B
+ after
+ file:set_cwd(Cwd)
+ end.
+
%%%
%%% Utilities.
%%%
diff --git a/lib/compiler/test/trycatch_SUITE.erl b/lib/compiler/test/trycatch_SUITE.erl
index 1b7ef4ddb0..8f9cd9ab1e 100644
--- a/lib/compiler/test/trycatch_SUITE.erl
+++ b/lib/compiler/test/trycatch_SUITE.erl
@@ -1189,7 +1189,8 @@ bad_raise(Expr) ->
test_raise(Expr) ->
test_raise_1(Expr),
test_raise_2(Expr),
- test_raise_3(Expr).
+ test_raise_3(Expr),
+ test_raise_4(Expr).
test_raise_1(Expr) ->
erase(exception),
@@ -1263,5 +1264,28 @@ do_test_raise_3(Expr) ->
erlang:raise(exit, {exception,C,E}, Stk)
end.
+test_raise_4(Expr) ->
+ try
+ do_test_raise_4(Expr)
+ catch
+ exit:{exception,C,E,Stk}:Stk ->
+ try
+ Expr()
+ catch
+ C:E:S ->
+ [StkTop|_] = S,
+ [StkTop|_] = Stk
+ end
+ end.
+
+do_test_raise_4(Expr) ->
+ try
+ Expr()
+ catch
+ C:E:Stk ->
+ %% Here the stacktrace must be built.
+ erlang:raise(exit, {exception,C,E,Stk}, Stk)
+ end.
+
id(I) -> I.
diff --git a/lib/compiler/vsn.mk b/lib/compiler/vsn.mk
index ab707885f4..92f8aec424 100644
--- a/lib/compiler/vsn.mk
+++ b/lib/compiler/vsn.mk
@@ -1 +1 @@
-COMPILER_VSN = 7.2.6
+COMPILER_VSN = 7.2.7
diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c
index d40d285f86..df607732bf 100644
--- a/lib/crypto/c_src/crypto.c
+++ b/lib/crypto/c_src/crypto.c
@@ -173,10 +173,13 @@
#endif
// (test for >= 1.1.1pre8)
-#if OPENSSL_VERSION_NUMBER >= (PACKED_OPENSSL_VERSION_PLAIN(1,1,1) - 7) \
+#if OPENSSL_VERSION_NUMBER >= (PACKED_OPENSSL_VERSION_PLAIN(1,1,1) -7) \
&& !defined(HAS_LIBRESSL) \
&& defined(HAVE_EC)
# define HAVE_ED_CURVE_DH
+# if OPENSSL_VERSION_NUMBER >= (PACKED_OPENSSL_VERSION_PLAIN(1,1,1))
+# define HAVE_EDDSA
+# endif
#endif
#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION(0,9,8,'c')
@@ -189,6 +192,10 @@
# define HAVE_GCM
# define HAVE_CCM
# define HAVE_CMAC
+# if defined(RSA_PKCS1_OAEP_PADDING)
+# define HAVE_RSA_OAEP_PADDING
+# endif
+# define HAVE_RSA_MGF1_MD
# if OPENSSL_VERSION_NUMBER < PACKED_OPENSSL_VERSION(1,0,1,'d')
# define HAVE_GCM_EVP_DECRYPT_BUG
# endif
@@ -628,10 +635,8 @@ static ErlNifFunc nif_funcs[] = {
{"rsa_generate_key_nif", 2, rsa_generate_key_nif},
{"dh_generate_key_nif", 4, dh_generate_key_nif},
{"dh_compute_key_nif", 3, dh_compute_key_nif},
-
{"evp_compute_key_nif", 3, evp_compute_key_nif},
{"evp_generate_key_nif", 1, evp_generate_key_nif},
-
{"privkey_to_pubkey_nif", 2, privkey_to_pubkey_nif},
{"srp_value_B_nif", 5, srp_value_B_nif},
{"srp_user_secret_nif", 7, srp_user_secret_nif},
@@ -738,6 +743,12 @@ static ERL_NIF_TERM atom_x25519;
static ERL_NIF_TERM atom_x448;
#endif
+static ERL_NIF_TERM atom_eddsa;
+#ifdef HAVE_EDDSA
+static ERL_NIF_TERM atom_ed25519;
+static ERL_NIF_TERM atom_ed448;
+#endif
+
static ERL_NIF_TERM atom_rsa_mgf1_md;
static ERL_NIF_TERM atom_rsa_oaep_label;
static ERL_NIF_TERM atom_rsa_oaep_md;
@@ -1161,6 +1172,7 @@ static int initialize(ErlNifEnv* env, ERL_NIF_TERM load_info)
atom_ppbasis = enif_make_atom(env,"ppbasis");
atom_onbasis = enif_make_atom(env,"onbasis");
#endif
+
atom_aes_cfb8 = enif_make_atom(env, "aes_cfb8");
atom_aes_cfb128 = enif_make_atom(env, "aes_cfb128");
#ifdef HAVE_GCM
@@ -1191,6 +1203,11 @@ static int initialize(ErlNifEnv* env, ERL_NIF_TERM load_info)
atom_x25519 = enif_make_atom(env,"x25519");
atom_x448 = enif_make_atom(env,"x448");
#endif
+ atom_eddsa = enif_make_atom(env,"eddsa");
+#ifdef HAVE_EDDSA
+ atom_ed25519 = enif_make_atom(env,"ed25519");
+ atom_ed448 = enif_make_atom(env,"ed448");
+#endif
atom_rsa_mgf1_md = enif_make_atom(env,"rsa_mgf1_md");
atom_rsa_oaep_label = enif_make_atom(env,"rsa_oaep_label");
atom_rsa_oaep_md = enif_make_atom(env,"rsa_oaep_md");
@@ -1332,15 +1349,15 @@ static void unload(ErlNifEnv* env, void* priv_data)
static int algo_hash_cnt, algo_hash_fips_cnt;
static ERL_NIF_TERM algo_hash[12]; /* increase when extending the list */
static int algo_pubkey_cnt, algo_pubkey_fips_cnt;
-static ERL_NIF_TERM algo_pubkey[11]; /* increase when extending the list */
+static ERL_NIF_TERM algo_pubkey[12]; /* increase when extending the list */
static int algo_cipher_cnt, algo_cipher_fips_cnt;
static ERL_NIF_TERM algo_cipher[25]; /* increase when extending the list */
static int algo_mac_cnt, algo_mac_fips_cnt;
static ERL_NIF_TERM algo_mac[3]; /* increase when extending the list */
static int algo_curve_cnt, algo_curve_fips_cnt;
-static ERL_NIF_TERM algo_curve[87]; /* increase when extending the list */
+static ERL_NIF_TERM algo_curve[89]; /* increase when extending the list */
static int algo_rsa_opts_cnt, algo_rsa_opts_fips_cnt;
-static ERL_NIF_TERM algo_rsa_opts[10]; /* increase when extending the list */
+static ERL_NIF_TERM algo_rsa_opts[11]; /* increase when extending the list */
static void init_algorithms_types(ErlNifEnv* env)
{
@@ -1390,6 +1407,10 @@ static void init_algorithms_types(ErlNifEnv* env)
#endif
// Non-validated algorithms follow
algo_pubkey_fips_cnt = algo_pubkey_cnt;
+ // Don't know if Edward curves are fips validated
+#if defined(HAVE_EDDSA)
+ algo_pubkey[algo_pubkey_cnt++] = enif_make_atom(env, "eddsa");
+#endif
algo_pubkey[algo_pubkey_cnt++] = enif_make_atom(env, "srp");
// Validated algorithms first
@@ -1550,6 +1571,10 @@ static void init_algorithms_types(ErlNifEnv* env)
#endif
#endif
//--
+#ifdef HAVE_EDDSA
+ algo_curve[algo_curve_cnt++] = enif_make_atom(env,"ed25519");
+ algo_curve[algo_curve_cnt++] = enif_make_atom(env,"ed448");
+#endif
#ifdef HAVE_ED_CURVE_DH
algo_curve[algo_curve_cnt++] = enif_make_atom(env,"x25519");
algo_curve[algo_curve_cnt++] = enif_make_atom(env,"x448");
@@ -1562,7 +1587,12 @@ static void init_algorithms_types(ErlNifEnv* env)
algo_rsa_opts[algo_rsa_opts_cnt++] = enif_make_atom(env,"rsa_pkcs1_pss_padding");
algo_rsa_opts[algo_rsa_opts_cnt++] = enif_make_atom(env,"rsa_pss_saltlen");
# endif
+# ifdef HAVE_RSA_MGF1_MD
algo_rsa_opts[algo_rsa_opts_cnt++] = enif_make_atom(env,"rsa_mgf1_md");
+# endif
+# ifdef HAVE_RSA_OAEP_PADDING
+ algo_rsa_opts[algo_rsa_opts_cnt++] = enif_make_atom(env,"rsa_pkcs1_oaep_padding");
+# endif
# ifdef HAVE_RSA_OAEP_MD
algo_rsa_opts[algo_rsa_opts_cnt++] = enif_make_atom(env,"rsa_oaep_label");
algo_rsa_opts[algo_rsa_opts_cnt++] = enif_make_atom(env,"rsa_oaep_md");
@@ -3206,6 +3236,36 @@ static int get_rsa_public_key(ErlNifEnv* env, ERL_NIF_TERM key, RSA *rsa)
return 1;
}
+#ifdef HAVE_EDDSA
+ static int get_eddsa_key(ErlNifEnv* env, int public, ERL_NIF_TERM key, EVP_PKEY **pkey)
+{
+ /* key=[K] */
+ ERL_NIF_TERM head, tail, tail2, algo;
+ ErlNifBinary bin;
+ int type;
+
+ if (!enif_get_list_cell(env, key, &head, &tail)
+ || !enif_inspect_binary(env, head, &bin)
+ || !enif_get_list_cell(env, tail, &algo, &tail2)
+ || !enif_is_empty_list(env, tail2)) {
+ return 0;
+ }
+ if (algo == atom_ed25519) type = EVP_PKEY_ED25519;
+ else if (algo == atom_ed448) type = EVP_PKEY_ED448;
+ else
+ return 0;
+
+ if (public)
+ *pkey = EVP_PKEY_new_raw_public_key(type, NULL, bin.data, bin.size);
+ else
+ *pkey = EVP_PKEY_new_raw_private_key(type, NULL, bin.data, bin.size);
+
+ if (!pkey)
+ return 0;
+ return 1;
+}
+#endif
+
static int get_dss_private_key(ErlNifEnv* env, ERL_NIF_TERM key, DSA *dsa)
{
/* key=[P,Q,G,KEY] */
@@ -4174,9 +4234,9 @@ static ERL_NIF_TERM evp_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_
{
#ifdef HAVE_ED_CURVE_DH
int type;
- EVP_PKEY_CTX *ctx;
+ EVP_PKEY_CTX *ctx = NULL;
ErlNifBinary peer_bin, my_bin, key_bin;
- EVP_PKEY *peer_key, *my_key;
+ EVP_PKEY *peer_key = NULL, *my_key = NULL;
size_t max_size;
if (argv[0] == atom_x25519) type = EVP_PKEY_X25519;
@@ -4184,41 +4244,43 @@ static ERL_NIF_TERM evp_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_
else return enif_make_badarg(env);
if (!enif_inspect_binary(env, argv[1], &peer_bin) ||
- !enif_inspect_binary(env, argv[2], &my_bin)) {
- return enif_make_badarg(env);
- }
+ !enif_inspect_binary(env, argv[2], &my_bin))
+ goto return_badarg;
if (!(my_key = EVP_PKEY_new_raw_private_key(type, NULL, my_bin.data, my_bin.size)) ||
- !(ctx = EVP_PKEY_CTX_new(my_key, NULL))) {
- return enif_make_badarg(env);
- }
+ !(ctx = EVP_PKEY_CTX_new(my_key, NULL)))
+ goto return_badarg;
- if (!EVP_PKEY_derive_init(ctx)) {
- return enif_make_badarg(env);
- }
+ if (!EVP_PKEY_derive_init(ctx))
+ goto return_badarg;
if (!(peer_key = EVP_PKEY_new_raw_public_key(type, NULL, peer_bin.data, peer_bin.size)) ||
- !EVP_PKEY_derive_set_peer(ctx, peer_key)) {
- return enif_make_badarg(env);
- }
+ !EVP_PKEY_derive_set_peer(ctx, peer_key))
+ goto return_badarg;
- if (!EVP_PKEY_derive(ctx, NULL, &max_size)) {
- return enif_make_badarg(env);
- }
+ if (!EVP_PKEY_derive(ctx, NULL, &max_size))
+ goto return_badarg;
if (!enif_alloc_binary(max_size, &key_bin) ||
- !EVP_PKEY_derive(ctx, key_bin.data, &key_bin.size)) {
- return enif_make_badarg(env);
- }
+ !EVP_PKEY_derive(ctx, key_bin.data, &key_bin.size))
+ goto return_badarg;
if (key_bin.size < max_size) {
size_t actual_size = key_bin.size;
- if (!enif_realloc_binary(&key_bin, actual_size)) {
- return enif_make_badarg(env);
- }
+ if (!enif_realloc_binary(&key_bin, actual_size))
+ goto return_badarg;
}
+ EVP_PKEY_free(my_key);
+ EVP_PKEY_free(peer_key);
+ EVP_PKEY_CTX_free(ctx);
return enif_make_binary(env, &key_bin);
+
+return_badarg:
+ if (my_key) EVP_PKEY_free(my_key);
+ if (peer_key) EVP_PKEY_free(peer_key);
+ if (ctx) EVP_PKEY_CTX_free(ctx);
+ return enif_make_badarg(env);
#else
return atom_notsup;
#endif
@@ -4229,7 +4291,7 @@ static ERL_NIF_TERM evp_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF
{
#ifdef HAVE_ED_CURVE_DH
int type;
- EVP_PKEY_CTX *ctx;
+ EVP_PKEY_CTX *ctx = NULL;
EVP_PKEY *pkey = NULL;
ERL_NIF_TERM ret_pub, ret_prv;
size_t key_len;
@@ -4240,22 +4302,30 @@ static ERL_NIF_TERM evp_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF
if (!(ctx = EVP_PKEY_CTX_new_id(type, NULL))) return enif_make_badarg(env);
- if (!EVP_PKEY_keygen_init(ctx)) return atom_error;
- if (!EVP_PKEY_keygen(ctx, &pkey)) return atom_error;
+ if (!EVP_PKEY_keygen_init(ctx)) goto return_error;
+ if (!EVP_PKEY_keygen(ctx, &pkey)) goto return_error;
- if (!EVP_PKEY_get_raw_public_key(pkey, NULL, &key_len)) return atom_error;
+ if (!EVP_PKEY_get_raw_public_key(pkey, NULL, &key_len)) goto return_error;
if (!EVP_PKEY_get_raw_public_key(pkey,
enif_make_new_binary(env, key_len, &ret_pub),
&key_len))
- return atom_error;
+ goto return_error;
- if (!EVP_PKEY_get_raw_private_key(pkey, NULL, &key_len)) return atom_error;
+ if (!EVP_PKEY_get_raw_private_key(pkey, NULL, &key_len)) goto return_error;
if (!EVP_PKEY_get_raw_private_key(pkey,
enif_make_new_binary(env, key_len, &ret_prv),
&key_len))
- return atom_error;
+ goto return_error;
+ EVP_PKEY_free(pkey);
+ EVP_PKEY_CTX_free(ctx);
return enif_make_tuple2(env, ret_pub, ret_prv);
+
+return_error:
+ if (pkey) EVP_PKEY_free(pkey);
+ if (ctx) EVP_PKEY_CTX_free(ctx);
+ return atom_error;
+
#else
return atom_notsup;
#endif
@@ -4287,7 +4357,9 @@ static int get_pkey_digest_type(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NIF_
*md = NULL;
if (type == atom_none && algorithm == atom_rsa) return PKEY_OK;
-
+#ifdef HAVE_EDDSA
+ if (algorithm == atom_eddsa) return PKEY_OK;
+#endif
digp = get_digest_type(type);
if (!digp) return PKEY_BADARG;
if (!digp->md.p) return PKEY_NOTSUP;
@@ -4462,7 +4534,7 @@ static int get_engine_and_key_id(ErlNifEnv *env, ERL_NIF_TERM key, char ** id, E
static char *get_key_password(ErlNifEnv *env, ERL_NIF_TERM key) {
ERL_NIF_TERM tmp_term;
ErlNifBinary pwd_bin;
- char *pwd;
+ char *pwd = NULL;
if (enif_get_map_value(env, key, atom_password, &tmp_term) &&
enif_inspect_binary(env, tmp_term, &pwd_bin) &&
zero_terminate(pwd_bin, &pwd)
@@ -4487,16 +4559,17 @@ static int get_pkey_private_key(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NIF_
#ifdef HAS_ENGINE_SUPPORT
/* Use key stored in engine */
ENGINE *e;
- char *id;
+ char *id = NULL;
char *password;
if (!get_engine_and_key_id(env, key, &id, &e))
return PKEY_BADARG;
password = get_key_password(env, key);
*pkey = ENGINE_load_private_key(e, id, NULL, password);
+ if (password) enif_free(password);
+ enif_free(id);
if (!*pkey)
return PKEY_BADARG;
- enif_free(id);
#else
return PKEY_BADARG;
#endif
@@ -4537,6 +4610,14 @@ static int get_pkey_private_key(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NIF_
#else
return PKEY_NOTSUP;
#endif
+ } else if (algorithm == atom_eddsa) {
+#if defined(HAVE_EDDSA)
+ if (!get_eddsa_key(env, 0, key, pkey)) {
+ return PKEY_BADARG;
+ }
+#else
+ return PKEY_NOTSUP;
+#endif
} else if (algorithm == atom_dss) {
DSA *dsa = DSA_new();
@@ -4566,16 +4647,17 @@ static int get_pkey_public_key(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NIF_T
#ifdef HAS_ENGINE_SUPPORT
/* Use key stored in engine */
ENGINE *e;
- char *id;
+ char *id = NULL;
char *password;
if (!get_engine_and_key_id(env, key, &id, &e))
return PKEY_BADARG;
password = get_key_password(env, key);
*pkey = ENGINE_load_public_key(e, id, NULL, password);
+ if (password) enif_free(password);
+ enif_free(id);
if (!pkey)
return PKEY_BADARG;
- enif_free(id);
#else
return PKEY_BADARG;
#endif
@@ -4615,6 +4697,14 @@ static int get_pkey_public_key(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NIF_T
#else
return PKEY_NOTSUP;
#endif
+ } else if (algorithm == atom_eddsa) {
+#if defined(HAVE_EDDSA)
+ if (!get_eddsa_key(env, 1, key, pkey)) {
+ return PKEY_BADARG;
+ }
+#else
+ return PKEY_NOTSUP;
+#endif
} else if (algorithm == atom_dss) {
DSA *dsa = DSA_new();
@@ -4688,21 +4778,23 @@ printf("\r\n");
ctx = EVP_PKEY_CTX_new(pkey, NULL);
if (!ctx) goto badarg;
- if (EVP_PKEY_sign_init(ctx) <= 0) goto badarg;
- if (md != NULL && EVP_PKEY_CTX_set_signature_md(ctx, md) <= 0) goto badarg;
+ if (argv[0] != atom_eddsa) {
+ if (EVP_PKEY_sign_init(ctx) <= 0) goto badarg;
+ if (md != NULL && EVP_PKEY_CTX_set_signature_md(ctx, md) <= 0) goto badarg;
+ }
if (argv[0] == atom_rsa) {
if (EVP_PKEY_CTX_set_rsa_padding(ctx, sig_opt.rsa_padding) <= 0) goto badarg;
-#ifdef HAVE_RSA_PKCS1_PSS_PADDING
+# ifdef HAVE_RSA_PKCS1_PSS_PADDING
if (sig_opt.rsa_padding == RSA_PKCS1_PSS_PADDING) {
if (sig_opt.rsa_mgf1_md != NULL) {
-#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION_PLAIN(1,0,1)
+# ifdef HAVE_RSA_MGF1_MD
if (EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, sig_opt.rsa_mgf1_md) <= 0) goto badarg;
-#else
+# else
EVP_PKEY_CTX_free(ctx);
EVP_PKEY_free(pkey);
return atom_notsup;
-#endif
+# endif
}
if (sig_opt.rsa_pss_saltlen > -2
&& EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, sig_opt.rsa_pss_saltlen) <= 0)
@@ -4711,14 +4803,40 @@ printf("\r\n");
#endif
}
- if (EVP_PKEY_sign(ctx, NULL, &siglen, tbs, tbslen) <= 0) goto badarg;
- enif_alloc_binary(siglen, &sig_bin);
+ if (argv[0] == atom_eddsa) {
+#ifdef HAVE_EDDSA
+ EVP_MD_CTX* mdctx = EVP_MD_CTX_new();
+ if (!EVP_DigestSignInit(mdctx, NULL, NULL, NULL, pkey)) {
+ if (mdctx) EVP_MD_CTX_free(mdctx);
+ goto badarg;
+ }
+
+ if (!EVP_DigestSign(mdctx, NULL, &siglen, tbs, tbslen)) {
+ EVP_MD_CTX_free(mdctx);
+ goto badarg;
+ }
+ enif_alloc_binary(siglen, &sig_bin);
- if (md != NULL) {
- ERL_VALGRIND_ASSERT_MEM_DEFINED(tbs, EVP_MD_size(md));
+ if (!EVP_DigestSign(mdctx, sig_bin.data, &siglen, tbs, tbslen)) {
+ EVP_MD_CTX_free(mdctx);
+ goto badarg;
+ }
+ EVP_MD_CTX_free(mdctx);
+#else
+ goto badarg;
+#endif
}
- i = EVP_PKEY_sign(ctx, sig_bin.data, &siglen, tbs, tbslen);
+ else
+ {
+ if (EVP_PKEY_sign(ctx, NULL, &siglen, tbs, tbslen) <= 0) goto badarg;
+ enif_alloc_binary(siglen, &sig_bin);
+ if (md != NULL) {
+ ERL_VALGRIND_ASSERT_MEM_DEFINED(tbs, EVP_MD_size(md));
+ }
+ i = EVP_PKEY_sign(ctx, sig_bin.data, &siglen, tbs, tbslen);
+ }
+
EVP_PKEY_CTX_free(ctx);
#else
/*printf("Old interface\r\n");
@@ -4826,20 +4944,23 @@ static ERL_NIF_TERM pkey_verify_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
*/
ctx = EVP_PKEY_CTX_new(pkey, NULL);
if (!ctx) goto badarg;
- if (EVP_PKEY_verify_init(ctx) <= 0) goto badarg;
- if (md != NULL && EVP_PKEY_CTX_set_signature_md(ctx, md) <= 0) goto badarg;
+
+ if (argv[0] != atom_eddsa) {
+ if (EVP_PKEY_verify_init(ctx) <= 0) goto badarg;
+ if (md != NULL && EVP_PKEY_CTX_set_signature_md(ctx, md) <= 0) goto badarg;
+ }
if (argv[0] == atom_rsa) {
if (EVP_PKEY_CTX_set_rsa_padding(ctx, sig_opt.rsa_padding) <= 0) goto badarg;
if (sig_opt.rsa_padding == RSA_PKCS1_PSS_PADDING) {
if (sig_opt.rsa_mgf1_md != NULL) {
-#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION_PLAIN(1,0,1)
+# ifdef HAVE_RSA_MGF1_MD
if (EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, sig_opt.rsa_mgf1_md) <= 0) goto badarg;
-#else
+# else
EVP_PKEY_CTX_free(ctx);
EVP_PKEY_free(pkey);
return atom_notsup;
-#endif
+# endif
}
if (sig_opt.rsa_pss_saltlen > -2
&& EVP_PKEY_CTX_set_rsa_pss_saltlen(ctx, sig_opt.rsa_pss_saltlen) <= 0)
@@ -4847,10 +4968,28 @@ static ERL_NIF_TERM pkey_verify_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
}
}
- if (md != NULL) {
- ERL_VALGRIND_ASSERT_MEM_DEFINED(tbs, EVP_MD_size(md));
- }
- i = EVP_PKEY_verify(ctx, sig_bin.data, sig_bin.size, tbs, tbslen);
+ if (argv[0] == atom_eddsa) {
+#ifdef HAVE_EDDSA
+ EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
+
+ if (!EVP_DigestVerifyInit(mdctx, NULL, NULL, NULL, pkey)) {
+ if (mdctx) EVP_MD_CTX_destroy(mdctx);
+ goto badarg;
+ }
+
+ i = EVP_DigestVerify(mdctx, sig_bin.data, sig_bin.size, tbs, tbslen);
+ EVP_MD_CTX_destroy(mdctx);
+#else
+ goto badarg;
+#endif
+ }
+ else
+ {
+ if (md != NULL) {
+ ERL_VALGRIND_ASSERT_MEM_DEFINED(tbs, EVP_MD_size(md));
+ }
+ i = EVP_PKEY_verify(ctx, sig_bin.data, sig_bin.size, tbs, tbslen);
+ }
EVP_PKEY_CTX_free(ctx);
#else
@@ -4932,8 +5071,10 @@ static int get_pkey_crypt_options(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NI
) {
if (tpl_terms[1] == atom_rsa_pkcs1_padding) {
opt->rsa_padding = RSA_PKCS1_PADDING;
+#ifdef HAVE_RSA_OAEP_PADDING
} else if (tpl_terms[1] == atom_rsa_pkcs1_oaep_padding) {
opt->rsa_padding = RSA_PKCS1_OAEP_PADDING;
+#endif
#ifdef HAVE_RSA_SSLV23_PADDING
} else if (tpl_terms[1] == atom_rsa_sslv23_padding) {
opt->rsa_padding = RSA_SSLV23_PADDING;
@@ -4952,7 +5093,7 @@ static int get_pkey_crypt_options(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NI
}
opt->signature_md = opt_md;
} else if (tpl_terms[0] == atom_rsa_mgf1_md && enif_is_atom(env, tpl_terms[1])) {
-#ifndef HAVE_RSA_OAEP_MD
+#ifndef HAVE_RSA_MGF1_MD
if (tpl_terms[1] != atom_sha)
return PKEY_NOTSUP;
#endif
@@ -4992,6 +5133,15 @@ static int get_pkey_crypt_options(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NI
return PKEY_OK;
}
+static size_t size_of_RSA(EVP_PKEY *pkey) {
+ size_t tmplen;
+ RSA *rsa = EVP_PKEY_get1_RSA(pkey);
+ if (rsa == NULL) return 0;
+ tmplen = RSA_size(rsa);
+ RSA_free(rsa);
+ return tmplen;
+}
+
static ERL_NIF_TERM pkey_crypt_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{/* (Algorithm, Data, PublKey=[E,N]|[E,N,D]|[E,N,D,P1,P2,E1,E2,C], Options, IsPrivate, IsEncrypt) */
int i;
@@ -5089,9 +5239,8 @@ static ERL_NIF_TERM pkey_crypt_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
#ifdef HAVE_RSA_SSLV23_PADDING
if (crypt_opt.rsa_padding == RSA_SSLV23_PADDING) {
if (is_encrypt) {
- RSA *rsa = EVP_PKEY_get1_RSA(pkey);
- if (rsa == NULL) goto badarg;
- tmplen = RSA_size(rsa);
+ tmplen = size_of_RSA(pkey);
+ if (tmplen == 0) goto badarg;
if (!enif_alloc_binary(tmplen, &tmp_bin)) goto badarg;
if (RSA_padding_add_SSLv23(tmp_bin.data, tmplen, in_bin.data, in_bin.size) <= 0)
goto badarg;
@@ -5111,7 +5260,7 @@ static ERL_NIF_TERM pkey_crypt_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
if (crypt_opt.rsa_mgf1_md != NULL
&& EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, crypt_opt.rsa_mgf1_md) <= 0) goto badarg;
if (crypt_opt.rsa_oaep_label.data != NULL && crypt_opt.rsa_oaep_label.size > 0) {
- unsigned char *label_copy;
+ unsigned char *label_copy = NULL;
label_copy = OPENSSL_malloc(crypt_opt.rsa_oaep_label.size);
if (label_copy == NULL) goto badarg;
memcpy((void *)(label_copy), (const void *)(crypt_opt.rsa_oaep_label.data),
@@ -5223,14 +5372,11 @@ static ERL_NIF_TERM pkey_crypt_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
if ((i > 0) && argv[0] == atom_rsa && !is_encrypt) {
#ifdef HAVE_RSA_SSLV23_PADDING
if (crypt_opt.rsa_padding == RSA_SSLV23_PADDING) {
- RSA *rsa = EVP_PKEY_get1_RSA(pkey);
unsigned char *p;
- if (rsa == NULL) goto badarg;
- tmplen = RSA_size(rsa);
- if (!enif_alloc_binary(tmplen, &tmp_bin)) {
- RSA_free(rsa);
+ tmplen = size_of_RSA(pkey);
+ if (tmplen == 0) goto badarg;
+ if (!enif_alloc_binary(tmplen, &tmp_bin))
goto badarg;
- }
p = out_bin.data;
p++;
i = RSA_padding_check_SSLv23(tmp_bin.data, tmplen, p, out_bin.size - 1, tmplen);
@@ -5241,7 +5387,6 @@ static ERL_NIF_TERM pkey_crypt_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
tmp_bin = in_bin;
i = 1;
}
- RSA_free(rsa);
}
#endif
}
@@ -5304,6 +5449,7 @@ static ERL_NIF_TERM privkey_to_pubkey_nif(ErlNifEnv* env, int argc, const ERL_NI
RSA_get0_key(rsa, &n, &e, &d);
result[0] = bin_from_bn(env, e); // Exponent E
result[1] = bin_from_bn(env, n); // Modulus N = p*q
+ RSA_free(rsa);
EVP_PKEY_free(pkey);
return enif_make_list_from_array(env, result, 2);
}
@@ -5318,6 +5464,7 @@ static ERL_NIF_TERM privkey_to_pubkey_nif(ErlNifEnv* env, int argc, const ERL_NI
result[1] = bin_from_bn(env, q);
result[2] = bin_from_bn(env, g);
result[3] = bin_from_bn(env, pub_key);
+ DSA_free(dsa);
EVP_PKEY_free(pkey);
return enif_make_list_from_array(env, result, 4);
}
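(Editor's illustration, not part of the patch: a rough sketch of how the new EdDSA code paths above are reached from the Erlang crypto API, assuming OpenSSL 1.1.1 or later. The key is the [Key, Curve] list expected by get_eddsa_key above; exact option forms may differ.)

    %% Generate an Ed25519 key pair, then sign and verify a message.
    {Pub, Priv} = crypto:generate_key(eddsa, ed25519),
    Msg = <<"test message">>,
    Sig = crypto:sign(eddsa, none, Msg, [Priv, ed25519]),
    true = crypto:verify(eddsa, none, Msg, Sig, [Pub, ed25519]).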
diff --git a/lib/crypto/c_src/crypto_callback.c b/lib/crypto/c_src/crypto_callback.c
index 23d2bed057..0cc7dd609d 100644
--- a/lib/crypto/c_src/crypto_callback.c
+++ b/lib/crypto/c_src/crypto_callback.c
@@ -179,6 +179,10 @@ DLLEXPORT struct crypto_callbacks* get_crypto_callbacks(int nlocks)
/* This is not really a NIF library, but we use ERL_NIF_INIT in order to
* get access to the erl_nif API (on Windows).
*/
-ERL_NIF_INIT(dummy, (ErlNifFunc*)NULL , NULL, NULL, NULL, NULL)
+static struct {
+ int dummy__;
+ ErlNifFunc funcv[0];
+} empty;
+ERL_NIF_INIT(dummy, empty.funcv, NULL, NULL, NULL, NULL)
#endif
diff --git a/lib/crypto/c_src/otp_test_engine.c b/lib/crypto/c_src/otp_test_engine.c
index 34c825059f..2c8cce094e 100644
--- a/lib/crypto/c_src/otp_test_engine.c
+++ b/lib/crypto/c_src/otp_test_engine.c
@@ -35,7 +35,12 @@
#if OPENSSL_VERSION_NUMBER < PACKED_OPENSSL_VERSION_PLAIN(1,1,0) \
|| defined(LIBRESSL_VERSION_NUMBER)
-#define OLD
+# define OLD
+#endif
+
+#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION_PLAIN(1,1,0) \
+ && !defined(LIBRESSL_VERSION_NUMBER)
+# define FAKE_RSA_IMPL
#endif
#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION(0,9,8,'o') \
@@ -56,13 +61,41 @@
static const char *test_engine_id = "MD5";
static const char *test_engine_name = "MD5 test engine";
-/* The callback that does the job of fetching keys on demand by the Engine */
-EVP_PKEY* test_key_load(ENGINE *er, const char *id, UI_METHOD *ui_method, void *callback_data);
+#if defined(FAKE_RSA_IMPL)
+/*-------- test of private/public keys and RSA in engine ---------*/
+static RSA_METHOD *test_rsa_method = NULL;
+
+/* Our on "RSA" implementation */
+static int test_rsa_sign(int dtype, const unsigned char *m,
+ unsigned int m_len, unsigned char *sigret,
+ unsigned int *siglen, const RSA *rsa);
+static int test_rsa_verify(int dtype, const unsigned char *m,
+ unsigned int m_len, const unsigned char *sigret,
+ unsigned int siglen, const RSA *rsa);
+static int test_rsa_free(RSA *rsa);
+#endif /* if defined(FAKE_RSA_IMPL) */
+
+/* The callbacks that does the job of fetching keys on demand by the Engine */
+EVP_PKEY* test_privkey_load(ENGINE *eng, const char *id, UI_METHOD *ui_method, void *callback_data);
+EVP_PKEY* test_pubkey_load(ENGINE *eng, const char *id, UI_METHOD *ui_method, void *callback_data);
+EVP_PKEY* test_key_load(ENGINE *er, const char *id, UI_METHOD *ui_method, void *callback_data, int priv);
+
+/*----------------------------------------------------------------*/
static int test_init(ENGINE *e) {
printf("OTP Test Engine Initializatzion!\r\n");
+#if defined(FAKE_RSA_IMPL)
+ if ( !RSA_meth_set_finish(test_rsa_method, test_rsa_free)
+ || !RSA_meth_set_sign(test_rsa_method, test_rsa_sign)
+ || !RSA_meth_set_verify(test_rsa_method, test_rsa_verify)
+ ) {
+ fprintf(stderr, "Setup RSA_METHOD failed\r\n");
+ return 0;
+ }
+#endif /* if defined(FAKE_RSA_IMPL) */
+
/* Load all digest and cipher algorithms. Needed for password protected private keys */
OpenSSL_add_all_ciphers();
OpenSSL_add_all_digests();
@@ -79,6 +112,19 @@ static void add_test_data(unsigned char *md, unsigned int len)
}
}
+#if defined(FAKE_RSA_IMPL)
+static int chk_test_data(const unsigned char *md, unsigned int len)
+{
+ unsigned int i;
+
+ for (i=0; i<len; i++) {
+ if (md[i] != (unsigned char)(i & 0xff))
+ return 0;
+ }
+ return 1;
+}
+#endif /* if defined(FAKE_RSA_IMPL) */
+
/* MD5 part */
#undef data
#ifdef OLD
@@ -184,19 +230,34 @@ static int test_engine_digest_selector(ENGINE *e, const EVP_MD **digest,
return ok;
}
-
static int bind_helper(ENGINE * e, const char *id)
{
- if (!ENGINE_set_id(e, test_engine_id) ||
- !ENGINE_set_name(e, test_engine_name) ||
- !ENGINE_set_init_function(e, test_init) ||
- !ENGINE_set_digests(e, &test_engine_digest_selector) ||
+#if defined(FAKE_RSA_IMPL)
+ test_rsa_method = RSA_meth_new("OTP test RSA method", 0);
+ if (test_rsa_method == NULL) {
+ fprintf(stderr, "RSA_meth_new failed\r\n");
+ return 0;
+ }
+#endif /* if defined(FAKE_RSA_IMPL) */
+
+ if (!ENGINE_set_id(e, test_engine_id)
+ || !ENGINE_set_name(e, test_engine_name)
+ || !ENGINE_set_init_function(e, test_init)
+ || !ENGINE_set_digests(e, &test_engine_digest_selector)
/* For testing of key storage in an Engine: */
- !ENGINE_set_load_privkey_function(e, &test_key_load) ||
- !ENGINE_set_load_pubkey_function(e, &test_key_load)
- )
+ || !ENGINE_set_load_privkey_function(e, &test_privkey_load)
+ || !ENGINE_set_load_pubkey_function(e, &test_pubkey_load)
+ )
return 0;
+#if defined(FAKE_RSA_IMPL)
+ if ( !ENGINE_set_RSA(e, test_rsa_method) ) {
+ RSA_meth_free(test_rsa_method);
+ test_rsa_method = NULL;
+ return 0;
+ }
+#endif /* if defined(FAKE_RSA_IMPL) */
+
return 1;
}
@@ -211,24 +272,29 @@ IMPLEMENT_DYNAMIC_BIND_FN(bind_helper);
*/
int pem_passwd_cb_fun(char *buf, int size, int rwflag, void *password);
-EVP_PKEY* test_key_load(ENGINE *er, const char *id, UI_METHOD *ui_method, void *callback_data)
+EVP_PKEY* test_privkey_load(ENGINE *eng, const char *id, UI_METHOD *ui_method, void *callback_data) {
+ return test_key_load(eng, id, ui_method, callback_data, 1);
+}
+
+EVP_PKEY* test_pubkey_load(ENGINE *eng, const char *id, UI_METHOD *ui_method, void *callback_data) {
+ return test_key_load(eng, id, ui_method, callback_data, 0);
+}
+
+EVP_PKEY* test_key_load(ENGINE *eng, const char *id, UI_METHOD *ui_method, void *callback_data, int priv)
{
EVP_PKEY *pkey = NULL;
FILE *f = fopen(id, "r");
if (!f) {
- fprintf(stderr, "%s:%d fopen(%s) failed\r\n", __FILE__,__LINE__,id);
- return NULL;
+ fprintf(stderr, "%s:%d fopen(%s) failed\r\n", __FILE__,__LINE__,id);
+ return NULL;
}
- /* First try to read as a private key. If that fails, try to read as a public key: */
- pkey = PEM_read_PrivateKey(f, NULL, pem_passwd_cb_fun, callback_data);
- if (!pkey) {
- /* ERR_print_errors_fp (stderr); */
- fclose(f);
- f = fopen(id, "r");
- pkey = PEM_read_PUBKEY(f, NULL, NULL, NULL);
- }
+ pkey =
+ priv
+ ? PEM_read_PrivateKey(f, NULL, pem_passwd_cb_fun, callback_data)
+ : PEM_read_PUBKEY(f, NULL, NULL, NULL);
+
fclose(f);
if (!pkey) {
@@ -278,3 +344,71 @@ int pem_passwd_cb_fun(char *buf, int size, int rwflag, void *password)
}
#endif
+
+#if defined(FAKE_RSA_IMPL)
+/* RSA sign. This returns a fixed string so the test case can test that it was called
+ instead of the cryptolib default RSA sign */
+
+unsigned char fake_flag[] = {255,3,124,180,35,10,180,151,101,247,62,59,80,122,220,
+ 142,24,180,191,34,51,150,112,27,43,142,195,60,245,213,80,179};
+
+int test_rsa_sign(int dtype,
+ /* The digest to sign */
+ const unsigned char *m, unsigned int m_len,
+ /* The allocated buffer to fill with the signature */
+ unsigned char *sigret, unsigned int *siglen,
+ /* The key */
+ const RSA *rsa)
+{
+ int slen;
+ fprintf(stderr, "test_rsa_sign (dtype=%i) called m_len=%u *siglen=%u\r\n", dtype, m_len, *siglen);
+ if (!sigret) {
+ fprintf(stderr, "sigret = NULL\r\n");
+ return -1;
+ }
+
+ /* {int i;
+ fprintf(stderr, "Digest =\r\n");
+ for(i=0; i<m_len; i++)
+ fprintf(stderr, "%i,", m[i]);
+ fprintf(stderr, "\r\n");
+ } */
+
+ if ((sizeof(fake_flag) == m_len)
+ && bcmp(m,fake_flag,m_len) == 0) {
+ printf("To be faked\r\n");
+ /* To be faked */
+ slen = RSA_size(rsa);
+ add_test_data(sigret, slen); /* The signature is 0,1,2...255,0,1... */
+ *siglen = slen; /* Must set this. Why? */
+ return 1; /* 1 = success */
+ }
+ return 0;
+}
+
+int test_rsa_verify(int dtype,
+ /* The digest to verify */
+ const unsigned char *m, unsigned int m_len,
+ /* The signature */
+ const unsigned char *sigret, unsigned int siglen,
+ /* The key */
+ const RSA *rsa)
+{
+ printf("test_rsa_verify (dtype=%i) called m_len=%u siglen=%u\r\n", dtype, m_len, siglen);
+
+ if ((sizeof(fake_flag) == m_len)
+ && bcmp(m,fake_flag,m_len) == 0) {
+ printf("To be faked\r\n");
+ return (siglen == RSA_size(rsa))
+ && chk_test_data(sigret, siglen);
+ }
+ return 0;
+}
+
+static int test_rsa_free(RSA *rsa)
+{
+ printf("test_rsa_free called\r\n");
+ return 1;
+}
+
+#endif /* if defined(FAKE_RSA_IMPL) */
diff --git a/lib/crypto/doc/src/algorithm_details.xml b/lib/crypto/doc/src/algorithm_details.xml
index 68ad264df7..854bfbb4b1 100644
--- a/lib/crypto/doc/src/algorithm_details.xml
+++ b/lib/crypto/doc/src/algorithm_details.xml
@@ -63,9 +63,9 @@
<row><cell><c>aes_ige256</c></cell><cell>16</cell><cell>32</cell><cell>16</cell></row>
<row><cell><c>blowfish_cbc</c></cell> <cell>4-56</cell> <cell>8</cell> <cell>8</cell></row>
- <row><cell><c>blowfish_cfb64</c></cell> <cell>1-</cell> <cell>8</cell> <cell>any</cell></row>
- <row><cell><c>blowfish_ecb</c></cell><cell>1-</cell><cell> </cell><cell>8</cell></row>
- <row><cell><c>blowfish_ofb64</c></cell><cell>1-</cell><cell>8</cell><cell>any</cell></row>
+ <row><cell><c>blowfish_cfb64</c></cell> <cell>&#8805;1</cell> <cell>8</cell> <cell>any</cell></row>
+ <row><cell><c>blowfish_ecb</c></cell><cell>&#8805;1</cell><cell> </cell><cell>8</cell></row>
+ <row><cell><c>blowfish_ofb64</c></cell><cell>&#8805;1</cell><cell>8</cell><cell>any</cell></row>
<row><cell><c>des3_cbc</c><br/><i>(=DES EDE3 CBC)</i></cell><cell>[8,8,8]</cell><cell>8</cell><cell>8</cell></row>
<row><cell><c>des3_cfb</c><br/><i>(=DES EDE3 CFB)</i></cell><cell>[8,8,8]</cell><cell>8</cell><cell>any</cell></row>
@@ -74,7 +74,7 @@
<row><cell><c>des_cfb</c></cell><cell>8</cell><cell>8</cell><cell>any</cell></row>
<row><cell><c>des_ecb</c></cell><cell>8</cell><cell> </cell><cell>8</cell></row>
<row><cell><c>des_ede3</c><br/><i>(=DES EDE3 CBC)</i></cell><cell>[8,8,8]</cell><cell>8</cell><cell>8</cell></row>
- <row><cell><c>rc2_cbc</c></cell><cell>1-</cell><cell>8</cell><cell>8</cell></row>
+ <row><cell><c>rc2_cbc</c></cell><cell>&#8805;1</cell><cell>8</cell><cell>8</cell></row>
<tcaption>Block cipher key lengths</tcaption>
</table>
</section>
@@ -90,9 +90,9 @@
</p>
<table>
<row><cell><strong>Cipher and Mode</strong></cell><cell><strong>Key length</strong><br/><strong>[bytes]</strong></cell><cell><strong>IV length</strong><br/><strong>[bytes]</strong></cell><cell><strong>AAD length</strong><br/><strong>[bytes]</strong></cell><cell><strong>Tag length</strong><br/><strong>[bytes]</strong></cell><cell><strong>Block size</strong><br/><strong>[bytes]</strong></cell><cell><strong>Supported with</strong><br/><strong>OpenSSL versions</strong></cell></row>
- <row><cell><c>aes_ccm</c></cell> <cell>16,24,32</cell> <cell>7-13</cell> <cell>any</cell> <cell>even 4-16<br/>default: 12</cell> <cell>any</cell><cell>1.1.0 -</cell></row>
- <row><cell><c>aes_gcm</c></cell> <cell>16,24,32</cell> <cell>1-</cell> <cell>any</cell> <cell>1-16<br/>default: 16</cell> <cell>any</cell><cell>1.1.0 -</cell></row>
- <row><cell><c>chacha20_poly1305</c></cell><cell>32</cell> <cell>1-16</cell> <cell>any</cell> <cell>16</cell> <cell>any</cell><cell>1.1.0 -</cell></row>
+ <row><cell><c>aes_ccm</c></cell> <cell>16,24,32</cell> <cell>7-13</cell> <cell>any</cell> <cell>even 4-16<br/>default: 12</cell> <cell>any</cell><cell>&#8805;1.1.0</cell></row>
+ <row><cell><c>aes_gcm</c></cell> <cell>16,24,32</cell> <cell>&#8805;1</cell> <cell>any</cell> <cell>1-16<br/>default: 16</cell> <cell>any</cell><cell>&#8805;1.1.0</cell></row>
+ <row><cell><c>chacha20_poly1305</c></cell><cell>32</cell> <cell>1-16</cell> <cell>any</cell> <cell>16</cell> <cell>any</cell><cell>&#8805;1.1.0</cell></row>
<tcaption>AEAD cipher key lengths</tcaption>
</table>
</section>
@@ -108,8 +108,8 @@
</p>
<table>
<row><cell><strong>Cipher and Mode</strong></cell><cell><strong>Key length</strong><br/><strong>[bytes]</strong></cell><cell><strong>IV length</strong><br/><strong>[bytes]</strong></cell><cell><strong>Supported with</strong><br/><strong>OpenSSL versions</strong></cell></row>
- <row><cell><c>aes_ctr</c></cell><cell>16, 24, 32</cell><cell>16</cell><cell>1.0.1 -</cell></row>
- <row><cell><c>rc4</c></cell><cell>1-</cell><cell> </cell> <cell>all</cell></row>
+ <row><cell><c>aes_ctr</c></cell><cell>16, 24, 32</cell><cell>16</cell><cell>&#8805;1.0.1</cell></row>
+ <row><cell><c>rc4</c></cell><cell>&#8805;1</cell><cell> </cell> <cell>all</cell></row>
<tcaption>Stream cipher key lengths</tcaption>
</table>
</section>
@@ -141,9 +141,9 @@
<row><cell><c>aes_cfb8</c></cell> <cell>16</cell><cell>1</cell></row>
<row><cell><c>blowfish_cbc</c></cell> <cell>4-56</cell> <cell>8</cell></row>
- <row><cell><c>blowfish_cfb64</c></cell> <cell>1-</cell> <cell>1</cell></row>
- <row><cell><c>blowfish_ecb</c></cell><cell>1-</cell> <cell>8</cell></row>
- <row><cell><c>blowfish_ofb64</c></cell><cell>1-</cell> <cell>1</cell></row>
+ <row><cell><c>blowfish_cfb64</c></cell> <cell>&#8805;1</cell> <cell>1</cell></row>
+ <row><cell><c>blowfish_ecb</c></cell><cell>&#8805;1</cell> <cell>8</cell></row>
+ <row><cell><c>blowfish_ofb64</c></cell><cell>&#8805;1</cell> <cell>1</cell></row>
<row><cell><c>des3_cbc</c><br/><i>(=DES EDE3 CBC)</i></cell><cell>[8,8,8]</cell><cell>8</cell></row>
<row><cell><c>des3_cfb</c><br/><i>(=DES EDE3 CFB)</i></cell><cell>[8,8,8]</cell><cell>1</cell></row>
@@ -152,7 +152,7 @@
<row><cell><c>des_cfb</c></cell><cell>8</cell><cell>1</cell></row>
<row><cell><c>des_ecb</c></cell><cell>8</cell><cell>1</cell></row>
- <row><cell><c>rc2_cbc</c></cell><cell>1-</cell><cell>8</cell></row>
+ <row><cell><c>rc2_cbc</c></cell><cell>&#8805;1</cell><cell>8</cell></row>
<tcaption>CMAC cipher key lengths</tcaption>
</table>
</section>
@@ -195,7 +195,7 @@
</row>
<row><cell>SHA1</cell><cell>sha</cell><cell>all</cell></row>
<row><cell>SHA2</cell><cell>sha224, sha256, sha384, sha512</cell><cell>all</cell></row>
- <row><cell>SHA3</cell><cell>sha3_224, sha3_256, sha3_384, sha3_512</cell><cell>1.1.1 -</cell></row>
+ <row><cell>SHA3</cell><cell>sha3_224, sha3_256, sha3_384, sha3_512</cell><cell>&#8805;1.1.1</cell></row>
<row><cell>MD4</cell><cell>md4</cell><cell>all</cell></row>
<row><cell>MD5</cell><cell>md5</cell><cell>all</cell></row>
<row><cell>RIPEMD</cell><cell>ripemd160</cell><cell>all</cell></row>
@@ -221,18 +221,62 @@
without prior notice.</p>
</warning>
<table>
- <row><cell><strong>Option</strong></cell> <cell><strong>sign/verify</strong></cell> <cell><strong>encrypt/decrypt</strong></cell> <cell><strong>Supported with</strong><br/><strong>OpenSSL versions</strong></cell> </row>
- <row><cell>{rsa_mgf1_md,atom()}</cell> <cell>x</cell> <cell>x</cell> <cell>1.0.1</cell></row>
- <row><cell>{rsa_oaep_label, binary()}</cell> <cell> </cell> <cell>x</cell> <cell></cell></row>
- <row><cell>{rsa_oaep_md, atom()}</cell> <cell> </cell> <cell>x</cell> <cell></cell></row>
- <row><cell>{rsa_padding,rsa_pkcs1_pss_padding}</cell> <cell>x</cell> <cell> </cell> <cell>1.0.0</cell></row>
- <row><cell>{rsa_pss_saltlen, -2..}</cell> <cell>x</cell> <cell> </cell> <cell>1.0.0</cell></row>
- <row><cell>{rsa_padding,rsa_no_padding}</cell> <cell>x</cell> <cell>x</cell> <cell></cell></row>
- <row><cell>{rsa_padding,rsa_pkcs1_padding}</cell> <cell>x</cell> <cell>x</cell> <cell></cell></row>
- <row><cell>{rsa_padding,rsa_sslv23_padding}</cell> <cell> </cell> <cell>x</cell> <cell></cell></row>
- <row><cell>{rsa_padding,rsa_x931_padding}</cell> <cell>x</cell> <cell> </cell> <cell></cell></row>
+ <row><cell><strong>Option</strong></cell>
+ <cell><strong>sign/verify</strong></cell>
+ <cell><strong>public encrypt</strong><br/><strong>private decrypt</strong></cell>
+ <cell><strong>private encrypt</strong><br/><strong>public decrypt</strong></cell>
+ </row>
+ <row><cell>{rsa_padding,rsa_x931_padding}</cell>
+ <cell>x</cell>
+ <cell></cell>
+ <cell>x</cell>
+ </row>
+ <row><cell>{rsa_padding,rsa_pkcs1_padding}</cell>
+ <cell>x</cell>
+ <cell>x</cell>
+ <cell>x</cell>
+ </row>
+ <row><cell>{rsa_padding,rsa_pkcs1_pss_padding}<br/>
+ {rsa_pss_saltlen, -2..}<br/>
+ {rsa_mgf1_md, atom()}
+ </cell>
+ <cell>x (2)<br/>
+ x (2)<br/>
+ x (2)</cell>
+ <cell></cell>
+ <cell></cell>
+ </row>
+ <row><cell>{rsa_padding,rsa_pkcs1_oaep_padding}<br/>
+ {rsa_mgf1_md, atom()}<br/>
+           {rsa_oaep_label, binary()}<br/>
+ {rsa_oaep_md, atom()}
+ </cell>
+ <cell></cell>
+ <cell>x (2)<br/>
+ x (2)<br/>
+ x (3)<br/>
+ x (3)
+ </cell>
+ <cell></cell>
+ </row>
+ <row><cell>{rsa_padding,rsa_no_padding}</cell>
+ <cell>x (1)</cell>
+ <cell></cell>
+ <cell></cell>
+ </row>
+ <!-- row><cell>{rsa_padding,rsa_sslv23_padding}</cell>
+ <cell></cell>
+ <cell></cell>
+ <cell></cell>
+ </row -->
<tcaption></tcaption>
</table>
+ <p>Notes:</p>
+ <list type="ordered">
+ <item>(1) OpenSSL &#8804; 1.0.0</item>
+ <item>(2) OpenSSL &#8805; 1.0.1</item>
+ <item>(3) OpenSSL &#8805; 1.1.0</item>
+ </list>
</section>
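As a minimal sketch (not taken from the patch; key size, digest, and option values are illustrative), the sign/verify options in the table above are passed to crypto:sign/5 and crypto:verify/6 like this, assuming OpenSSL >= 1.0.1 for the PSS-related options (note 2):

    %% Illustrative shell fragment; the 2048-bit key and sha256 digest are arbitrary choices.
    {Pub, Priv} = crypto:generate_key(rsa, {2048, 65537}),
    Msg  = <<"example">>,
    Opts = [{rsa_padding, rsa_pkcs1_pss_padding},
            {rsa_pss_saltlen, -2},
            {rsa_mgf1_md, sha}],
    Sig  = crypto:sign(rsa, sha256, Msg, Priv, Opts),
    true = crypto:verify(rsa, sha256, Msg, Sig, Pub, Opts).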
<section>
@@ -259,6 +303,20 @@
</section>
<section>
+ <title>EdDSA</title>
+    <p>EdDSA is available with OpenSSL 1.1.1 or later, if not disabled by configuration.
+    To check availability dynamically, verify that the atom <c>eddsa</c> is present in the
+    list tagged <c>public_keys</c> in the return value of
+    <seealso marker="crypto#supports-0">crypto:supports()</seealso>.
+    </p>
+    <p>The curves ed25519 and ed448 are supported.
+    The named curves that are actually available can be checked by examining the list
+    tagged <c>curves</c> in the return value of
+    <seealso marker="crypto#supports-0">crypto:supports()</seealso>.
+    </p>
+ </section>
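A minimal sketch of the checks and calls described above, using RFC 8032 ed25519 test vector 2 (the same vector appears in the crypto_SUITE changes further down); the undefined digest argument follows the pattern of do_sign_verify in that suite:

    %% Availability checks followed by a sign/verify round trip.
    true = lists:member(eddsa, proplists:get_value(public_keys, crypto:supports())),
    true = lists:member(ed25519, proplists:get_value(curves, crypto:supports())),
    Priv = <<16#4ccd089b28ff96da9db6c346ec114e0f5b8a319f35aba624da8cf6ed4fb8a6fb:256>>,
    Pub  = <<16#3d4017c3e843895a92b70aa74d1b7ebc9c982ccf2ec4968cc0cd55f12af4660c:256>>,
    Msg  = <<16#72>>,
    Sig  = crypto:sign(eddsa, undefined, Msg, [Priv, ed25519]),
    true = crypto:verify(eddsa, undefined, Msg, Sig, [Pub, ed25519]).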
+
+ <section>
<title>Diffie-Hellman</title>
<p>Diffie-Hellman computations are available with OpenSSL versions compatible with Erlang CRYPTO
if not disabled by configuration.
diff --git a/lib/crypto/doc/src/crypto.xml b/lib/crypto/doc/src/crypto.xml
index 651b647e1c..b33db0d6e4 100644
--- a/lib/crypto/doc/src/crypto.xml
+++ b/lib/crypto/doc/src/crypto.xml
@@ -268,7 +268,8 @@
<datatype_title>Elliptic Curves</datatype_title>
<datatype>
<name name="ec_named_curve"/>
- <name name="edwards_curve"/>
+ <name name="edwards_curve_dh"/>
+ <name name="edwards_curve_ed"/>
<desc>
<p>Note that some curves are disabled if FIPS is enabled.</p>
</desc>
@@ -348,6 +349,14 @@
</datatype>
<datatype>
+ <name name="eddsa_public"/>
+ <name name="eddsa_private"/>
+ <name name="eddsa_params"/>
+ <desc>
+ </desc>
+ </datatype>
+
+ <datatype>
<name name="srp_public"/>
<name name="srp_private"/>
<desc>
diff --git a/lib/crypto/src/crypto.erl b/lib/crypto/src/crypto.erl
index 2db73c4af0..72cb9aabfd 100644
--- a/lib/crypto/src/crypto.erl
+++ b/lib/crypto/src/crypto.erl
@@ -118,7 +118,11 @@
-type ecdsa_public() :: key_integer() .
-type ecdsa_private() :: key_integer() .
--type ecdsa_params() :: ec_named_curve() | edwards_curve() | ec_explicit_curve() .
+-type ecdsa_params() :: ec_named_curve() | ec_explicit_curve() .
+
+-type eddsa_public() :: key_integer() .
+-type eddsa_private() :: key_integer() .
+-type eddsa_params() :: edwards_curve_ed() .
-type srp_public() :: key_integer() .
-type srp_private() :: key_integer() .
@@ -135,7 +139,7 @@
-type ecdh_public() :: key_integer() .
-type ecdh_private() :: key_integer() .
--type ecdh_params() :: ec_named_curve() | edwards_curve() | ec_explicit_curve() .
+-type ecdh_params() :: ec_named_curve() | edwards_curve_dh() | ec_explicit_curve() .
%%% Curves
@@ -247,8 +251,9 @@
| wtls9
.
--type edwards_curve() :: x25519
- | x448 .
+-type edwards_curve_dh() :: x25519 | x448 .
+
+-type edwards_curve_ed() :: ed25519 | ed448 .
%%%
-type block_cipher_with_iv() :: cbc_cipher()
@@ -328,7 +333,7 @@ stop() ->
],
PKs :: [rsa | dss | ecdsa | dh | ecdh | ec_gf2m],
Macs :: [hmac | cmac | poly1305],
- Curves :: [ec_named_curve() | edwards_curve()],
+ Curves :: [ec_named_curve() | edwards_curve_dh() | edwards_curve_ed()],
RSAopts :: [rsa_sign_verify_opt() | rsa_opt()] .
supports()->
{Hashs, PubKeys, Ciphers, Macs, Curves, RsaOpts} = algorithms(),
@@ -777,7 +782,7 @@ rand_seed_nif(_Seed) -> ?nif_stub.
%%% Sign/verify
%%%
%%%================================================================
--type pk_sign_verify_algs() :: rsa | dss | ecdsa .
+-type pk_sign_verify_algs() :: rsa | dss | ecdsa | eddsa .
-type pk_sign_verify_opts() :: [ rsa_sign_verify_opt() ] .
@@ -801,7 +806,8 @@ rand_seed_nif(_Seed) -> ?nif_stub.
Msg :: binary() | {digest,binary()},
Key :: rsa_private()
| dss_private()
- | [ecdsa_private()|ecdsa_params()]
+ | [ecdsa_private() | ecdsa_params()]
+ | [eddsa_private() | eddsa_params()]
| engine_key_ref(),
Signature :: binary() .
@@ -820,6 +826,7 @@ sign(Algorithm, Type, Data, Key) ->
Key :: rsa_private()
| dss_private()
| [ecdsa_private() | ecdsa_params()]
+ | [eddsa_private() | eddsa_params()]
| engine_key_ref(),
Options :: pk_sign_verify_opts(),
Signature :: binary() .
@@ -842,12 +849,14 @@ pkey_sign_nif(_Algorithm, _Type, _Digest, _Key, _Options) -> ?nif_stub.
when Algorithm :: pk_sign_verify_algs(),
DigestType :: rsa_digest_type()
| dss_digest_type()
- | ecdsa_digest_type(),
+ | ecdsa_digest_type()
+ | none,
Msg :: binary() | {digest,binary()},
Signature :: binary(),
- Key :: rsa_private()
- | dss_private()
- | [ecdsa_private() | ecdsa_params()]
+ Key :: rsa_public()
+ | dss_public()
+ | [ecdsa_public() | ecdsa_params()]
+ | [eddsa_public() | eddsa_params()]
| engine_key_ref(),
Result :: boolean().
@@ -865,6 +874,7 @@ verify(Algorithm, Type, Data, Signature, Key) ->
Key :: rsa_public()
| dss_public()
| [ecdsa_public() | ecdsa_params()]
+ | [eddsa_public() | eddsa_params()]
| engine_key_ref(),
Options :: pk_sign_verify_opts(),
Result :: boolean().
@@ -1214,7 +1224,11 @@ engine_load_1(Engine, PreCmds, PostCmds, EngineMethods) ->
throw:Error ->
%% The engine couldn't initialise, release the structural reference
ok = engine_free_nif(Engine),
- throw(Error)
+ throw(Error);
+ error:badarg ->
+ %% For example bad argument list, release the structural reference
+ ok = engine_free_nif(Engine),
+ error(badarg)
end.
engine_load_2(Engine, PostCmds, EngineMethods) ->
@@ -1762,7 +1776,9 @@ ec_key_generate(_Curve, _Key) -> ?nif_stub.
ecdh_compute_key_nif(_Others, _Curve, _My) -> ?nif_stub.
--spec ec_curves() -> [EllipticCurve] when EllipticCurve :: ec_named_curve() | edwards_curve() .
+-spec ec_curves() -> [EllipticCurve] when EllipticCurve :: ec_named_curve()
+ | edwards_curve_dh()
+ | edwards_curve_ed() .
ec_curves() ->
crypto_ec_curves:curves().
@@ -2026,7 +2042,7 @@ check_otp_test_engine(LibDir) ->
case filelib:wildcard("otp_test_engine*", LibDir) of
[] ->
{error, notexist};
- [LibName] ->
+ [LibName|_] -> % In case of Valgrind there could be more than one
LibPath = filename:join(LibDir,LibName),
case filelib:is_file(LibPath) of
true ->
diff --git a/lib/crypto/test/crypto_SUITE.erl b/lib/crypto/test/crypto_SUITE.erl
index 495c2adb55..6c6188f775 100644
--- a/lib/crypto/test/crypto_SUITE.erl
+++ b/lib/crypto/test/crypto_SUITE.erl
@@ -1,4 +1,4 @@
-%%
+%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
@@ -38,6 +38,7 @@ all() ->
mod_pow,
exor,
rand_uniform,
+ rand_threads,
rand_plugin,
rand_plugin_s
].
@@ -58,6 +59,8 @@ groups() ->
{group, rsa},
{group, dss},
{group, ecdsa},
+ {group, ed25519},
+ {group, ed448},
{group, dh},
{group, ecdh},
{group, srp},
@@ -145,6 +148,12 @@ groups() ->
{ecdsa, [], [sign_verify
%% Does not work yet: ,public_encrypt, private_encrypt
]},
+ {ed25519, [], [sign_verify
+ %% Does not work yet: ,public_encrypt, private_encrypt
+ ]},
+ {ed448, [], [sign_verify
+ %% Does not work yet: ,public_encrypt, private_encrypt
+ ]},
{dh, [], [generate_compute,
compute_bug]},
{ecdh, [], [generate_all_supported, compute, generate]},
@@ -495,14 +504,14 @@ sign_verify(Config) when is_list(Config) ->
public_encrypt() ->
[{doc, "Test public_encrypt/decrypt "}].
public_encrypt(Config) when is_list(Config) ->
- Params = proplists:get_value(pub_priv_encrypt, Config),
+ Params = proplists:get_value(pub_pub_encrypt, Config, []),
lists:foreach(fun do_public_encrypt/1, Params).
%%--------------------------------------------------------------------
private_encrypt() ->
[{doc, "Test private_encrypt/decrypt functions. "}].
private_encrypt(Config) when is_list(Config) ->
- Params = proplists:get_value(pub_priv_encrypt, Config),
+ Params = proplists:get_value(pub_priv_encrypt, Config, []),
lists:foreach(fun do_private_encrypt/1, Params).
%%--------------------------------------------------------------------
@@ -572,7 +581,8 @@ generate_all_supported(_Config) ->
ct:log("ERROR: Curve ~p exception ~p:~p~n~p", [C,Cls,Err,Stack]),
{error,{C,{Cls,Err}}}
end
- || C <- crypto:ec_curves()
+ || C <- crypto:ec_curves(),
+ not lists:member(C, [ed25519, ed448])
],
OK = [C || {ok,C} <- Results],
ct:log("Ok (len=~p): ~p", [length(OK), OK]),
@@ -605,6 +615,25 @@ rand_uniform(Config) when is_list(Config) ->
10 = byte_size(crypto:strong_rand_bytes(10)).
%%--------------------------------------------------------------------
+rand_threads() ->
+ [{doc, "strong_rand_bytes in parallel threads"}].
+rand_threads(Config) when is_list(Config) ->
+ %% This will crash the emulator on at least one version of libcrypto
+ %% with buggy multithreading in RAND_bytes().
+ %% The test needs to run at least a few minutes...
+ NofThreads = 4,
+ Fun = fun F() -> crypto:strong_rand_bytes(16), F() end,
+ PidRefs = [spawn_monitor(Fun) || _ <- lists:seq(1, NofThreads)],
+%%% The full test case takes too much time to run routinely.
+%%% It is kept here for reference, with the wait shortened to just 10 seconds.
+%%% receive after 10 * 60 * 1000 -> ok end, % 10 minutes
+ receive after 10 * 1000 -> ok end, % 10 seconds
+ spawn_link(fun () -> receive after 5000 -> exit(timeout) end end),
+ [exit(Pid, stop) || {Pid,_Ref} <- PidRefs],
+ [receive {'DOWN',Ref,_,_,stop} -> ok end || {_Pid,Ref} <- PidRefs],
+ ok.
+
+%%--------------------------------------------------------------------
rand_plugin() ->
[{doc, "crypto rand plugin testing (implicit state / process dictionary)"}].
rand_plugin(Config) when is_list(Config) ->
@@ -884,6 +913,29 @@ aead_cipher({Type, Key, PlainText, IV, AAD, CipherText, CipherTag, TagLen, Info}
{got, Other1}})
end.
+do_sign_verify({Type, undefined=Hash, Private, Public, Msg, Signature}) ->
+ case crypto:sign(eddsa, Hash, Msg, [Private,Type]) of
+ Signature ->
+ ct:log("OK crypto:sign(eddsa, ~p, Msg, [Private,~p])", [Hash,Type]),
+ case crypto:verify(eddsa, Hash, Msg, Signature, [Public,Type]) of
+ true ->
+ ct:log("OK crypto:verify(eddsa, ~p, Msg, Signature, [Public,~p])", [Hash,Type]),
+ negative_verify(eddsa, Hash, Msg, <<10,20>>, [Public,Type]);
+ false ->
+ ct:log("ERROR crypto:verify(eddsa, ~p, Msg= ~p, Signature= ~p, [Public= ~p,~p])",
+ [Hash,Msg,Signature,Public,Type]),
+ ct:fail({{crypto, verify, [eddsa, Hash, Msg, Signature, [Public,Type]]}})
+ end;
+ ErrorSig ->
+ ct:log("ERROR crypto:sign(~p, ~p, ..., [Private= ~p,~p])", [eddsa,Hash,Private,Type]),
+ ct:log("ERROR crypto:verify(eddsa, ~p, Msg= ~p, [Public= ~p,~p])~n"
+ "ErrorSig = ~p~n"
+ "CorrectSig = ~p~n"
+ ,
+ [Hash,Msg,Public,Type,ErrorSig,Signature]),
+ ct:fail({{crypto, sign, [Type, Hash, Msg, ErrorSig, [Private]]}})
+ end;
+
do_sign_verify({Type, Hash, Public, Private, Msg}) ->
Signature = crypto:sign(Type, Hash, Msg, Private),
case crypto:verify(Type, Hash, Msg, Signature, Public) of
@@ -943,30 +995,6 @@ negative_verify(Type, Hash, Msg, Signature, Public, Options) ->
ok
end.
--define(PUB_PRIV_ENC_DEC_CATCH(Type,Padding),
- CC:EE ->
- ct:log("~p:~p in ~p:~p/~p, line ~p.~n"
- "Type = ~p~nPadding = ~p",
- [CC,EE,?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY,?LINE,(Type),(Padding)]),
- MaybeUnsupported =
- case crypto:info_lib() of
- [{<<"OpenSSL">>,_,_}] ->
- is_list(Padding) andalso
- lists:any(fun(P) -> lists:member(P,(Padding)) end,
- [{rsa_padding, rsa_pkcs1_oaep_padding},
- {rsa_padding, rsa_sslv23_padding},
- {rsa_padding, rsa_x931_padding}]);
- _ ->
- false
- end,
- case CC of
- error when MaybeUnsupported ->
- ct:comment("Padding unsupported?",[]);
- _ ->
- ct:fail({?FUNCTION_NAME,CC,EE,(Type),(Padding)})
- end
- ).
-
do_public_encrypt({Type, Public, Private, Msg, Padding}) ->
try
crypto:public_encrypt(Type, Msg, Public, Padding)
@@ -980,10 +1008,12 @@ do_public_encrypt({Type, Public, Private, Msg, Padding}) ->
Other ->
ct:fail({{crypto, private_decrypt, [Type, PublicEcn, Private, Padding]}, {expected, Msg}, {got, Other}})
catch
- ?PUB_PRIV_ENC_DEC_CATCH(Type, Padding)
+ CC:EE ->
+ ct:fail({{crypto, private_decrypt, [Type, PublicEcn, Private, Padding]}, {expected, Msg}, {got, {CC,EE}}})
end
catch
- ?PUB_PRIV_ENC_DEC_CATCH(Type, Padding)
+ CC:EE ->
+ ct:fail({{crypto, public_encrypt, [Type, Msg, Public, Padding]}, {got, {CC,EE}}})
end.
@@ -1000,10 +1030,12 @@ do_private_encrypt({Type, Public, Private, Msg, Padding}) ->
Other ->
ct:fail({{crypto, public_decrypt, [Type, PrivEcn, Public, Padding]}, {expected, Msg}, {got, Other}})
catch
- ?PUB_PRIV_ENC_DEC_CATCH(Type, Padding)
+ CC:EE ->
+ ct:fail({{crypto, public_decrypt, [Type, PrivEcn, Public, Padding]}, {expected, Msg}, {got, {CC,EE}}})
end
catch
- ?PUB_PRIV_ENC_DEC_CATCH(Type, Padding)
+ CC:EE ->
+ ct:fail({{crypto, private_encrypt, [Type, Msg, Private, Padding]}, {got, {CC,EE}}})
end.
do_generate_compute({srp = Type, UserPrivate, UserGenParams, UserComParams,
@@ -1395,36 +1427,42 @@ group_config(sha3_384 = Type, Config) ->
group_config(sha3_512 = Type, Config) ->
{Msgs,Digests} = sha3_test_vectors(Type),
[{hash, {Type, Msgs, Digests}}, {hmac, hmac_sha3(Type)} | Config];
-group_config(rsa = Type, Config) ->
+group_config(rsa, Config) ->
Msg = rsa_plain(),
Public = rsa_public(),
Private = rsa_private(),
PublicS = rsa_public_stronger(),
PrivateS = rsa_private_stronger(),
- SignVerify =
- case ?config(fips, Config) of
- true ->
- %% Use only the strong keys in FIPS mode
- sign_verify_tests(Type, Msg,
- PublicS, PrivateS,
- PublicS, PrivateS);
- false ->
- sign_verify_tests(Type, Msg,
- Public, Private,
- PublicS, PrivateS)
- end,
MsgPubEnc = <<"7896345786348 Asldi">>,
- PubPrivEnc = [{rsa, PublicS, PrivateS, MsgPubEnc, rsa_pkcs1_padding},
- {rsa, PublicS, PrivateS, MsgPubEnc, [{rsa_padding, rsa_pkcs1_padding}]},
- {rsa, PublicS, PrivateS, MsgPubEnc, [{rsa_padding, rsa_sslv23_padding}]},
- {rsa, PublicS, PrivateS, MsgPubEnc, [{rsa_padding, rsa_x931_padding}]},
- rsa_oaep(),
- %% rsa_oaep_label(),
- %% rsa_oaep256(),
- no_padding()
+ SignVerify_OptsToTry = [[{rsa_padding, rsa_x931_padding}],
+ [{rsa_padding, rsa_pkcs1_padding}],
+ [{rsa_padding, rsa_pkcs1_pss_padding}],
+ [{rsa_padding, rsa_pkcs1_pss_padding}, {rsa_pss_saltlen, -2}],
+ [{rsa_padding, rsa_pkcs1_pss_padding}, {rsa_pss_saltlen, 5}],
+ [{rsa_padding, rsa_pkcs1_pss_padding}, {rsa_mgf1_md,sha}],
+ [{rsa_padding, rsa_pkcs1_pss_padding}, {rsa_mgf1_md,sha}, {rsa_pss_saltlen, 5}]
+ ],
+ PrivEnc_OptsToTry = [rsa_pkcs1_padding, % Compatibility
+ [{rsa_pad, rsa_pkcs1_padding}], % Compatibility
+ [{rsa_padding, rsa_pkcs1_padding}],
+ [{rsa_padding,rsa_x931_padding}]
+ ],
+ PubEnc_OptsToTry = [rsa_pkcs1_padding, % Compatibility
+ [{rsa_pad, rsa_pkcs1_padding}], % Compatibility
+ [{rsa_padding, rsa_pkcs1_padding}],
+ [{rsa_padding,rsa_pkcs1_oaep_padding}],
+ [{rsa_padding,rsa_pkcs1_oaep_padding}, {rsa_oaep_label, <<"Hej hopp">>}],
+ [{rsa_padding,rsa_pkcs1_oaep_padding}, {rsa_oaep_md,sha}],
+ [{rsa_padding,rsa_pkcs1_oaep_padding}, {rsa_oaep_md,sha}, {rsa_oaep_label, <<"Hej hopp">>}],
+ [{rsa_padding,rsa_pkcs1_oaep_padding}, {rsa_mgf1_md,sha}],
+ [{rsa_padding,rsa_pkcs1_oaep_padding}, {rsa_mgf1_md,sha}, {rsa_oaep_label, <<"Hej hopp">>}],
+ [{rsa_padding,rsa_pkcs1_oaep_padding}, {rsa_mgf1_md,sha}, {rsa_oaep_md,sha}, {rsa_oaep_label, <<"Hej hopp">>}]
],
- Generate = [{rsa, 1024, 3}, {rsa, 2048, 17}, {rsa, 3072, 65537}],
- [{sign_verify, SignVerify}, {pub_priv_encrypt, PubPrivEnc}, {generate, Generate} | Config];
+ [{sign_verify, rsa_sign_verify_tests(Config, Msg, Public, Private, PublicS, PrivateS, SignVerify_OptsToTry)},
+ {pub_priv_encrypt, gen_rsa_pub_priv_tests(PublicS, PrivateS, MsgPubEnc, PrivEnc_OptsToTry)},
+ {pub_pub_encrypt, gen_rsa_pub_priv_tests(PublicS, PrivateS, MsgPubEnc, PubEnc_OptsToTry)},
+ {generate, [{rsa, 1024, 3}, {rsa, 2048, 17}, {rsa, 3072, 65537}]}
+ | Config];
group_config(dss = Type, Config) ->
Msg = dss_plain(),
Public = dss_params() ++ [dss_public()],
@@ -1457,6 +1495,12 @@ group_config(ecdsa = Type, Config) ->
MsgPubEnc = <<"7896345786348 Asldi">>,
PubPrivEnc = [{ecdsa, Public, Private, MsgPubEnc, []}],
[{sign_verify, SignVerify}, {pub_priv_encrypt, PubPrivEnc} | Config];
+
+group_config(Type, Config) when Type == ed25519 ; Type == ed448 ->
+ TestVectors = eddsa(Type),
+ [{sign_verify,TestVectors} | Config];
+
+
group_config(srp, Config) ->
GenerateCompute = [srp3(), srp6(), srp6a(), srp6a_smaller_prime()],
[{generate_compute, GenerateCompute} | Config];
@@ -1553,40 +1597,74 @@ group_config(aes_cbc, Config) ->
group_config(_, Config) ->
Config.
-sign_verify_tests(Type, Msg, Public, Private, PublicS, PrivateS) ->
- gen_sign_verify_tests(Type, [md5, ripemd160, sha, sha224, sha256], Msg, Public, Private,
- [undefined,
- [{rsa_padding, rsa_pkcs1_pss_padding}],
- [{rsa_padding, rsa_pkcs1_pss_padding}, {rsa_pss_saltlen, 0}],
- [{rsa_padding, rsa_x931_padding}]
- ]) ++
- gen_sign_verify_tests(Type, [sha384, sha512], Msg, PublicS, PrivateS,
- [undefined,
- [{rsa_padding, rsa_pkcs1_pss_padding}],
- [{rsa_padding, rsa_pkcs1_pss_padding}, {rsa_pss_saltlen, 0}],
- [{rsa_padding, rsa_x931_padding}]
- ]).
-
-gen_sign_verify_tests(Type, Hashs, Msg, Public, Private, Opts) ->
+rsa_sign_verify_tests(Config, Msg, Public, Private, PublicS, PrivateS, OptsToTry) ->
+ case ?config(fips, Config) of
+ true ->
+ %% Use only the strong keys in FIPS mode
+ rsa_sign_verify_tests(Msg,
+ PublicS, PrivateS,
+ PublicS, PrivateS,
+ OptsToTry);
+ false ->
+ rsa_sign_verify_tests(Msg,
+ Public, Private,
+ PublicS, PrivateS,
+ OptsToTry)
+ end.
+
+rsa_sign_verify_tests(Msg, Public, Private, PublicS, PrivateS, OptsToTry) ->
+ gen_rsa_sign_verify_tests([md5, ripemd160, sha, sha224, sha256], Msg, Public, Private,
+ [undefined | OptsToTry]) ++
+ gen_rsa_sign_verify_tests([sha384, sha512], Msg, PublicS, PrivateS,
+ [undefined | OptsToTry]).
+
+gen_rsa_sign_verify_tests(Hashs, Msg, Public, Private, Opts) ->
+ SupOpts = proplists:get_value(rsa_opts, crypto:supports(), []),
lists:foldr(fun(Hash, Acc0) ->
case is_supported(Hash) of
true ->
lists:foldr(fun
(undefined, Acc1) ->
- [{Type, Hash, Public, Private, Msg} | Acc1];
+ [{rsa, Hash, Public, Private, Msg} | Acc1];
([{rsa_padding, rsa_x931_padding} | _], Acc1)
when Hash =:= md5
orelse Hash =:= ripemd160
orelse Hash =:= sha224 ->
Acc1;
(Opt, Acc1) ->
- [{Type, Hash, Public, Private, Msg, Opt} | Acc1]
+ case rsa_opt_is_supported(Opt, SupOpts) of
+ true ->
+ [{rsa, Hash, Public, Private, Msg, Opt} | Acc1];
+ false ->
+ Acc1
+ end
end, Acc0, Opts);
false ->
Acc0
end
end, [], Hashs).
+
+gen_rsa_pub_priv_tests(Public, Private, Msg, OptsToTry) ->
+ SupOpts = proplists:get_value(rsa_opts, crypto:supports(), []),
+ lists:foldr(fun(Opt, Acc) ->
+ case rsa_opt_is_supported(Opt, SupOpts) of
+ true ->
+ [{rsa, Public, Private, Msg, Opt} | Acc];
+ false ->
+ Acc
+ end
+ end, [], OptsToTry).
+
+
+rsa_opt_is_supported([_|_]=Opt, Sup) ->
+ lists:all(fun(O) -> rsa_opt_is_supported(O,Sup) end, Opt);
+rsa_opt_is_supported({A,B}, Sup) ->
+ rsa_opt_is_supported(A,Sup) orelse rsa_opt_is_supported(B,Sup);
+rsa_opt_is_supported(Opt, Sup) ->
+ lists:member(Opt, Sup).
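A couple of illustrative calls to the helper above; the second argument stands in for the rsa_opts list from crypto:supports(), whose exact contents depend on the OpenSSL version:

    true  = rsa_opt_is_supported({rsa_padding, rsa_pkcs1_pss_padding},
                                 [rsa_pkcs1_pss_padding, rsa_pss_saltlen]),
    false = rsa_opt_is_supported({rsa_padding, rsa_x931_padding},
                                 [rsa_pkcs1_padding]).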
+
+
rfc_1321_msgs() ->
[<<"">>,
<<"a">>,
@@ -2682,6 +2760,392 @@ srp(ClientPrivate, Generator, Prime, Version, Verifier, ServerPublic, ServerPriv
ServerPublic, ServerPrivate, {host, [Verifier, Generator, Prime, Version]},
{host, [Verifier, Prime, Version, Scrambler]},
SessionKey}.
+
+eddsa(ed25519) ->
+ %% https://tools.ietf.org/html/rfc8032#section-7.1
+ %% {ALGORITHM, (SHA)}, SECRET KEY, PUBLIC KEY, MESSAGE, SIGNATURE}
+ [
+ %% TEST 1
+ {ed25519, undefined,
+ hexstr2bin("9d61b19deffd5a60ba844af492ec2cc4"
+ "4449c5697b326919703bac031cae7f60"),
+ hexstr2bin("d75a980182b10ab7d54bfed3c964073a"
+ "0ee172f3daa62325af021a68f707511a"),
+ hexstr2bin(""),
+ hexstr2bin("e5564300c360ac729086e2cc806e828a"
+ "84877f1eb8e5d974d873e06522490155"
+ "5fb8821590a33bacc61e39701cf9b46b"
+ "d25bf5f0595bbe24655141438e7a100b")},
+ %% TEST 2
+ {ed25519, undefined,
+ hexstr2bin("4ccd089b28ff96da9db6c346ec114e0f"
+ "5b8a319f35aba624da8cf6ed4fb8a6fb"),
+ hexstr2bin("3d4017c3e843895a92b70aa74d1b7ebc"
+ "9c982ccf2ec4968cc0cd55f12af4660c"),
+ hexstr2bin("72"),
+ hexstr2bin("92a009a9f0d4cab8720e820b5f642540"
+ "a2b27b5416503f8fb3762223ebdb69da"
+ "085ac1e43e15996e458f3613d0f11d8c"
+ "387b2eaeb4302aeeb00d291612bb0c00")},
+ %% TEST 3
+ {ed25519, undefined,
+ hexstr2bin("c5aa8df43f9f837bedb7442f31dcb7b1"
+ "66d38535076f094b85ce3a2e0b4458f7"),
+ hexstr2bin("fc51cd8e6218a1a38da47ed00230f058"
+ "0816ed13ba3303ac5deb911548908025"),
+ hexstr2bin("af82"),
+ hexstr2bin("6291d657deec24024827e69c3abe01a3"
+ "0ce548a284743a445e3680d7db5ac3ac"
+ "18ff9b538d16f290ae67f760984dc659"
+ "4a7c15e9716ed28dc027beceea1ec40a")},
+ %% TEST 1024
+ {ed25519, undefined,
+ hexstr2bin("f5e5767cf153319517630f226876b86c"
+ "8160cc583bc013744c6bf255f5cc0ee5"),
+ hexstr2bin("278117fc144c72340f67d0f2316e8386"
+ "ceffbf2b2428c9c51fef7c597f1d426e"),
+ hexstr2bin("08b8b2b733424243760fe426a4b54908"
+ "632110a66c2f6591eabd3345e3e4eb98"
+ "fa6e264bf09efe12ee50f8f54e9f77b1"
+ "e355f6c50544e23fb1433ddf73be84d8"
+ "79de7c0046dc4996d9e773f4bc9efe57"
+ "38829adb26c81b37c93a1b270b20329d"
+ "658675fc6ea534e0810a4432826bf58c"
+ "941efb65d57a338bbd2e26640f89ffbc"
+ "1a858efcb8550ee3a5e1998bd177e93a"
+ "7363c344fe6b199ee5d02e82d522c4fe"
+ "ba15452f80288a821a579116ec6dad2b"
+ "3b310da903401aa62100ab5d1a36553e"
+ "06203b33890cc9b832f79ef80560ccb9"
+ "a39ce767967ed628c6ad573cb116dbef"
+ "efd75499da96bd68a8a97b928a8bbc10"
+ "3b6621fcde2beca1231d206be6cd9ec7"
+ "aff6f6c94fcd7204ed3455c68c83f4a4"
+ "1da4af2b74ef5c53f1d8ac70bdcb7ed1"
+ "85ce81bd84359d44254d95629e9855a9"
+ "4a7c1958d1f8ada5d0532ed8a5aa3fb2"
+ "d17ba70eb6248e594e1a2297acbbb39d"
+ "502f1a8c6eb6f1ce22b3de1a1f40cc24"
+ "554119a831a9aad6079cad88425de6bd"
+ "e1a9187ebb6092cf67bf2b13fd65f270"
+ "88d78b7e883c8759d2c4f5c65adb7553"
+ "878ad575f9fad878e80a0c9ba63bcbcc"
+ "2732e69485bbc9c90bfbd62481d9089b"
+ "eccf80cfe2df16a2cf65bd92dd597b07"
+ "07e0917af48bbb75fed413d238f5555a"
+ "7a569d80c3414a8d0859dc65a46128ba"
+ "b27af87a71314f318c782b23ebfe808b"
+ "82b0ce26401d2e22f04d83d1255dc51a"
+ "ddd3b75a2b1ae0784504df543af8969b"
+ "e3ea7082ff7fc9888c144da2af58429e"
+ "c96031dbcad3dad9af0dcbaaaf268cb8"
+ "fcffead94f3c7ca495e056a9b47acdb7"
+ "51fb73e666c6c655ade8297297d07ad1"
+ "ba5e43f1bca32301651339e22904cc8c"
+ "42f58c30c04aafdb038dda0847dd988d"
+ "cda6f3bfd15c4b4c4525004aa06eeff8"
+ "ca61783aacec57fb3d1f92b0fe2fd1a8"
+ "5f6724517b65e614ad6808d6f6ee34df"
+ "f7310fdc82aebfd904b01e1dc54b2927"
+ "094b2db68d6f903b68401adebf5a7e08"
+ "d78ff4ef5d63653a65040cf9bfd4aca7"
+ "984a74d37145986780fc0b16ac451649"
+ "de6188a7dbdf191f64b5fc5e2ab47b57"
+ "f7f7276cd419c17a3ca8e1b939ae49e4"
+ "88acba6b965610b5480109c8b17b80e1"
+ "b7b750dfc7598d5d5011fd2dcc5600a3"
+ "2ef5b52a1ecc820e308aa342721aac09"
+ "43bf6686b64b2579376504ccc493d97e"
+ "6aed3fb0f9cd71a43dd497f01f17c0e2"
+ "cb3797aa2a2f256656168e6c496afc5f"
+ "b93246f6b1116398a346f1a641f3b041"
+ "e989f7914f90cc2c7fff357876e506b5"
+ "0d334ba77c225bc307ba537152f3f161"
+ "0e4eafe595f6d9d90d11faa933a15ef1"
+ "369546868a7f3a45a96768d40fd9d034"
+ "12c091c6315cf4fde7cb68606937380d"
+ "b2eaaa707b4c4185c32eddcdd306705e"
+ "4dc1ffc872eeee475a64dfac86aba41c"
+ "0618983f8741c5ef68d3a101e8a3b8ca"
+ "c60c905c15fc910840b94c00a0b9d0"),
+ hexstr2bin("0aab4c900501b3e24d7cdf4663326a3a"
+ "87df5e4843b2cbdb67cbf6e460fec350"
+ "aa5371b1508f9f4528ecea23c436d94b"
+ "5e8fcd4f681e30a6ac00a9704a188a03")},
+ %% TEST SHA(abc)
+ {ed25519, undefined,
+ hexstr2bin("833fe62409237b9d62ec77587520911e"
+ "9a759cec1d19755b7da901b96dca3d42"),
+ hexstr2bin("ec172b93ad5e563bf4932c70e1245034"
+ "c35467ef2efd4d64ebf819683467e2bf"),
+ hexstr2bin("ddaf35a193617abacc417349ae204131"
+ "12e6fa4e89a97ea20a9eeee64b55d39a"
+ "2192992a274fc1a836ba3c23a3feebbd"
+ "454d4423643ce80e2a9ac94fa54ca49f"),
+ hexstr2bin("dc2a4459e7369633a52b1bf277839a00"
+ "201009a3efbf3ecb69bea2186c26b589"
+ "09351fc9ac90b3ecfdfbc7c66431e030"
+ "3dca179c138ac17ad9bef1177331a704")}
+ ];
+
+eddsa(ed448) ->
+ %% https://tools.ietf.org/html/rfc8032#section-7.4
+ [{ed448, undefined,
+ hexstr2bin("6c82a562cb808d10d632be89c8513ebf"
+ "6c929f34ddfa8c9f63c9960ef6e348a3"
+ "528c8a3fcc2f044e39a3fc5b94492f8f"
+ "032e7549a20098f95b"),
+ hexstr2bin("5fd7449b59b461fd2ce787ec616ad46a"
+ "1da1342485a70e1f8a0ea75d80e96778"
+ "edf124769b46c7061bd6783df1e50f6c"
+ "d1fa1abeafe8256180"),
+ hexstr2bin(""),
+ hexstr2bin("533a37f6bbe457251f023c0d88f976ae"
+ "2dfb504a843e34d2074fd823d41a591f"
+ "2b233f034f628281f2fd7a22ddd47d78"
+ "28c59bd0a21bfd3980ff0d2028d4b18a"
+ "9df63e006c5d1c2d345b925d8dc00b41"
+ "04852db99ac5c7cdda8530a113a0f4db"
+ "b61149f05a7363268c71d95808ff2e65"
+ "2600")},
+ %% 1 octet
+ {ed448, undefined,
+ hexstr2bin("c4eab05d357007c632f3dbb48489924d"
+ "552b08fe0c353a0d4a1f00acda2c463a"
+ "fbea67c5e8d2877c5e3bc397a659949e"
+ "f8021e954e0a12274e"),
+ hexstr2bin("43ba28f430cdff456ae531545f7ecd0a"
+ "c834a55d9358c0372bfa0c6c6798c086"
+ "6aea01eb00742802b8438ea4cb82169c"
+ "235160627b4c3a9480"),
+ hexstr2bin("03"),
+ hexstr2bin("26b8f91727bd62897af15e41eb43c377"
+ "efb9c610d48f2335cb0bd0087810f435"
+ "2541b143c4b981b7e18f62de8ccdf633"
+ "fc1bf037ab7cd779805e0dbcc0aae1cb"
+ "cee1afb2e027df36bc04dcecbf154336"
+ "c19f0af7e0a6472905e799f1953d2a0f"
+ "f3348ab21aa4adafd1d234441cf807c0"
+ "3a00")},
+
+ %% %% 1 octet (with context)
+ %% {ed448, undefined,
+ %% hexstr2bin("c4eab05d357007c632f3dbb48489924d"
+ %% "552b08fe0c353a0d4a1f00acda2c463a"
+ %% "fbea67c5e8d2877c5e3bc397a659949e"
+ %% "f8021e954e0a12274e"),
+ %% hexstr2bin("43ba28f430cdff456ae531545f7ecd0a"
+ %% "c834a55d9358c0372bfa0c6c6798c086"
+ %% "6aea01eb00742802b8438ea4cb82169c"
+ %% "235160627b4c3a9480"),
+ %% hexstr2bin("03"),
+ %% hexstr2bin("666f6f"), % Context
+ %% hexstr2bin("d4f8f6131770dd46f40867d6fd5d5055"
+ %% "de43541f8c5e35abbcd001b32a89f7d2"
+ %% "151f7647f11d8ca2ae279fb842d60721"
+ %% "7fce6e042f6815ea000c85741de5c8da"
+ %% "1144a6a1aba7f96de42505d7a7298524"
+ %% "fda538fccbbb754f578c1cad10d54d0d"
+ %% "5428407e85dcbc98a49155c13764e66c"
+ %% "3c00")},
+
+ %% 11 octets
+ {ed448, undefined,
+ hexstr2bin("cd23d24f714274e744343237b93290f5"
+ "11f6425f98e64459ff203e8985083ffd"
+ "f60500553abc0e05cd02184bdb89c4cc"
+ "d67e187951267eb328"),
+ hexstr2bin("dcea9e78f35a1bf3499a831b10b86c90"
+ "aac01cd84b67a0109b55a36e9328b1e3"
+ "65fce161d71ce7131a543ea4cb5f7e9f"
+ "1d8b00696447001400"),
+ hexstr2bin("0c3e544074ec63b0265e0c"),
+ hexstr2bin("1f0a8888ce25e8d458a21130879b840a"
+ "9089d999aaba039eaf3e3afa090a09d3"
+ "89dba82c4ff2ae8ac5cdfb7c55e94d5d"
+ "961a29fe0109941e00b8dbdeea6d3b05"
+ "1068df7254c0cdc129cbe62db2dc957d"
+ "bb47b51fd3f213fb8698f064774250a5"
+ "028961c9bf8ffd973fe5d5c206492b14"
+ "0e00")},
+ %% 12 octets
+ {ed448, undefined,
+ hexstr2bin("258cdd4ada32ed9c9ff54e63756ae582"
+ "fb8fab2ac721f2c8e676a72768513d93"
+ "9f63dddb55609133f29adf86ec9929dc"
+ "cb52c1c5fd2ff7e21b"),
+ hexstr2bin("3ba16da0c6f2cc1f30187740756f5e79"
+ "8d6bc5fc015d7c63cc9510ee3fd44adc"
+ "24d8e968b6e46e6f94d19b945361726b"
+ "d75e149ef09817f580"),
+ hexstr2bin("64a65f3cdedcdd66811e2915"),
+ hexstr2bin("7eeeab7c4e50fb799b418ee5e3197ff6"
+ "bf15d43a14c34389b59dd1a7b1b85b4a"
+ "e90438aca634bea45e3a2695f1270f07"
+ "fdcdf7c62b8efeaf00b45c2c96ba457e"
+ "b1a8bf075a3db28e5c24f6b923ed4ad7"
+ "47c3c9e03c7079efb87cb110d3a99861"
+ "e72003cbae6d6b8b827e4e6c143064ff"
+ "3c00")},
+ %% 13 octets
+ {ed448, undefined,
+ hexstr2bin("7ef4e84544236752fbb56b8f31a23a10"
+ "e42814f5f55ca037cdcc11c64c9a3b29"
+ "49c1bb60700314611732a6c2fea98eeb"
+ "c0266a11a93970100e"),
+ hexstr2bin("b3da079b0aa493a5772029f0467baebe"
+ "e5a8112d9d3a22532361da294f7bb381"
+ "5c5dc59e176b4d9f381ca0938e13c6c0"
+ "7b174be65dfa578e80"),
+ hexstr2bin("64a65f3cdedcdd66811e2915e7"),
+ hexstr2bin("6a12066f55331b6c22acd5d5bfc5d712"
+ "28fbda80ae8dec26bdd306743c5027cb"
+ "4890810c162c027468675ecf645a8317"
+ "6c0d7323a2ccde2d80efe5a1268e8aca"
+ "1d6fbc194d3f77c44986eb4ab4177919"
+ "ad8bec33eb47bbb5fc6e28196fd1caf5"
+ "6b4e7e0ba5519234d047155ac727a105"
+ "3100")},
+ %% 64 octets
+ {ed448, undefined,
+ hexstr2bin("d65df341ad13e008567688baedda8e9d"
+ "cdc17dc024974ea5b4227b6530e339bf"
+ "f21f99e68ca6968f3cca6dfe0fb9f4fa"
+ "b4fa135d5542ea3f01"),
+ hexstr2bin("df9705f58edbab802c7f8363cfe5560a"
+ "b1c6132c20a9f1dd163483a26f8ac53a"
+ "39d6808bf4a1dfbd261b099bb03b3fb5"
+ "0906cb28bd8a081f00"),
+ hexstr2bin("bd0f6a3747cd561bdddf4640a332461a"
+ "4a30a12a434cd0bf40d766d9c6d458e5"
+ "512204a30c17d1f50b5079631f64eb31"
+ "12182da3005835461113718d1a5ef944"),
+ hexstr2bin("554bc2480860b49eab8532d2a533b7d5"
+ "78ef473eeb58c98bb2d0e1ce488a98b1"
+ "8dfde9b9b90775e67f47d4a1c3482058"
+ "efc9f40d2ca033a0801b63d45b3b722e"
+ "f552bad3b4ccb667da350192b61c508c"
+ "f7b6b5adadc2c8d9a446ef003fb05cba"
+ "5f30e88e36ec2703b349ca229c267083"
+ "3900")},
+ %% 256 octets
+ {ed448, undefined,
+ hexstr2bin("2ec5fe3c17045abdb136a5e6a913e32a"
+ "b75ae68b53d2fc149b77e504132d3756"
+ "9b7e766ba74a19bd6162343a21c8590a"
+ "a9cebca9014c636df5"),
+ hexstr2bin("79756f014dcfe2079f5dd9e718be4171"
+ "e2ef2486a08f25186f6bff43a9936b9b"
+ "fe12402b08ae65798a3d81e22e9ec80e"
+ "7690862ef3d4ed3a00"),
+ hexstr2bin("15777532b0bdd0d1389f636c5f6b9ba7"
+ "34c90af572877e2d272dd078aa1e567c"
+ "fa80e12928bb542330e8409f31745041"
+ "07ecd5efac61ae7504dabe2a602ede89"
+ "e5cca6257a7c77e27a702b3ae39fc769"
+ "fc54f2395ae6a1178cab4738e543072f"
+ "c1c177fe71e92e25bf03e4ecb72f47b6"
+ "4d0465aaea4c7fad372536c8ba516a60"
+ "39c3c2a39f0e4d832be432dfa9a706a6"
+ "e5c7e19f397964ca4258002f7c0541b5"
+ "90316dbc5622b6b2a6fe7a4abffd9610"
+ "5eca76ea7b98816af0748c10df048ce0"
+ "12d901015a51f189f3888145c03650aa"
+ "23ce894c3bd889e030d565071c59f409"
+ "a9981b51878fd6fc110624dcbcde0bf7"
+ "a69ccce38fabdf86f3bef6044819de11"),
+ hexstr2bin("c650ddbb0601c19ca11439e1640dd931"
+ "f43c518ea5bea70d3dcde5f4191fe53f"
+ "00cf966546b72bcc7d58be2b9badef28"
+ "743954e3a44a23f880e8d4f1cfce2d7a"
+ "61452d26da05896f0a50da66a239a8a1"
+ "88b6d825b3305ad77b73fbac0836ecc6"
+ "0987fd08527c1a8e80d5823e65cafe2a"
+ "3d00")},
+ %% 1023 octets
+ {ed448, undefined,
+ hexstr2bin("872d093780f5d3730df7c212664b37b8"
+ "a0f24f56810daa8382cd4fa3f77634ec"
+ "44dc54f1c2ed9bea86fafb7632d8be19"
+ "9ea165f5ad55dd9ce8"),
+ hexstr2bin("a81b2e8a70a5ac94ffdbcc9badfc3feb"
+ "0801f258578bb114ad44ece1ec0e799d"
+ "a08effb81c5d685c0c56f64eecaef8cd"
+ "f11cc38737838cf400"),
+ hexstr2bin("6ddf802e1aae4986935f7f981ba3f035"
+ "1d6273c0a0c22c9c0e8339168e675412"
+ "a3debfaf435ed651558007db4384b650"
+ "fcc07e3b586a27a4f7a00ac8a6fec2cd"
+ "86ae4bf1570c41e6a40c931db27b2faa"
+ "15a8cedd52cff7362c4e6e23daec0fbc"
+ "3a79b6806e316efcc7b68119bf46bc76"
+ "a26067a53f296dafdbdc11c77f7777e9"
+ "72660cf4b6a9b369a6665f02e0cc9b6e"
+ "dfad136b4fabe723d2813db3136cfde9"
+ "b6d044322fee2947952e031b73ab5c60"
+ "3349b307bdc27bc6cb8b8bbd7bd32321"
+ "9b8033a581b59eadebb09b3c4f3d2277"
+ "d4f0343624acc817804728b25ab79717"
+ "2b4c5c21a22f9c7839d64300232eb66e"
+ "53f31c723fa37fe387c7d3e50bdf9813"
+ "a30e5bb12cf4cd930c40cfb4e1fc6225"
+ "92a49588794494d56d24ea4b40c89fc0"
+ "596cc9ebb961c8cb10adde976a5d602b"
+ "1c3f85b9b9a001ed3c6a4d3b1437f520"
+ "96cd1956d042a597d561a596ecd3d173"
+ "5a8d570ea0ec27225a2c4aaff26306d1"
+ "526c1af3ca6d9cf5a2c98f47e1c46db9"
+ "a33234cfd4d81f2c98538a09ebe76998"
+ "d0d8fd25997c7d255c6d66ece6fa56f1"
+ "1144950f027795e653008f4bd7ca2dee"
+ "85d8e90f3dc315130ce2a00375a318c7"
+ "c3d97be2c8ce5b6db41a6254ff264fa6"
+ "155baee3b0773c0f497c573f19bb4f42"
+ "40281f0b1f4f7be857a4e59d416c06b4"
+ "c50fa09e1810ddc6b1467baeac5a3668"
+ "d11b6ecaa901440016f389f80acc4db9"
+ "77025e7f5924388c7e340a732e554440"
+ "e76570f8dd71b7d640b3450d1fd5f041"
+ "0a18f9a3494f707c717b79b4bf75c984"
+ "00b096b21653b5d217cf3565c9597456"
+ "f70703497a078763829bc01bb1cbc8fa"
+ "04eadc9a6e3f6699587a9e75c94e5bab"
+ "0036e0b2e711392cff0047d0d6b05bd2"
+ "a588bc109718954259f1d86678a579a3"
+ "120f19cfb2963f177aeb70f2d4844826"
+ "262e51b80271272068ef5b3856fa8535"
+ "aa2a88b2d41f2a0e2fda7624c2850272"
+ "ac4a2f561f8f2f7a318bfd5caf969614"
+ "9e4ac824ad3460538fdc25421beec2cc"
+ "6818162d06bbed0c40a387192349db67"
+ "a118bada6cd5ab0140ee273204f628aa"
+ "d1c135f770279a651e24d8c14d75a605"
+ "9d76b96a6fd857def5e0b354b27ab937"
+ "a5815d16b5fae407ff18222c6d1ed263"
+ "be68c95f32d908bd895cd76207ae7264"
+ "87567f9a67dad79abec316f683b17f2d"
+ "02bf07e0ac8b5bc6162cf94697b3c27c"
+ "d1fea49b27f23ba2901871962506520c"
+ "392da8b6ad0d99f7013fbc06c2c17a56"
+ "9500c8a7696481c1cd33e9b14e40b82e"
+ "79a5f5db82571ba97bae3ad3e0479515"
+ "bb0e2b0f3bfcd1fd33034efc6245eddd"
+ "7ee2086ddae2600d8ca73e214e8c2b0b"
+ "db2b047c6a464a562ed77b73d2d841c4"
+ "b34973551257713b753632efba348169"
+ "abc90a68f42611a40126d7cb21b58695"
+ "568186f7e569d2ff0f9e745d0487dd2e"
+ "b997cafc5abf9dd102e62ff66cba87"),
+ hexstr2bin("e301345a41a39a4d72fff8df69c98075"
+ "a0cc082b802fc9b2b6bc503f926b65bd"
+ "df7f4c8f1cb49f6396afc8a70abe6d8a"
+ "ef0db478d4c6b2970076c6a0484fe76d"
+ "76b3a97625d79f1ce240e7c576750d29"
+ "5528286f719b413de9ada3e8eb78ed57"
+ "3603ce30d8bb761785dc30dbc320869e"
+ "1a00")}
+ ].
+
ecdh() ->
%% http://csrc.nist.gov/groups/STM/cavp/
Curves = crypto:ec_curves() ++
@@ -2803,6 +3267,8 @@ ecdh() ->
dh() ->
{dh, 90970053988169282502023478715631717259407236400413906591937635666709823903223997309250405131675572047545403771567755831138144089197560332757755059848492919215391041119286178688014693040542889497092308638580104031455627238700168892909539193174537248629499995652186913900511641708112112482297874449292467498403, 2}.
+
+
rsa_oaep() ->
%% ftp://ftp.rsa.com/pub/rsalabs/tmp/pkcs1v15crypt-vectors.txt
Public = [hexstr2bin("010001"),
@@ -2877,13 +3343,6 @@ cmac_nist(Config, aes_cbc256 = Type) ->
read_rsp(Config, Type,
["CMACGenAES256.rsp", "CMACVerAES256.rsp"]).
-no_padding() ->
- Public = [_, Mod] = rsa_public_stronger(),
- Private = rsa_private_stronger(),
- MsgLen = erlang:byte_size(int_to_bin(Mod)),
- Msg = list_to_binary(lists:duplicate(MsgLen, $X)),
- {rsa, Public, Private, Msg, rsa_no_padding}.
-
int_to_bin(X) when X < 0 -> int_to_bin_neg(X, []);
int_to_bin(X) -> int_to_bin_pos(X, []).
@@ -3063,7 +3522,7 @@ parse_rsp(Type,
file := File,
alen := Alen,
plen := Plen,
- nlen := Nlen,
+ nlen := _Nlen,
tlen := Tlen,
key := Key,
nonce := IV,
diff --git a/lib/crypto/test/engine_SUITE.erl b/lib/crypto/test/engine_SUITE.erl
index b083b30d70..8a45fc9076 100644
--- a/lib/crypto/test/engine_SUITE.erl
+++ b/lib/crypto/test/engine_SUITE.erl
@@ -51,12 +51,14 @@ all() ->
ctrl_cmd_string,
ctrl_cmd_string_optional,
ensure_load,
- {group, engine_stored_key}
+ {group, engine_stored_key},
+ {group, engine_fakes_rsa}
].
groups() ->
[{engine_stored_key, [],
- [sign_verify_rsa,
+ [
+ sign_verify_rsa,
sign_verify_dsa,
sign_verify_ecdsa,
sign_verify_rsa_pwd,
@@ -71,15 +73,25 @@ groups() ->
get_pub_from_priv_key_rsa_pwd_bad_pwd,
get_pub_from_priv_key_dsa,
get_pub_from_priv_key_ecdsa
- ]}].
+ ]},
+ {engine_fakes_rsa, [], [sign_verify_rsa_fake
+ ]}
+ ].
init_per_suite(Config) ->
- case crypto:info_lib() of
- [{_,_, <<"OpenSSL 1.0.1s-freebsd 1 Mar 2016">>}] ->
+ case {os:type(), crypto:info_lib()} of
+ {_, [{_,_, <<"OpenSSL 1.0.1s-freebsd 1 Mar 2016">>}]} ->
{skip, "Problem with engine on OpenSSL 1.0.1s-freebsd"};
- Res ->
- ct:log("crypto:info_lib() -> ~p\n", [Res]),
+
+ {{unix,darwin}, _} ->
+ {skip, "Engine unsupported on Darwin"};
+
+ {{win32,_}, _} ->
+ {skip, "Engine unsupported on Windows"};
+
+ {OS, Res} ->
+ ct:log("crypto:info_lib() -> ~p\nos:type() -> ~p", [Res,OS]),
try crypto:start() of
ok ->
Config;
@@ -95,7 +107,20 @@ end_per_suite(_Config) ->
%%--------------------------------------------------------------------
init_per_group(engine_stored_key, Config) ->
- case load_storage_engine(Config) of
+ group_load_engine(Config, [engine_method_rsa]);
+init_per_group(engine_fakes_rsa, Config) ->
+ case crypto:info_lib() of
+ [{<<"OpenSSL">>,LibVer,_}] when is_integer(LibVer), LibVer >= 16#10100000 ->
+ group_load_engine(Config, []);
+ _ ->
+ {skip, "Too low OpenSSL cryptolib version"}
+ end;
+init_per_group(_Group, Config0) ->
+ Config0.
+
+
+group_load_engine(Config, ExcludeMthds) ->
+ case load_storage_engine(Config, ExcludeMthds) of
{ok, E} ->
KeyDir = key_dir(Config),
[{storage_engine,E}, {storage_dir,KeyDir} | Config];
@@ -108,19 +133,19 @@ init_per_group(engine_stored_key, Config) ->
Other ->
ct:log("Engine load failed: ~p",[Other]),
{fail, "Engine load failed"}
- end;
-init_per_group(_Group, Config0) ->
- Config0.
+ end.
-end_per_group(engine_stored_key, Config) ->
+
+
+
+
+end_per_group(_, Config) ->
case proplists:get_value(storage_engine, Config) of
undefined ->
ok;
E ->
ok = crypto:engine_unload(E)
- end;
-end_per_group(_, _) ->
- ok.
+ end.
%%--------------------------------------------------------------------
init_per_testcase(_Case, Config) ->
@@ -414,6 +439,9 @@ bad_arguments(Config) when is_list(Config) ->
try
try
crypto:engine_load(fail_engine, [], [])
+ of
+ X1 ->
+ ct:fail("1 Got ~p",[X1])
catch
error:badarg ->
ok
@@ -425,6 +453,11 @@ bad_arguments(Config) when is_list(Config) ->
{<<"ID">>, <<"MD5">>},
<<"LOAD">>],
[])
+ of
+ {error,bad_engine_id} ->
+ throw(dynamic_engine_unsupported);
+ X2 ->
+ ct:fail("2 Got ~p",[X2])
catch
error:badarg ->
ok
@@ -435,13 +468,20 @@ bad_arguments(Config) when is_list(Config) ->
{'ID', <<"MD5">>},
<<"LOAD">>],
[])
+ of
+          {error,bad_engine_id} -> % should have happened in the previous try...catch end!
+ throw(dynamic_engine_unsupported);
+ X3 ->
+ ct:fail("3 Got ~p",[X3])
catch
error:badarg ->
ok
end
catch
error:notsup ->
- {skip, "Engine not supported on this SSL version"}
+ {skip, "Engine not supported on this SSL version"};
+ throw:dynamic_engine_unsupported ->
+ {skip, "Dynamic Engine not supported"}
end
end.
@@ -547,11 +587,11 @@ ctrl_cmd_string(Config) when is_list(Config) ->
{ok, E} ->
case crypto:engine_ctrl_cmd_string(E, <<"TEST">>, <<"17">>) of
ok ->
+ ok = crypto:engine_unload(E),
ct:fail(fail_ctrl_cmd_should_fail);
{error,ctrl_cmd_failed} ->
- ok
- end,
- ok = crypto:engine_unload(E);
+ ok = crypto:engine_unload(E)
+ end;
{error, bad_engine_id} ->
{skip, "Dynamic Engine not supported"}
end
@@ -577,11 +617,12 @@ ctrl_cmd_string_optional(Config) when is_list(Config) ->
{ok, E} ->
case crypto:engine_ctrl_cmd_string(E, <<"TEST">>, <<"17">>, true) of
ok ->
- ok;
- _ ->
+ ok = crypto:engine_unload(E);
+ Err ->
+ ct:log("Error: ~p",[Err]),
+ ok = crypto:engine_unload(E),
ct:fail(fail_ctrl_cmd_string)
- end,
- ok = crypto:engine_unload(E);
+ end;
{error, bad_engine_id} ->
{skip, "Dynamic Engine not supported"}
end
@@ -643,6 +684,14 @@ sign_verify_rsa(Config) ->
key_id => key_id(Config, "rsa_public_key.pem")},
sign_verify(rsa, sha, Priv, Pub).
+sign_verify_rsa_fake(Config) ->
+ %% Use fake engine rsa implementation
+ Priv = #{engine => engine_ref(Config),
+ key_id => key_id(Config, "rsa_private_key.pem")},
+ Pub = #{engine => engine_ref(Config),
+ key_id => key_id(Config, "rsa_public_key.pem")},
+ sign_verify_fake(rsa, sha256, Priv, Pub).
+
sign_verify_dsa(Config) ->
Priv = #{engine => engine_ref(Config),
key_id => key_id(Config, "dsa_private_key.pem")},
@@ -802,13 +851,18 @@ get_pub_from_priv_key_ecdsa(Config) ->
%%%================================================================
%%% Help for engine_stored_pub_priv_keys* test cases
%%%
-load_storage_engine(_Config) ->
+load_storage_engine(Config) ->
+ load_storage_engine(Config, []).
+
+load_storage_engine(_Config, ExcludeMthds) ->
case crypto:get_test_engine() of
{ok, Engine} ->
try crypto:engine_load(<<"dynamic">>,
[{<<"SO_PATH">>, Engine},
<<"LOAD">>],
- [])
+ [],
+ crypto:engine_get_all_methods() -- ExcludeMthds
+ )
catch
error:notsup ->
{error, notsup}
@@ -866,10 +920,47 @@ sign_verify(Alg, Sha, KeySign, KeyVerify) ->
true ->
PlainText = <<"Hej på dig">>,
Signature = crypto:sign(Alg, Sha, PlainText, KeySign),
- case crypto:verify(Alg, Sha, PlainText, Signature, KeyVerify) of
- true -> ok;
- _ -> {fail, "Sign-verify error"}
+ case is_fake(Signature) of
+ true ->
+ ct:pal("SIG ~p ~p size ~p~n~p",[Alg,Sha,size(Signature),Signature]),
+ {fail, "Faked RSA impl used!!"};
+ false ->
+ case crypto:verify(Alg, Sha, PlainText, Signature, KeyVerify) of
+ true -> ok;
+ _ -> {fail, "Sign-verify error"}
+ end
end;
false ->
{skip, lists:concat([Alg," is not supported by cryptolib"])}
end.
+
+
+%%% Use fake engine rsa implementation
+sign_verify_fake(Alg, Sha, KeySign, KeyVerify) ->
+ case pubkey_alg_supported(Alg) of
+ true ->
+ PlainText = <<"Fake me!">>,
+ Signature = crypto:sign(Alg, Sha, PlainText, KeySign),
+ case is_fake(Signature) of
+ true ->
+ case crypto:verify(Alg, Sha, PlainText, Signature, KeyVerify) of
+ true -> ok;
+ _ -> {fail, "Sign-verify error"}
+ end;
+ false ->
+ ct:pal("SIG ~p ~p size ~p~n~p",[Alg,Sha,size(Signature),Signature]),
+ {fail, "Faked impl not used"}
+ end;
+ false ->
+ {skip, lists:concat([Alg," is not supported by cryptolib"])}
+ end.
+
+
+is_fake(Sig) -> is_fake(Sig, 0).
+
+is_fake(<<>>, _) -> true;
+is_fake(<<B,Rest/binary>>, B) -> is_fake(Rest, B+1);
+is_fake(_, _) -> false.
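For reference, is_fake/1 accepts exactly the binaries whose bytes count up from 0, which is how the faked signature is recognized in sign_verify_fake/4 above; two illustrative calls:

    true  = is_fake(<<0,1,2,3,4>>),
    false = is_fake(<<5,0,1,2>>).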
+
+
+
diff --git a/lib/eldap/doc/src/notes.xml b/lib/eldap/doc/src/notes.xml
index b390e0c047..bf9358c4d1 100644
--- a/lib/eldap/doc/src/notes.xml
+++ b/lib/eldap/doc/src/notes.xml
@@ -77,6 +77,22 @@
</section>
+<section><title>Eldap 1.2.3.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A race condition at close could cause the eldap client to
+ exit with a badarg message as cause.</p>
+ <p>
+ Own Id: OTP-15342 Aux Id: ERIERL-242 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Eldap 1.2.3</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -92,6 +108,22 @@
</section>
+<section><title>Eldap 1.2.2.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ A race condition at close could cause the eldap client to
+ exit with a badarg message as cause.</p>
+ <p>
+ Own Id: OTP-15342 Aux Id: ERIERL-242 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Eldap 1.2.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -368,4 +400,3 @@
<p>New application. </p>
</section>
</chapter>
-
diff --git a/lib/et/src/et_wx_viewer.erl b/lib/et/src/et_wx_viewer.erl
index 4dd44e7a4c..041527fec4 100644
--- a/lib/et/src/et_wx_viewer.erl
+++ b/lib/et/src/et_wx_viewer.erl
@@ -793,8 +793,8 @@ handle_info(#wx{event = #wxScroll{type = scroll_changed}} = Wx, S) ->
N = round(S#state.n_events * Pos / Range),
Diff =
case N - event_pos(S) of
- D when D < 0 -> D - 1;
- D -> D + 1
+ D when D < 0 -> D;
+ D -> D
end,
S2 = scroll_changed(S, Diff),
noreply(S2);
@@ -1002,7 +1002,7 @@ scroll_changed(S, Expected) ->
scroll_first(S);
last ->
scroll_last(S)
- end;
+ end;
true ->
%% Down
OldPos = event_pos(S),
@@ -1018,19 +1018,24 @@ scroll_changed(S, Expected) ->
end.
jump_up(S, OldKey, OldPos, NewPos) ->
- Try = NewPos - OldPos,
+    Try = NewPos - OldPos - 1,
Order = S#state.event_order,
- Fun = fun(Event, #e{pos = P}) when P >= NewPos ->
- Key = et_collector:make_key(Order, Event),
- #e{event = Event, key = Key, pos = P - 1};
- (_, Acc) ->
- Acc
- end,
- PrevE = et_collector:iterate(S#state.collector_pid,
- OldKey,
- Try,
- Fun,
- #e{key = OldKey, pos = OldPos}),
+ PrevE =
+ if NewPos =:= 0 ->
+ first;
+ true ->
+ Fun = fun(Event, #e{pos = P}) when P >= NewPos ->
+ Key = et_collector:make_key(Order, Event),
+ #e{event = Event, key = Key, pos = P - 1};
+ (_E, Acc) ->
+ Acc
+ end,
+ et_collector:iterate(S#state.collector_pid,
+ OldKey,
+ Try,
+ Fun,
+ #e{key = OldKey, pos = OldPos})
+ end,
case collect_more_events(S, PrevE, S#state.events_per_page) of
{_, []} ->
S;
@@ -2013,7 +2018,7 @@ update_scroll_bar(#state{scroll_bar = ScrollBar,
PixelsPerEvent = Range / EventsPerPage,
Share = EventsPerPage / N,
wxScrollBar:setScrollbar(ScrollBar,
- trunc(EventPos * Share * PixelsPerEvent),
+ trunc(EventPos * Share * PixelsPerEvent),
round(Share * Range),
Range,
round(Share * Range),
diff --git a/lib/hipe/main/hipe.erl b/lib/hipe/main/hipe.erl
index ac2e6c1e3b..e2cb9c0f0b 100644
--- a/lib/hipe/main/hipe.erl
+++ b/lib/hipe/main/hipe.erl
@@ -542,7 +542,7 @@ file(File) ->
| {'error', term()}
when Mod :: mod().
file(File, Options) when is_atom(File) ->
- case beam_lib:info(File) of
+ case beam_lib:info(atom_to_list(File)) of
L when is_list(L) ->
{module, Mod} = lists:keyfind(module, 1, L),
case compile(Mod, File, Options) of
diff --git a/lib/inets/src/http_client/httpc_request.erl b/lib/inets/src/http_client/httpc_request.erl
index 9b81bd7a80..0f20d93bc1 100644
--- a/lib/inets/src/http_client/httpc_request.erl
+++ b/lib/inets/src/http_client/httpc_request.erl
@@ -213,15 +213,18 @@ update_body(Headers, Body) ->
update_headers(Headers, ContentType, Body, []) ->
case Body of
[] ->
- Headers#http_request_h{'content-length' = "0"};
+ Headers1 = Headers#http_request_h{'content-length' = "0"},
+ handle_content_type(Headers1, ContentType);
<<>> ->
- Headers#http_request_h{'content-length' = "0"};
+ Headers1 = Headers#http_request_h{'content-length' = "0"},
+ handle_content_type(Headers1, ContentType);
{Fun, _Acc} when is_function(Fun, 1) ->
%% A client MUST NOT generate a 100-continue expectation in a request
%% that does not include a message body. This implies that either the
%% Content-Length or the Transfer-Encoding header MUST be present.
%% DO NOT send content-type when Body is empty.
- Headers#http_request_h{'content-type' = ContentType};
+ Headers1 = Headers#http_request_h{'content-type' = ContentType},
+ handle_transfer_encoding(Headers1);
_ ->
Headers#http_request_h{
'content-length' = body_length(Body),
@@ -230,12 +233,26 @@ update_headers(Headers, ContentType, Body, []) ->
update_headers(_, _, _, HeadersAsIs) ->
HeadersAsIs.
+handle_transfer_encoding(Headers = #http_request_h{'transfer-encoding' = undefined}) ->
+ Headers;
+handle_transfer_encoding(Headers) ->
+ %% RFC7230 3.3.2
+ %% A sender MUST NOT send a 'Content-Length' header field in any message
+ %% that contains a 'Transfer-Encoding' header field.
+ Headers#http_request_h{'content-length' = undefined}.
+
body_length(Body) when is_binary(Body) ->
integer_to_list(size(Body));
body_length(Body) when is_list(Body) ->
integer_to_list(length(Body)).
+%% Only set the 'Content-Type' header when it is explicitly given.
+handle_content_type(Headers, "") ->
+ Headers;
+handle_content_type(Headers, ContentType) ->
+ Headers#http_request_h{'content-type' = ContentType}.
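A minimal sketch of the request shape this change is about (the URL is a placeholder): with an empty body and an explicit content type, the request now carries 'Content-Length: 0' and keeps the given 'Content-Type' header.

    httpc:request(post,
                  {"http://localhost:8080/form", [],
                   "application/x-www-form-urlencoded", ""},
                  [], []).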
+
method(Method) ->
http_util:to_upper(atom_to_list(Method)).
diff --git a/lib/inets/test/httpc_SUITE.erl b/lib/inets/test/httpc_SUITE.erl
index 3d375222b5..8357e02014 100644
--- a/lib/inets/test/httpc_SUITE.erl
+++ b/lib/inets/test/httpc_SUITE.erl
@@ -156,6 +156,7 @@ only_simulated() ->
multipart_chunks,
get_space,
delete_no_body,
+ post_with_content_type,
stream_fun_server_close
].
@@ -170,7 +171,8 @@ misc() ->
server_does_not_exist,
timeout_memory_leak,
wait_for_whole_response,
- post_204_chunked
+ post_204_chunked,
+ chunkify_fun
].
sim_mixed() ->
@@ -1408,7 +1410,8 @@ post_204_chunked(_Config) ->
{ok, ListenSocket} = gen_tcp:listen(0, [{active,once}, binary]),
{ok,{_,Port}} = inet:sockname(ListenSocket),
- spawn(fun () -> custom_server(Msg, Chunk, ListenSocket) end),
+ spawn(fun () -> custom_server(Msg, Chunk, ListenSocket,
+ fun post_204_receive/0) end),
{ok,Host} = inet:gethostname(),
End = "/cgi-bin/erl/httpd_example:post_204",
@@ -1418,16 +1421,26 @@ post_204_chunked(_Config) ->
%% Second request times out in the faulty case.
{ok, _} = httpc:request(post, {URL, [], "text/html", []}, [], []).
-custom_server(Msg, Chunk, ListenSocket) ->
+post_204_receive() ->
+ receive
+ {tcp, _, Msg} ->
+ ct:log("Message received: ~p", [Msg])
+ after
+ 1000 ->
+            ct:fail("Timeout: did not receive packet")
+ end.
+
+%% Custom server is used to test special cases when using chunked encoding
+custom_server(Msg, Chunk, ListenSocket, ReceiveFun) ->
{ok, Accept} = gen_tcp:accept(ListenSocket),
- receive_packet(),
+ ReceiveFun(),
send_response(Msg, Chunk, Accept),
- custom_server_loop(Msg, Chunk, Accept).
+ custom_server_loop(Msg, Chunk, Accept, ReceiveFun).
-custom_server_loop(Msg, Chunk, Accept) ->
- receive_packet(),
+custom_server_loop(Msg, Chunk, Accept, ReceiveFun) ->
+ ReceiveFun(),
send_response(Msg, Chunk, Accept),
- custom_server_loop(Msg, Chunk, Accept).
+ custom_server_loop(Msg, Chunk, Accept, ReceiveFun).
send_response(Msg, Chunk, Socket) ->
inet:setopts(Socket, [{active, once}]),
@@ -1435,15 +1448,54 @@ send_response(Msg, Chunk, Socket) ->
timer:sleep(250),
gen_tcp:send(Socket, Chunk).
-receive_packet() ->
+%%--------------------------------------------------------------------
+chunkify_fun() ->
+    [{doc,"Test that a chunked-encoded request does not include the 'Content-Length' header"}].
+chunkify_fun(_Config) ->
+ Msg = "HTTP/1.1 204 No Content\r\n" ++
+ "Date: Thu, 23 Aug 2018 13:36:29 GMT\r\n" ++
+ "Content-Type: text/html\r\n" ++
+ "Server: inets/6.5.2.3\r\n" ++
+ "Cache-Control: no-cache\r\n" ++
+ "Pragma: no-cache\r\n" ++
+ "Expires: Fri, 24 Aug 2018 07:49:35 GMT\r\n" ++
+ "Transfer-Encoding: chunked\r\n" ++
+ "\r\n",
+ Chunk = "0\r\n\r\n",
+
+ {ok, ListenSocket} = gen_tcp:listen(0, [{active,once}, binary]),
+ {ok,{_,Port}} = inet:sockname(ListenSocket),
+ spawn(fun () -> custom_server(Msg, Chunk, ListenSocket,
+ fun chunkify_receive/0) end),
+
+ {ok,Host} = inet:gethostname(),
+ End = "/cgi-bin/erl/httpd_example",
+ URL = ?URL_START ++ Host ++ ":" ++ integer_to_list(Port) ++ End,
+ Fun = fun(_) -> {ok,<<1>>,eof_body} end,
+ Acc = start,
+
+ {ok, {{_,204,_}, _, _}} =
+ httpc:request(put, {URL, [], "text/html", {chunkify, Fun, Acc}}, [], []).
+
+chunkify_receive() ->
+ Error = "HTTP/1.1 500 Internal Server Error\r\n" ++
+ "Content-Length: 0\r\n\r\n",
receive
- {tcp, _, Msg} ->
- ct:log("Message received: ~p", [Msg])
+ {tcp, Port, Msg} ->
+ case binary:match(Msg, <<"content-length">>) of
+ nomatch ->
+ ct:log("Message received: ~s", [binary_to_list(Msg)]);
+ {_, _} ->
+ ct:log("Message received (negative): ~s", [binary_to_list(Msg)]),
+ %% Signal a testcase failure when the received HTTP request
+ %% contains a 'Content-Length' header.
+ gen_tcp:send(Port, Error),
+ ct:fail("Content-Length present in received headers.")
+ end
after
1000 ->
ct:fail("Timeout: did not recive packet")
end.
-
%%--------------------------------------------------------------------
stream_fun_server_close() ->
[{doc, "Test that an error msg is received when using a receiver fun as stream target"}].
@@ -1550,6 +1602,15 @@ delete_no_body(Config) when is_list(Config) ->
httpc:request(delete, {URL, [], "text/plain", "TEST"}, [], []).
%%--------------------------------------------------------------------
+post_with_content_type(doc) ->
+ ["Test that a POST request with explicit 'Content-Type' does not drop the 'Content-Type' header - Solves ERL-736"];
+post_with_content_type(Config) when is_list(Config) ->
+ URL = url(group_name(Config), "/delete_no_body.html", Config),
+ %% Simulated server replies 500 if 'Content-Type' header is present
+ {ok, {{_,500,_}, _, _}} =
+ httpc:request(post, {URL, [], "application/x-www-form-urlencoded", ""}, [], []).
+
+%%--------------------------------------------------------------------
request_options() ->
[{doc, "Test http get request with socket options against local server (IPv6)"}].
request_options(Config) when is_list(Config) ->
diff --git a/lib/kernel/doc/src/inet.xml b/lib/kernel/doc/src/inet.xml
index ed775d67eb..87b08e4e36 100644
--- a/lib/kernel/doc/src/inet.xml
+++ b/lib/kernel/doc/src/inet.xml
@@ -198,6 +198,79 @@ fe80::204:acff:fe17:bf38
</desc>
</datatype>
<datatype>
+ <name name="getifaddrs_ifopts"/>
+ <desc>
+ <p>
+ Interface address description list returned from
+ <seealso marker="#getifaddrs/0"><c>getifaddrs/0,1</c></seealso>
+ for a named interface, translated from the returned
+ data of the POSIX API function <c>getaddrinfo()</c>.
+ </p>
+ <p>
+ <c><anno>Hwaddr</anno></c> is hardware dependent,
+ for example, on Ethernet interfaces it is
+ the 6-byte Ethernet address (MAC address (EUI-48 address)).
+ </p>
+ <p>
+ The tuples <c>{addr,<anno>Addr</anno>}</c>,
+ <c>{netmask,<anno>Netmask</anno>}</c>, and possibly
+ <c>{broadaddr,<anno>Broadaddr</anno>}</c> or
+ <c>{dstaddr,<anno>Dstaddr</anno>}</c>
+ are repeated in the list
+        if the interface has multiple addresses.
+        An interface may have multiple <c>{flag,_}</c> tuples,
+        for example if it has different flags
+        for different address families.
+        Little definite can be said about multiple
+        <c>{hwaddr,<anno>Hwaddr</anno>}</c> tuples, though.
+ The tuple <c>{flag,<anno>Flags</anno>}</c> is mandatory,
+ all others are optional.
+ </p>
+ <p>
+ Do not rely too much on the order
+ of <c><anno>Flags</anno></c> atoms
+ or the <c><anno>Ifopt</anno></c> tuples.
+        There are, however, some rules:
+ </p>
+ <list type="bulleted">
+ <item><p>
+ A <c>{flag,_}</c> tuple applies to all other tuples that follow.
+ </p></item>
+ <item><p>
+ Immediately after <c>{addr,_}</c> follows <c>{netmask,_}</c>.
+ </p></item>
+ <item><p>
+          <c>{broadaddr,_}</c> may follow immediately thereafter
+          if <c>broadcast</c> is a member of <c><anno>Flags</anno></c>,
+          or <c>{dstaddr,_}</c> if <c>pointtopoint</c>
+          is a member of <c><anno>Flags</anno></c>.
+          <c>{dstaddr,_}</c> and <c>{broadaddr,_}</c>
+          do not both occur for the same <c>{addr,_}</c>.
+ </p></item>
+ <item><p>
+ Any <c>{netmask,_}</c>, <c>{broadaddr,_}</c>, or
+ <c>{dstaddr,_}</c> tuples that follow an
+ <c>{addr,<anno>Addr</anno>}</c>
+ tuple concerns the address <c><anno>Addr</anno></c>.
+ </p></item>
+ </list>
+ <p>
+ The tuple <c>{hwaddr,_}</c> is not returned on Solaris, as the
+ hardware address historically belongs to the link layer
+ and it is not returned by the Solaris API function
+ <c>getaddrinfo()</c>.
+ </p>
+ <warning>
+ <p>
+ On Windows, the data is fetched from different
+ OS API functions, so the <c><anno>Netmask</anno></c>
+ and <c><anno>Broadaddr</anno></c> values may be calculated,
+          as may some <c><anno>Flags</anno></c> values.
+ </p>
+ </warning>
+ </desc>
+ </datatype>
+ <datatype>
<name name="posix"/>
<desc>
<p>An atom that is named from the POSIX error codes used in Unix,
@@ -324,38 +397,64 @@ fe80::204:acff:fe17:bf38
<name name="getifaddrs" arity="0"/>
<fsummary>Return a list of interfaces and their addresses.</fsummary>
<desc>
- <p>Returns a list of 2-tuples containing interface names and the
- interface addresses. <c><anno>Ifname</anno></c> is a Unicode string.
- <c><anno>Hwaddr</anno></c> is hardware dependent, for example, on
- Ethernet interfaces
- it is the 6-byte Ethernet address (MAC address (EUI-48 address)).</p>
- <p>The tuples <c>{addr,<anno>Addr</anno>}</c>, <c>{netmask,_}</c>, and
- <c>{broadaddr,_}</c> are repeated in the result list if the interface
- has multiple addresses. If you come across an interface with
- multiple <c>{flag,_}</c> or <c>{hwaddr,_}</c> tuples, you have
- a strange interface or possibly a bug in this function. The tuple
- <c>{flag,_}</c> is mandatory, all others are optional.</p>
- <p>Do not rely too much on the order of <c><anno>Flag</anno></c> atoms
- or <c><anno>Ifopt</anno></c> tuples. There are however some rules:</p>
- <list type="bulleted">
- <item><p>Immediately after
- <c>{addr,_}</c> follows <c>{netmask,_}</c>.</p></item>
- <item><p>Immediately thereafter follows <c>{broadaddr,_}</c> if flag
- <c>broadcast</c> is <em>not</em> set and flag
- <c>pointtopoint</c> <em>is</em> set.</p></item>
- <item><p>Any <c>{netmask,_}</c>, <c>{broadaddr,_}</c>, or
- <c>{dstaddr,_}</c> tuples that follow an <c>{addr,_}</c>
- tuple concerns that address.</p></item>
- </list>
- <p>The tuple <c>{hwaddr,_}</c> is not returned on Solaris, as the
- hardware address historically belongs to the link layer and only
- the superuser can read such addresses.</p>
- <warning>
- <p>On Windows, the data is fetched from different OS API functions,
- so the <c><anno>Netmask</anno></c> and <c><anno>Broadaddr</anno></c>
- values can be calculated, just as some <c><anno>Flag</anno></c>
- values. Report flagrant bugs.</p>
- </warning>
+ <p>
+ Returns a list of 2-tuples containing interface names and
+ the interfaces' addresses. <c><anno>Ifname</anno></c>
+ is a Unicode string and
+ <c><anno>Ifopts</anno></c> is a list of
+ interface address description tuples.
+ </p>
+ <p>
+ The interface address description tuples
+ are documented under the type of the
+ <seealso marker="#type-getifaddrs_ifopts">
+ <c><anno>Ifopts</anno></c>
+ </seealso>
+ value.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name>getifaddrs(Opts) ->
+ {ok, [{Ifname, Ifopts}]} | {error, Posix}
+ </name>
+ <fsummary>Return a list of interfaces and their addresses.</fsummary>
+ <type>
+ <v>
+ Opts = [{netns, Namespace}]
+ </v>
+ <v>
+ Namespace =
+ <seealso marker="file#type-filename_all">
+ file:filename_all()
+ </seealso>
+ </v>
+ <v>Ifname = string()</v>
+ <v>
+ Ifopts =
+ <seealso marker="#type-getifaddrs_ifopts">
+ getifaddrs_ifopts()
+ </seealso>
+ </v>
+ <v>Posix = <seealso marker="#type-posix">posix()</seealso></v>
+ </type>
+ <desc>
+ <p>
+          The same as
+          <seealso marker="#getifaddrs/0"><c>getifaddrs/0</c></seealso>,
+          but the option
+          <c>{netns, Namespace}</c> sets a network namespace
+          for the OS call, on platforms that support that feature.
+ </p>
+ <p>
+ See the socket option
+ <seealso marker="#option-netns">
+ <c>{netns, Namespace}</c>
+ </seealso>
+ under
+ <seealso marker="#setopts/2"><c>setopts/2</c></seealso>.
+ </p>
</desc>
</func>
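
As a minimal usage sketch of getifaddrs/0 (the helper function name is invented for illustration), the following lists the IPv4 addresses of all interfaces that report the up flag, relying only on the result shape documented above:

    %% Sketch: IPv4 addresses of all interfaces that are up.
    ipv4_addrs_of_up_interfaces() ->
        {ok, IfList} = inet:getifaddrs(),
        [{Ifname, [Addr || {addr, Addr} <- Ifopts, tuple_size(Addr) =:= 4]}
         || {Ifname, Ifopts} <- IfList,
            lists:member(up, proplists:get_value(flags, Ifopts, []))].
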
@@ -950,20 +1049,29 @@ get_tcpi_sacked(Sock) ->
</item>
<tag><c>{mode, Mode :: binary | list}</c></tag>
<item>
- <p>Received <c>Packet</c> is delivered as defined by <c>Mode</c>.
+ <p>
+ Received <c>Packet</c> is delivered as defined by <c>Mode</c>.
</p>
</item>
- <tag><c>{netns, Namespace :: file:filename_all()}</c></tag>
+ <tag>
+ <marker id="option-netns"/>
+ <c>{netns, Namespace :: file:filename_all()}</c>
+ </tag>
<item>
- <p>Sets a network namespace for the socket. Parameter
+ <p>
+ Sets a network namespace for the socket. Parameter
<c>Namespace</c> is a filename defining the namespace, for
example, <c>"/var/run/netns/example"</c>, typically created by
command <c>ip netns add example</c>. This option must be used in
a function call that creates a socket, that is,
<seealso marker="gen_tcp#connect/3"><c>gen_tcp:connect/3,4</c></seealso>,
<seealso marker="gen_tcp#listen/2"><c>gen_tcp:listen/2</c></seealso>,
- <seealso marker="gen_udp#open/1"><c>gen_udp:open/1,2</c></seealso>, or
- <seealso marker="gen_sctp#open/0"><c>gen_sctp:open/0,1,2</c></seealso>.</p>
+ <seealso marker="gen_udp#open/1"><c>gen_udp:open/1,2</c></seealso>
+ or
+ <seealso marker="gen_sctp#open/0"><c>gen_sctp:open/0,1,2</c></seealso>,
+ and also
+ <seealso marker="#getifaddrs/1"><c>getifaddrs/1</c></seealso>.
+ </p>
<p>This option uses the Linux-specific syscall
<c>setns()</c>, such as in Linux kernel 3.0 or later,
and therefore only exists when the runtime system
@@ -1039,6 +1147,18 @@ setcap cap_sys_admin,cap_sys_ptrace,cap_dac_read_search+epi beam.smp</code>
is turned on for the socket, which means that also small
amounts of data are sent immediately.</p>
</item>
+ <tag><c>{nopush, Boolean}</c>(TCP/IP sockets)</tag>
+ <item>
+ <p>This translates to <c>TCP_NOPUSH</c> on BSD and
+ to <c>TCP_CORK</c> on Linux.</p>
+ <p>If <c>Boolean == true</c>, the corresponding option
+ is turned on for the socket, which means that small
+ amounts of data are accumulated until a full MSS-worth
+ of data is available or this option is turned off.</p>
+        <p>Note that while the <c>TCP_NOPUSH</c> socket option is available on OSX,
+          its semantics are very different (for example, unsetting it does not cause
+          an immediate send of accumulated data). Hence, the <c>nopush</c> option is
+          intentionally ignored on OSX.</p>
+ </item>
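
A rough sketch of how the option could be used on an already connected gen_tcp socket; per the description above, turning the option off is what triggers transmission on Linux/BSD, while on OSX the option is ignored:

    %% Sketch: batch several small writes into fewer TCP segments.
    send_corked(Sock, Chunks) ->
        ok = inet:setopts(Sock, [{nopush, true}]),
        lists:foreach(fun(Chunk) -> ok = gen_tcp:send(Sock, Chunk) end, Chunks),
        ok = inet:setopts(Sock, [{nopush, false}]).
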
<tag><c>{packet, PacketType}</c>(TCP/IP sockets)</tag>
<item>
<p><marker id="packet"/>Defines the type of packets to use for a socket.
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
index 464c65ba76..556f25f96a 100644
--- a/lib/kernel/doc/src/logger.xml
+++ b/lib/kernel/doc/src/logger.xml
@@ -290,7 +290,10 @@ logger:error("error happened because: ~p", [Reason]). % Without macro
<section>
<title>Macros</title>
- <p>The following macros are defined:</p>
+ <p>The following macros are defined in <c>logger.hrl</c>, which
+ is included in a module with the directive</p>
+ <code>
+ -include_lib("kernel/include/logger.hrl").</code>
<list>
<item><c>?LOG_EMERGENCY(StringOrReport[,Metadata])</c></item>
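
As a minimal sketch of the include directive and macro use (module name and message are invented for illustration):

    -module(my_server).                               %% hypothetical module
    -include_lib("kernel/include/logger.hrl").
    -export([connect/1]).

    connect(Host) ->
        ?LOG_INFO("connecting to ~p", [Host]),        %% location metadata added by the macro
        ok.
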
@@ -748,6 +751,14 @@ start(_, []) ->
exists, its associated value will be changed
to the given value. If it does not exist, it will
be added.</p>
+ <p>If the value is incomplete, which for example can be the
+ case for the <c>config</c> key, it is up to the handler
+ implementation how the unspecified parts are set. For all
+ handlers in the Kernel application, unspecified data for
+ the <c>config</c> key is set to default values. To update
+ only specified data, and keep the existing configuration for
+ the rest, use <seealso marker="#update_handler_config-3">
+ <c>update_handler_config/3</c></seealso>.</p>
<p>See the definition of
the <seealso marker="#type-handler_config">
<c>handler_config()</c></seealso> type for more
@@ -933,6 +944,42 @@ logger:set_handler_config(HandlerId, maps:merge(Old, Config)).
</func>
<func>
+ <name name="update_handler_config" arity="3" clause_i="1"/>
+ <name name="update_handler_config" arity="3" clause_i="2"/>
+ <name name="update_handler_config" arity="3" clause_i="3"/>
+ <name name="update_handler_config" arity="3" clause_i="4"/>
+ <name name="update_handler_config" arity="3" clause_i="5"/>
+ <fsummary>Add or update configuration data for the specified
+ handler.</fsummary>
+ <type variable="HandlerId"/>
+ <type variable="Level" name_i="1"/>
+ <type variable="FilterDefault" name_i="2"/>
+ <type variable="Filters" name_i="3"/>
+ <type variable="Formatter" name_i="4"/>
+ <type variable="Config" name_i="5"/>
+ <type variable="Return"/>
+ <desc>
+ <p>Add or update configuration data for the specified
+ handler. If the given <c><anno>Key</anno></c> already
+ exists, its associated value will be changed
+ to the given value. If it does not exist, it will
+ be added.</p>
+ <p>If the value is incomplete, which for example can be the
+ case for the <c>config</c> key, it is up to the handler
+ implementation how the unspecified parts are set. For all
+ handlers in the Kernel application, unspecified data for
+ the <c>config</c> key is not changed. To reset unspecified
+ data to default values,
+ use <seealso marker="#set_handler_config-3">
+ <c>set_handler_config/3</c></seealso>.</p>
+ <p>See the definition of
+ the <seealso marker="#type-handler_config">
+ <c>handler_config()</c></seealso> type for more
+ information about the different parameters.</p>
+ </desc>
+ </func>
+
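
An illustrative sketch of the difference, using the common filesync_repeat_interval parameter of the standard handlers (any other handler-specific key behaves the same way):

    %% Keeps all other values in the 'config' map unchanged.
    logger:update_handler_config(default, config,
                                 #{filesync_repeat_interval => 10000}).

    %% Resets unspecified (and resettable) values in the 'config' map to
    %% their defaults before applying the given key.
    logger:set_handler_config(default, config,
                              #{filesync_repeat_interval => 10000}).
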
+ <func>
<name name="update_primary_config" arity="1"/>
<fsummary>Update primary configuration data for Logger.</fsummary>
<desc>
@@ -1041,10 +1088,11 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
</func>
<func>
- <name>HModule:changing_config(Config1, Config2) -> {ok, Config3} | {error, Reason}</name>
+ <name>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig) -> {ok, Config} | {error, Reason}</name>
<fsummary>The configuration for this handler is about to change.</fsummary>
<type>
- <v>Config1 = Config2 = Config3 =
+ <v>SetOrUpdate = set | update</v>
+ <v>OldConfig = NewConfig = Config =
<seealso marker="#type-handler_config">handler_config()</seealso></v>
<v>Reason = term()</v>
</type>
@@ -1053,18 +1101,51 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
<p>The function is called on a temporary process when the
configuration for a handler is about to change. The purpose
is to verify and act on the new configuration.</p>
- <p><c>Config1</c> is the existing configuration
- and <c>Config2</c> is the new configuration.</p>
+ <p><c>OldConfig</c> is the existing configuration
+ and <c>NewConfig</c> is the new configuration.</p>
<p>The handler identity is associated with the <c>id</c> key
- in <c>Config1</c>.</p>
+ in <c>OldConfig</c>.</p>
+ <p><c>SetOrUpdate</c> has the value <c>set</c> if the
+ configuration change originates from a call to
+ <seealso marker="#set_handler_config-2">
+ <c>set_handler_config/2,3</c></seealso>, and <c>update</c>
+ if it originates from <seealso marker="#update_handler_config-2">
+ <c>update_handler_config/2,3</c></seealso>. The handler can
+ use this parameteter to decide how to update the value of
+        use this parameter to decide how to update the value of
+        the <c>config</c> field, that is, the handler-specific
+ equals <c>set</c>, values that are not specified must be
+ given their default values. If <c>SetOrUpdate</c>
+ equals <c>update</c>, the values found in <c>OldConfig</c>
+ must be used instead.</p>
<p>If everything succeeds, the callback function must return a
- possibly adjusted configuration in <c>{ok,Config3}</c>.</p>
+ possibly adjusted configuration in <c>{ok,Config}</c>.</p>
<p>If the configuration is faulty, the callback function must
return <c>{error,Reason}</c>.</p>
</desc>
</func>
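
A handler could act on SetOrUpdate roughly as in the following sketch, where my_key and its default value 17 are invented handler-specific data:

    changing_config(SetOrUpdate, OldConfig, NewConfig) ->
        Old = maps:get(config, OldConfig, #{}),
        New = maps:get(config, NewConfig, #{}),
        Base = case SetOrUpdate of
                   set    -> #{my_key => 17};  %% defaults for unspecified values
                   update -> Old               %% keep existing values
               end,
        {ok, NewConfig#{config => maps:merge(Base, New)}}.
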
<func>
+ <name>HModule:filter_config(Config) -> FilteredConfig</name>
+ <fsummary>Remove internal data from configuration.</fsummary>
+ <type>
+ <v>Config = FilteredConfig =
+ <seealso marker="#type-handler_config">handler_config()</seealso></v>
+ </type>
+ <desc>
+ <p>This callback function is optional.</p>
+ <p>The function is called when one of the Logger API functions
+ for fetching the handler configuration is called, for
+ example
+ <seealso marker="#get_handler_config-1">
+ <c>logger:get_handler_config/1</c></seealso>.</p>
+ <p>It allows the handler to remove internal data fields from
+ its configuration data before it is returned to the
+ caller.</p>
+ </desc>
+ </func>
+
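
A minimal filter_config/1 sketch; the internal keys handler_pid and mode_tab are the ones used by the Kernel handlers in this change:

    filter_config(#{config := HConfig} = Config) ->
        %% Hide fields that only the handler process itself should see.
        Config#{config => maps:without([handler_pid, mode_tab], HConfig)}.
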
+ <func>
<name>HModule:log(LogEvent, Config) -> void()</name>
<fsummary>Log the given log event.</fsummary>
<type>
@@ -1136,7 +1217,7 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
<item><seealso marker="logger#set_handler_config-2">
<c>logger:set_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_handler_config-2">
- <c>logger:updata_handler_config/2</c></seealso></item>
+ <c>logger:update_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_formatter_config-2">
<c>logger:update_formatter_config/2</c></seealso></item>
</list>
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
index 4a81cfa34a..458e61cef5 100644
--- a/lib/kernel/doc/src/logger_chapter.xml
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -113,7 +113,10 @@
of functions on the form <c>logger:Level/1,2,3</c>, which are
all shortcuts
for <seealso marker="logger#log-2">
- <c>logger:log(Level,Arg1[,Arg2[,Arg3]])</c></seealso>.</p>
+ <c>logger:log(Level,Arg1[,Arg2[,Arg3]])</c></seealso>.</p>
+ <p>The macros are defined in <c>logger.hrl</c>, which is included
+ in a module with the directive</p>
+ <code>-include_lib("kernel/include/logger.hrl").</code>
<p>The difference between using the macros and the exported
functions is that macros add location (originator) information
to the metadata, and performs lazy evaluation by wrapping the
@@ -384,8 +387,8 @@ logger:debug(#{got => connection_request, id => Id, state => State},
<p>In addition to the mandatory callback function <c>log/2</c>, a
handler module can export the optional callback
- functions <c>adding_handler/1</c>, <c>changing_config/2</c>
- and <c>removing_handler/1</c>. See
+ functions <c>adding_handler/1</c>, <c>changing_config/3</c>,
+ <c>filter_config/1</c>, and <c>removing_handler/1</c>. See
section <seealso marker="logger#handler_callback_functions">Handler
Callback Functions</seealso> in the logger(3) manual page for
      more information about these functions.</p>
@@ -555,7 +558,7 @@ logger:debug(#{got => connection_request, id => Id, state => State},
<item><seealso marker="logger#set_handler_config-2">
<c>set_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_handler_config-2">
- <c>update_handler_config/2</c></seealso></item>
+ <c>update_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#add_handler_filter-3">
<c>add_handler_filter/3</c></seealso></item>
<item><seealso marker="logger#remove_handler_filter-2">
@@ -704,9 +707,13 @@ logger:debug(#{got => connection_request, id => Id, state => State},
<item>
<p>If <c>HandlerId</c> is <c>default</c>, then this entry
modifies the default handler, equivalent to calling</p>
- <pre><seealso marker="logger#set_handler_config-2">
- logger:set_handler_config(default, Module, HandlerConfig)
- </seealso></pre>
+ <pre><seealso marker="logger#remove_handler-1">
+ logger:remove_handler(default)
+ </seealso></pre>
+ <p>followed by</p>
+ <pre><seealso marker="logger#add_handler-3">
+ logger:add_handler(default, Module, HandlerConfig)
+ </seealso></pre>
<p>For all other values of <c>HandlerId</c>, this entry
adds a new handler, equivalent to calling</p>
<pre><seealso marker="logger:add_handler/3">
@@ -1024,7 +1031,8 @@ ok</pre>
<list>
<item><c>adding_handler(Config)</c></item>
<item><c>removing_handler(Config)</c></item>
- <item><c>changing_config(OldConfig, NewConfig)</c></item>
+ <item><c>changing_config(SetOrUpdate, OldConfig, NewConfig)</c></item>
+ <item><c>filter_config(Config)</c></item>
</list>
<p>When a handler is added, by for example a call
to <seealso marker="logger#add_handler-3">
@@ -1043,11 +1051,18 @@ ok</pre>
<p>When <seealso marker="logger#set_handler_config-2">
<c>logger:set_handler_config/2,3</c></seealso>
or <seealso marker="logger#update_handler_config/2">
- <c>logger:update_handler_config/2</c></seealso> is called,
+ <c>logger:update_handler_config/2,3</c></seealso> is called,
Logger
- calls <c>HModule:changing_config(OldConfig, NewConfig)</c>. If
+ calls <c>HModule:changing_config(SetOrUpdate, OldConfig, NewConfig)</c>. If
this function returns <c>{ok,NewConfig1}</c>, Logger
writes <c>NewConfig1</c> to the configuration database.</p>
+ <p>When <seealso marker="logger#get_config-0">
+ <c>logger:get_config/0</c></seealso> or
+ <seealso marker="logger#get_handler_config-0">
+ <c>logger:get_handler_config/0,1</c></seealso> is called,
+ Logger calls <c>HModule:filter_config(Config)</c>. This function
+      must return the handler configuration with internal data
+ removed.</p>
<p>A simple handler that prints to the terminal can be implemented
as follows:</p>
diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml
index dfe2ab3275..d9b941a0a9 100644
--- a/lib/kernel/doc/src/logger_disk_log_h.xml
+++ b/lib/kernel/doc/src/logger_disk_log_h.xml
@@ -66,6 +66,10 @@
corresponds to the <c>name</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+        <p>The value is set when the handler is added, and it cannot
+          be changed at runtime.</p>
+ <p>Defaults to the same name as the handler identity, in the
+ current directory.</p>
</item>
<tag><c>type</c></tag>
<item>
@@ -73,6 +77,8 @@
corresponds to the <c>type</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+        <p>The value is set when the handler is added, and it cannot
+          be changed at runtime.</p>
<p>Defaults to <c>wrap</c>.</p>
</item>
<tag><c>max_no_files</c></tag>
@@ -82,6 +88,8 @@
corresponds to the <c>MaxNoFiles</c> element in the <c>size</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+        <p>The value is set when the handler is added, and it cannot
+          be changed at runtime.</p>
<p>Defaults to <c>10</c>.</p>
<p>The setting has no effect on a halt log.</p>
</item>
@@ -93,6 +101,8 @@
corresponds to the <c>MaxNoBytes</c> element in the <c>size</c> property in the
<seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
datatype.</p>
+        <p>The value is set when the handler is added, and it cannot
+          be changed at runtime.</p>
<p>Defaults to <c>1048576</c> bytes for a wrap log, and
<c>infinity</c> for a halt log.</p>
</item>
diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml
index 24772fd6c4..d066e263df 100644
--- a/lib/kernel/doc/src/logger_formatter.xml
+++ b/lib/kernel/doc/src/logger_formatter.xml
@@ -303,7 +303,7 @@ exit_reason: "It crashed"</code>
<item><seealso marker="logger#set_handler_config-2">
<c>logger:set_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_handler_config-2">
- <c>logger:updata_handler_config/2</c></seealso></item>
+ <c>logger:update_handler_config/2</c></seealso></item>
<item><seealso marker="logger#update_formatter_config-2">
<c>logger:update_formatter_config/2</c></seealso></item>
</list>
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
index fcd8189bae..e156f5719b 100644
--- a/lib/kernel/doc/src/logger_std_h.xml
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -74,7 +74,9 @@
circular logging. Use the disk_log handler,
<seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>,
for this.</p>
- <p> Defaults to <c>standard_io</c>.</p>
+        <p>The value is set when the handler is added, and it cannot
+          be changed at runtime.</p>
+ <p>Defaults to <c>standard_io</c>.</p>
</item>
<tag><c>filesync_repeat_interval</c></tag>
<item>
diff --git a/lib/kernel/doc/src/notes.xml b/lib/kernel/doc/src/notes.xml
index 8188ede6a2..bfbaf6ef3e 100644
--- a/lib/kernel/doc/src/notes.xml
+++ b/lib/kernel/doc/src/notes.xml
@@ -31,6 +31,22 @@
</header>
<p>This document describes the changes made to the Kernel application.</p>
+<section><title>Kernel 6.1.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix bug causing net_kernel process crash on connection
+ attempt from node with name identical to local node.</p>
+ <p>
+ Own Id: OTP-15438 Aux Id: ERL-781 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Kernel 6.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/kernel/src/inet.erl b/lib/kernel/src/inet.erl
index 5dd68dc285..9f22eb6aaa 100644
--- a/lib/kernel/src/inet.erl
+++ b/lib/kernel/src/inet.erl
@@ -154,6 +154,15 @@
'running' | 'multicast' | 'loopback']} |
{'hwaddr', ether_address()}.
+-type getifaddrs_ifopts() ::
+ [Ifopt :: {flags, Flags :: [up | broadcast | loopback |
+ pointtopoint | running | multicast]} |
+ {addr, Addr :: ip_address()} |
+ {netmask, Netmask :: ip_address()} |
+ {broadaddr, Broadaddr :: ip_address()} |
+ {dstaddr, Dstaddr :: ip_address()} |
+ {hwaddr, Hwaddr :: [byte()]}].
+
-type address_family() :: 'inet' | 'inet6' | 'local'.
-type socket_protocol() :: 'tcp' | 'udp' | 'sctp'.
-type socket_type() :: 'stream' | 'dgram' | 'seqpacket'.
@@ -321,32 +330,32 @@ getopts(Socket, Opts) ->
Other
end.
--spec getifaddrs(Socket :: socket()) ->
- {'ok', [string()]} | {'error', posix()}.
-
+-spec getifaddrs(
+ [Option :: {netns, Namespace :: file:filename_all()}]
+ | socket()) ->
+ {'ok', [{Ifname :: string(),
+ Ifopts :: getifaddrs_ifopts()}]}
+ | {'error', posix()}.
+getifaddrs(Opts) when is_list(Opts) ->
+ withsocket(fun(S) -> prim_inet:getifaddrs(S) end, Opts);
getifaddrs(Socket) ->
prim_inet:getifaddrs(Socket).
--spec getifaddrs() -> {ok, Iflist} | {error, posix()} when
- Iflist :: [{Ifname,[Ifopt]}],
- Ifname :: string(),
- Ifopt :: {flags,[Flag]} | {addr,Addr} | {netmask,Netmask}
- | {broadaddr,Broadaddr} | {dstaddr,Dstaddr}
- | {hwaddr,Hwaddr},
- Flag :: up | broadcast | loopback | pointtopoint
- | running | multicast,
- Addr :: ip_address(),
- Netmask :: ip_address(),
- Broadaddr :: ip_address(),
- Dstaddr :: ip_address(),
- Hwaddr :: [byte()].
-
+-spec getifaddrs() ->
+ {'ok', [{Ifname :: string(),
+ Ifopts :: getifaddrs_ifopts()}]}
+ | {'error', posix()}.
getifaddrs() ->
withsocket(fun(S) -> prim_inet:getifaddrs(S) end).
--spec getiflist(Socket :: socket()) ->
- {'ok', [string()]} | {'error', posix()}.
+-spec getiflist(
+ [Option :: {netns, Namespace :: file:filename_all()}]
+ | socket()) ->
+ {'ok', [string()]} | {'error', posix()}.
+
+getiflist(Opts) when is_list(Opts) ->
+ withsocket(fun(S) -> prim_inet:getiflist(S) end, Opts);
getiflist(Socket) ->
prim_inet:getiflist(Socket).
@@ -363,11 +372,19 @@ getiflist() ->
ifget(Socket, Name, Opts) ->
prim_inet:ifget(Socket, Name, Opts).
--spec ifget(Name :: string() | atom(), Opts :: [if_getopt()]) ->
+-spec ifget(
+ Name :: string() | atom(),
+ Opts :: [if_getopt() |
+ {netns, Namespace :: file:filename_all()}]) ->
{'ok', [if_getopt_result()]} | {'error', posix()}.
ifget(Name, Opts) ->
- withsocket(fun(S) -> prim_inet:ifget(S, Name, Opts) end).
+ {NSOpts,IFOpts} =
+ lists:partition(
+ fun ({netns,_}) -> true;
+ (_) -> false
+ end, Opts),
+ withsocket(fun(S) -> prim_inet:ifget(S, Name, IFOpts) end, NSOpts).
-spec ifset(Socket :: socket(),
Name :: string() | atom(),
@@ -377,11 +394,19 @@ ifget(Name, Opts) ->
ifset(Socket, Name, Opts) ->
prim_inet:ifset(Socket, Name, Opts).
--spec ifset(Name :: string() | atom(), Opts :: [if_setopt()]) ->
+-spec ifset(
+ Name :: string() | atom(),
+ Opts :: [if_setopt() |
+ {netns, Namespace :: file:filename_all()}]) ->
'ok' | {'error', posix()}.
ifset(Name, Opts) ->
- withsocket(fun(S) -> prim_inet:ifset(S, Name, Opts) end).
+ {NSOpts,IFOpts} =
+ lists:partition(
+ fun ({netns,_}) -> true;
+ (_) -> false
+ end, Opts),
+ withsocket(fun(S) -> prim_inet:ifset(S, Name, IFOpts) end, NSOpts).
-spec getif() ->
{'ok', [{ip_address(), ip_address() | 'undefined', ip_address()}]} |
@@ -391,10 +416,14 @@ getif() ->
withsocket(fun(S) -> getif(S) end).
%% backwards compatible getif
--spec getif(Socket :: socket()) ->
+-spec getif(
+ [Option :: {netns, Namespace :: file:filename_all()}]
+ | socket()) ->
{'ok', [{ip_address(), ip_address() | 'undefined', ip_address()}]} |
{'error', posix()}.
+getif(Opts) when is_list(Opts) ->
+ withsocket(fun(S) -> getif(S) end, Opts);
getif(Socket) ->
case prim_inet:getiflist(Socket) of
{ok, IfList} ->
@@ -415,7 +444,10 @@ getif(Socket) ->
end.
withsocket(Fun) ->
- case inet_udp:open(0,[]) of
+ withsocket(Fun, []).
+%%
+withsocket(Fun, Opts) ->
+ case inet_udp:open(0, Opts) of
{ok,Socket} ->
Res = Fun(Socket),
inet_udp:close(Socket),
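
With the new list-accepting clauses, interfaces can be inspected inside a named network namespace; a sketch (the namespace path is only an example and requires a platform where setns() is supported):

    %% List the interface names visible in the "example" namespace,
    %% typically created with "ip netns add example".
    {ok, IfList} = inet:getifaddrs([{netns, "/var/run/netns/example"}]),
    [Ifname || {Ifname, _Ifopts} <- IfList].
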
diff --git a/lib/kernel/src/inet_int.hrl b/lib/kernel/src/inet_int.hrl
index c8e09d18ad..f6525d7261 100644
--- a/lib/kernel/src/inet_int.hrl
+++ b/lib/kernel/src/inet_int.hrl
@@ -162,6 +162,7 @@
-define(INET_OPT_PKTOPTIONS, 45).
-define(INET_OPT_TTL, 46).
-define(INET_OPT_RECVTTL, 47).
+-define(TCP_OPT_NOPUSH, 48).
% Specific SCTP options: separate range:
-define(SCTP_OPT_RTOINFO, 100).
-define(SCTP_OPT_ASSOCINFO, 101).
diff --git a/lib/kernel/src/inet_tcp_dist.erl b/lib/kernel/src/inet_tcp_dist.erl
index d1701afdaa..c37212b0f9 100644
--- a/lib/kernel/src/inet_tcp_dist.erl
+++ b/lib/kernel/src/inet_tcp_dist.erl
@@ -450,7 +450,7 @@ get_tcp_address(Driver, Socket) ->
get_address_resolver(EpmdModule) ->
case erlang:function_exported(EpmdModule, address_please, 3) of
true -> {EpmdModule, address_please};
- _ -> {inet, getaddr}
+ _ -> {erl_epmd, address_please}
end.
%% ------------------------------------------------------------
diff --git a/lib/kernel/src/kernel.appup.src b/lib/kernel/src/kernel.appup.src
index 305a1c788c..e73cea8351 100644
--- a/lib/kernel/src/kernel.appup.src
+++ b/lib/kernel/src/kernel.appup.src
@@ -16,13 +16,40 @@
%% limitations under the License.
%%
%% %CopyrightEnd%
+%%
+%% We allow upgrade from, and downgrade to all previous
+%% versions from the following OTP releases:
+%% - OTP 20
+%% - OTP 21
+%%
+%% We also allow upgrade from, and downgrade to all
+%% versions that have branched off from the
+%% above-stated previous versions.
+%%
{"%VSN%",
- %% Up from - max one major revision back
- [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0
- {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+
- {<<"6\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-21
- %% Down to - max one major revision back
- [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0
- {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+
- {<<"6\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-21
-}.
+ [{<<"^5\\.3$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.2(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.3(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.0$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.1$">>,[restart_new_emulator]},
+ {<<"^6\\.1\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]}],
+ [{<<"^5\\.3$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.2(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.3(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.0$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.1$">>,[restart_new_emulator]},
+ {<<"^6\\.1\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]}]}.
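
The anchored patterns can be sanity checked with re:run/3; for example, the <<"^5\\.4\\.1(?:\\.[0-9]+)*$">> pattern accepts 5.4.1 and versions branched off from it, but not plain 5.4 (shell sketch):

    1> re:run("5.4.1.2", <<"^5\\.4\\.1(?:\\.[0-9]+)*$">>, [{capture, none}]).
    match
    2> re:run("5.4", <<"^5\\.4\\.1(?:\\.[0-9]+)*$">>, [{capture, none}]).
    nomatch
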
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
index 752dd8d493..6762998d4f 100644
--- a/lib/kernel/src/logger.erl
+++ b/lib/kernel/src/logger.erl
@@ -43,7 +43,8 @@
get_module_level/0, get_module_level/1,
set_primary_config/1, set_primary_config/2,
set_handler_config/2, set_handler_config/3,
- update_primary_config/1, update_handler_config/2,
+ update_primary_config/1,
+ update_handler_config/2, update_handler_config/3,
update_formatter_config/2, update_formatter_config/3,
get_primary_config/0, get_handler_config/1,
get_handler_config/0, get_handler_ids/0, get_config/0,
@@ -423,6 +424,29 @@ set_handler_config(HandlerId,Config) ->
update_primary_config(Config) ->
logger_server:update_config(primary,Config).
+-spec update_handler_config(HandlerId,level,Level) -> Return when
+ HandlerId :: handler_id(),
+ Level :: level() | all | none,
+ Return :: ok | {error,term()};
+ (HandlerId,filter_default,FilterDefault) -> Return when
+ HandlerId :: handler_id(),
+ FilterDefault :: log | stop,
+ Return :: ok | {error,term()};
+ (HandlerId,filters,Filters) -> Return when
+ HandlerId :: handler_id(),
+ Filters :: [{filter_id(),filter()}],
+ Return :: ok | {error,term()};
+ (HandlerId,formatter,Formatter) -> Return when
+ HandlerId :: handler_id(),
+ Formatter :: {module(), formatter_config()},
+ Return :: ok | {error,term()};
+ (HandlerId,config,Config) -> Return when
+ HandlerId :: handler_id(),
+ Config :: term(),
+ Return :: ok | {error,term()}.
+update_handler_config(HandlerId,Key,Value) ->
+ logger_server:update_config(HandlerId,Key,Value).
+
-spec update_handler_config(HandlerId,Config) -> ok | {error,term()} when
HandlerId :: handler_id(),
Config :: handler_config().
@@ -439,7 +463,14 @@ get_primary_config() ->
HandlerId :: handler_id(),
Config :: handler_config().
get_handler_config(HandlerId) ->
- logger_config:get(?LOGGER_TABLE,HandlerId).
+ case logger_config:get(?LOGGER_TABLE,HandlerId) of
+ {ok,#{module:=Module}=Config} ->
+ {ok,try Module:filter_config(Config)
+ catch _:_ -> Config
+ end};
+ Error ->
+ Error
+ end.
-spec get_handler_config() -> [Config] when
Config :: handler_config().
diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl
index 6bfe658552..5e9faf332c 100644
--- a/lib/kernel/src/logger_config.erl
+++ b/lib/kernel/src/logger_config.erl
@@ -31,7 +31,9 @@
-include("logger_internal.hrl").
new(Name) ->
- _ = ets:new(Name,[set,protected,named_table,{write_concurrency,true}]),
+ _ = ets:new(Name,[set,protected,named_table,
+ {read_concurrency,true},
+ {write_concurrency,true}]),
ets:whereis(Name).
delete(Tid,Id) ->
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
index a8f141f135..41e0d51a9d 100644
--- a/lib/kernel/src/logger_disk_log_h.erl
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -19,58 +19,33 @@
%%
-module(logger_disk_log_h).
--behaviour(gen_server).
-
-include("logger.hrl").
-include("logger_internal.hrl").
-include("logger_h_common.hrl").
%%% API
--export([start_link/3, info/1, filesync/1, reset/1]).
+-export([info/1, filesync/1, reset/1]).
-%% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+%% logger_h_common callbacks
+-export([init/2, check_config/4, reset_state/2,
+ filesync/3, write/4, handle_info/3, terminate/3]).
%% logger callbacks
--export([log/2, adding_handler/1, removing_handler/1, changing_config/2]).
-
-%% handler internal
--export([log_handler_info/4]).
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
%%%===================================================================
%%% API
%%%===================================================================
%%%-----------------------------------------------------------------
-%%% Start a disk_log handler process and link to caller.
-%%% This function is called by the kernel supervisor when this
-%%% handler process gets added (as a result of calling add/3).
--spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
- Name :: atom(),
- Config :: logger:handler_config(),
- HandlerState :: map(),
- Pid :: pid(),
- Reason :: term().
-
-start_link(Name, Config, HandlerState) ->
- proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
-
-%%%-----------------------------------------------------------------
%%%
-spec filesync(Name) -> ok | {error,Reason} when
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-filesync(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- disk_log_sync, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
filesync(Name) ->
- {error,{badarg,{filesync,[Name]}}}.
+ logger_h_common:filesync(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -79,15 +54,8 @@ filesync(Name) ->
Info :: term(),
Reason :: handler_busy | {badarg,term()}.
-info(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- info, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
info(Name) ->
- {error,{badarg,{info,[Name]}}}.
+ logger_h_common:info(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -95,16 +63,8 @@ info(Name) ->
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-reset(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- reset, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
reset(Name) ->
- {error,{badarg,{reset,[Name]}}}.
-
+ logger_h_common:reset(?MODULE,Name).
%%%===================================================================
%%% logger callbacks
@@ -112,89 +72,82 @@ reset(Name) ->
%%%-----------------------------------------------------------------
%%% Handler being added
-adding_handler(#{id:=Name}=Config) ->
- case check_config(adding, Config) of
- {ok, Config1} ->
- %% create initial handler state by merging defaults with config
- HConfig = maps:get(config, Config1, #{}),
- HState = maps:merge(get_init_state(), HConfig),
- case logger_h_common:overload_levels_ok(HState) of
- true ->
- start(Name, Config1, HState);
- false ->
- #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = HState,
- {error,{invalid_levels,{SMQL,DMQL,FQL}}}
- end;
- Error ->
- Error
- end.
+adding_handler(Config) ->
+ logger_h_common:adding_handler(Config).
%%%-----------------------------------------------------------------
%%% Updating handler config
-changing_config(OldConfig = #{id:=Name, config:=OldHConfig},
- NewConfig = #{id:=Name, config:=NewHConfig}) ->
- #{type:=Type, file:=File, max_no_files:=MaxFs,
- max_no_bytes:=MaxBytes} = OldHConfig,
- case NewHConfig of
- #{type:=Type, file:=File, max_no_files:=MaxFs,
- max_no_bytes:=MaxBytes} ->
- changing_config1(OldConfig, NewConfig);
- _ ->
- {error,{illegal_config_change,OldConfig,NewConfig}}
- end;
-changing_config(OldConfig, NewConfig) ->
- {error,{illegal_config_change,OldConfig,NewConfig}}.
-
-changing_config1(OldConfig=#{config:=OldHConfig}, NewConfig) ->
- case check_config(changing, NewConfig) of
- {ok,NewConfig1 = #{config:=NewHConfig}} ->
- #{handler_pid:=HPid,
- mode_tab:=ModeTab} = OldHConfig,
- NewHConfig1 = NewHConfig#{handler_pid=>HPid,
- mode_tab=>ModeTab},
- NewConfig2 = NewConfig1#{config=>NewHConfig1},
- try gen_server:call(HPid, {change_config,OldConfig,NewConfig2},
- ?DEFAULT_CALL_TIMEOUT) of
- ok -> {ok,NewConfig2};
- HError -> HError
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
- Error ->
- Error
- end.
+changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ logger_h_common:changing_config(SetOrUpdate, OldConfig, NewConfig).
-check_config(adding, #{id:=Name}=Config) ->
- %% merge handler specific config data
- HConfig = merge_default_logopts(Name, maps:get(config, Config, #{})),
- case check_h_config(maps:to_list(HConfig)) of
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Config) ->
+ logger_h_common:removing_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config) ->
+ logger_h_common:log(LogEvent, Config).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(Config) ->
+ logger_h_common:filter_config(Config).
+
+%%%===================================================================
+%%% logger_h_common callbacks
+%%%===================================================================
+init(Name, #{file:=File,type:=Type,max_no_bytes:=MNB,max_no_files:=MNF}) ->
+ case open_disk_log(Name, File, Type, MNB, MNF) of
ok ->
- {ok,Config#{config=>HConfig}};
+ {ok,#{log_opts => #{file => File,
+ type => Type,
+ max_no_bytes => MNB,
+ max_no_files => MNF},
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}};
Error ->
Error
- end;
-check_config(changing, Config) ->
- HConfig = maps:get(config, Config, #{}),
- case check_h_config(maps:to_list(HConfig)) of
- ok -> {ok,Config};
- Error -> Error
end.
-merge_default_logopts(Name, HConfig) ->
- Type = maps:get(type, HConfig, wrap),
- {DefaultNoFiles,DefaultNoBytes} =
- case Type of
- halt -> {undefined,infinity};
- _wrap -> {10,1048576}
+check_config(Name,set,undefined,HConfig0) ->
+ HConfig=merge_default_logopts(Name,maps:merge(get_default_config(),HConfig0)),
+ check_config(HConfig);
+check_config(_Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
+ WriteOnce = maps:with([type,file,max_no_files,max_no_bytes],OldHConfig),
+ Default =
+ case SetOrUpdate of
+ set ->
+ %% Do not reset write-once fields to defaults
+ maps:merge(get_default_config(),WriteOnce);
+ update ->
+ OldHConfig
end,
- {ok,Dir} = file:get_cwd(),
- Defaults = #{file => filename:join(Dir,Name),
- max_no_files => DefaultNoFiles,
- max_no_bytes => DefaultNoBytes,
- type => Type},
- maps:merge(Defaults, HConfig).
+
+ NewHConfig = maps:merge(Default,NewHConfig0),
+
+ %% Fail if write-once fields are changed
+ case maps:with([type,file,max_no_files,max_no_bytes],NewHConfig) of
+ WriteOnce ->
+ check_config(NewHConfig);
+ Other ->
+ {Old,New} = logger_server:diff_maps(WriteOnce,Other),
+ {error,{illegal_config_change,?MODULE,Old,New}}
+ end.
+
+check_config(HConfig) ->
+ case check_h_config(maps:to_list(HConfig)) of
+ ok ->
+ {ok,HConfig};
+ {error,{Key,Value}} ->
+ {error,{invalid_config,?MODULE,#{Key=>Value}}}
+ end.
check_h_config([{file,File}|Config]) when is_list(File) ->
check_h_config(Config);
@@ -208,447 +161,59 @@ check_h_config([{max_no_bytes,N}|Config]) when is_integer(N), N>0 ->
check_h_config(Config);
check_h_config([{type,Type}|Config]) when Type==wrap; Type==halt ->
check_h_config(Config);
-check_h_config([Other | Config]) ->
- case logger_h_common:check_common_config(Other) of
- valid ->
- check_h_config(Config);
- invalid ->
- {error,{invalid_config,?MODULE,Other}}
- end;
+check_h_config([Other | _]) ->
+ {error,Other};
check_h_config([]) ->
ok.
-%%%-----------------------------------------------------------------
-%%% Handler being removed
-removing_handler(#{id:=Name}) ->
- stop(Name).
-
-%%%-----------------------------------------------------------------
-%%% Log a string or report
--spec log(LogEvent, Config) -> ok when
- LogEvent :: logger:log_event(),
- Config :: logger:handler_config().
+get_default_config() ->
+ #{}.
-log(LogEvent, Config = #{id := Name,
- config := #{handler_pid := HPid,
- mode_tab := ModeTab}}) ->
- %% if the handler has crashed, we must drop this event
- %% and hope the handler restarts so we can try again
- true = is_process_alive(HPid),
- Bin = logger_h_common:log_to_binary(LogEvent, Config),
- logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
+merge_default_logopts(Name, HConfig) ->
+ Type = maps:get(type, HConfig, wrap),
+ {DefaultNoFiles,DefaultNoBytes} =
+ case Type of
+ halt -> {undefined,infinity};
+ _wrap -> {10,1048576}
+ end,
+ {ok,Dir} = file:get_cwd(),
+ Defaults = #{file => filename:join(Dir,Name),
+ max_no_files => DefaultNoFiles,
+ max_no_bytes => DefaultNoBytes,
+ type => Type},
+ maps:merge(Defaults, HConfig).
-%%%===================================================================
-%%% gen_server callbacks
-%%%===================================================================
+filesync(Name,_Mode,State) ->
+ Result = ?disk_log_sync(Name),
+ maybe_notify_error(Name, filesync, Result, prev_sync_result, State).
-init([Name,
- Config = #{config := HConfig = #{file:=File,
- type:=Type,
- max_no_bytes:=MNB,
- max_no_files:=MNF}},
- State = #{dl_sync_int := DLSyncInt}]) ->
-
- RegName = ?name_to_reg_name(?MODULE,Name),
- register(RegName, self()),
- process_flag(trap_exit, true),
- process_flag(message_queue_data, off_heap),
-
- ?init_test_hooks(),
- ?start_observation(Name),
-
- LogOpts = #{file=>File, type=>Type, max_no_bytes=>MNB, max_no_files=>MNF},
- case open_disk_log(Name, File, Type, MNB, MNF) of
- ok ->
- try ets:new(Name, [public]) of
- ModeTab ->
- ?set_mode(ModeTab, async),
- T0 = ?timestamp(),
- State1 =
- ?merge_with_stats(State#{
- id => Name,
- mode_tab => ModeTab,
- mode => async,
- dl_sync => DLSyncInt,
- log_opts => LogOpts,
- last_qlen => 0,
- last_log_ts => T0,
- burst_win_ts => T0,
- burst_msg_count => 0,
- last_op => sync,
- prev_log_result => ok,
- prev_sync_result => ok,
- prev_disk_log_info => undefined}),
- Config1 =
- Config#{config => HConfig#{handler_pid => self(),
- mode_tab => ModeTab}},
- proc_lib:init_ack({ok,self(),Config1}),
- gen_server:cast(self(), repeated_disk_log_sync),
- case logger_h_common:unset_restart_flag(Name, ?MODULE) of
- true ->
- %% inform about restart
- gen_server:cast(self(), {log_handler_info,
- "Handler ~p restarted",
- [Name]});
- false ->
- %% initial start
- ok
- end,
- gen_server:enter_loop(?MODULE, [], State1)
- catch
- _:Error ->
- unregister(RegName),
- logger_h_common:error_notify({open_disk_log,Name,Error}),
- proc_lib:init_ack(Error)
- end;
- Error ->
- unregister(RegName),
- logger_h_common:error_notify({open_disk_log,Name,Error}),
- proc_lib:init_ack(Error)
- end.
+write(Name, Mode, Bin, State) ->
+ Result = ?disk_log_write(Name, Mode, Bin),
+ maybe_notify_error(Name, log, Result, prev_log_result, State).
-%% This is the synchronous log event.
-handle_call({log, Bin}, _From, State) ->
- {Result,State1} = do_log(Bin, call, State),
- %% Result == ok | dropped
- {reply, Result, State1};
-
-handle_call(disk_log_sync, _From, State = #{id := Name}) ->
- State1 = #{prev_sync_result := Result} = disk_log_sync(Name, State),
- {reply, Result, State1};
-
-handle_call({change_config,_OldConfig,NewConfig}, _From,
- State = #{filesync_repeat_interval := FSyncInt0}) ->
- HConfig = maps:get(config, NewConfig, #{}),
- State1 = #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = maps:merge(State, HConfig),
- case logger_h_common:overload_levels_ok(State1) of
- true ->
- _ =
- case maps:get(filesync_repeat_interval, HConfig, undefined) of
- undefined ->
- ok;
- no_repeat ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined));
- FSyncInt0 ->
- ok;
- _FSyncInt1 ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined)),
- _ = gen_server:cast(self(), repeated_disk_log_sync)
- end,
- {reply, ok, State1};
- false ->
- {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State}
- end;
-
-handle_call(info, _From, State) ->
- {reply, State, State};
-
-handle_call(reset, _From, State) ->
- State1 = ?merge_with_stats(State),
- {reply, ok, State1#{last_qlen => 0,
- last_log_ts => ?timestamp(),
- prev_log_result => ok,
- prev_sync_result => ok,
- prev_disk_log_info => undefined}};
-
-handle_call(stop, _From, State) ->
- {stop, {shutdown,stopped}, ok, State}.
-
-
-%% This is the asynchronous log event.
-handle_cast({log, Bin}, State) ->
- {_,State1} = do_log(Bin, cast, State),
- {noreply, State1};
-
-handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) ->
- log_handler_info(Name, Format, Args, State),
- {noreply, State};
-
-%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
-%% clause gets called repeatedly by the handler. In order to
-%% guarantee that a filesync *always* happens after the last log
-%% event, the repeat operation must be active!
-handle_cast(repeated_disk_log_sync,
- State = #{id := Name,
- filesync_repeat_interval := FSyncInt,
- last_op := LastOp}) ->
- State1 =
- if is_integer(FSyncInt) ->
- %% only do filesync if something has been
- %% written since last time we checked
- NewState = if LastOp == sync ->
- State;
- true ->
- disk_log_sync(Name, State)
- end,
- {ok,TRef} =
- timer:apply_after(FSyncInt, gen_server,cast,
- [self(),repeated_disk_log_sync]),
- NewState#{rep_sync_tref => TRef, last_op => sync};
- true ->
- State
- end,
- {noreply,State1}.
+reset_state(_Name, State) ->
+ State#{prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}.
%% The disk log owner must handle status messages from disk_log.
-handle_info({disk_log, _Node, _Log, {wrap,_NoLostItems}}, State) ->
- {noreply, State};
-handle_info({disk_log, _Node, Log, Info = {truncated,_NoLostItems}},
- State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
- error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
- {noreply, State#{prev_disk_log_info => Info}};
-handle_info({disk_log, _Node, Log, Info = {blocked_log,_Items}},
- State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
- error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
- {noreply, State#{prev_disk_log_info => Info}};
-handle_info({disk_log, _Node, Log, full},
- State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
- error_notify_new(full, PrevInfo, {disk_log,Name,Log,full}),
- {noreply, State#{prev_disk_log_info => full}};
-handle_info({disk_log, _Node, Log, Info = {error_status,_Status}},
- State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
- error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
- {noreply, State#{prev_disk_log_info => Info}};
-
-handle_info({'EXIT',_Pid,_Why}, State = #{id := _Name}) ->
- {noreply, State};
-
-handle_info(_, State) ->
- {noreply, State}.
-
-terminate(Reason, State = #{id := Name}) ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
- undefined)),
+handle_info(Name, {disk_log, _Node, Log, Info={truncated,_NoLostItems}}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(Name, {disk_log, _Node, Log, Info = {blocked_log,_Items}}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(Name, {disk_log, _Node, Log, Info = full}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(Name, {disk_log, _Node, Log, Info = {error_status,_Status}}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(_, _, State) ->
+ State.
+
+terminate(Name, _Reason, _State) ->
_ = close_disk_log(Name, normal),
- ok = logger_h_common:stop_or_restart(Name, Reason, State),
- unregister(?name_to_reg_name(?MODULE, Name)),
ok.
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
%%%-----------------------------------------------------------------
%%% Internal functions
-
-%%%-----------------------------------------------------------------
-%%%
-get_init_state() ->
- #{sync_mode_qlen => ?SYNC_MODE_QLEN,
- drop_mode_qlen => ?DROP_MODE_QLEN,
- flush_qlen => ?FLUSH_QLEN,
- burst_limit_enable => ?BURST_LIMIT_ENABLE,
- burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
- burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
- overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
- overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
- overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
- overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
- dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
- filesync_ok_qlen => ?FILESYNC_OK_QLEN,
- filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
-
-%%%-----------------------------------------------------------------
-%%% Add a disk_log handler to the logger.
-%%% This starts a dedicated handler process which should always
-%%% exist if the handler is registered with logger (and should not
-%%% exist if the handler is not registered).
-%%%
-%%% Config is the logger:handler_config() map. Handler specific parameters
-%%% should be provided with a sub map associated with a key named
-%%% 'config', e.g:
-%%%
-%%% Config = #{config => #{sync_mode_qlen => 50}
-%%%
-%%% The 'config' sub map will also contain parameters for configuring
-%%% the disk_log:
-%%%
-%%% Config = #{config => #{file => file:filename(),
-%%% max_no_bytes => integer(),
-%%% max_no_files => integer(),
-%%% type => wrap | halt}}.
-%%%
-%%% If type == halt, then max_no_files is ignored.
-%%%
-%%% The disk_log handler process is linked to logger_sup, which is
-%%% part of the kernel application's supervision tree.
-start(Name, Config, HandlerState) ->
- LoggerDLH =
- #{id => Name,
- start => {?MODULE, start_link, [Name,Config,HandlerState]},
- restart => temporary,
- shutdown => 2000,
- type => worker,
- modules => [?MODULE]},
- case supervisor:start_child(logger_sup, LoggerDLH) of
- {ok,Pid,Config1} ->
- ok = logger_handler_watcher:register_handler(Name,Pid),
- {ok,Config1};
- Error ->
- Error
- end.
-
-%%%-----------------------------------------------------------------
-%%% Stop and remove the handler.
-stop(Name) ->
- case whereis(?name_to_reg_name(?MODULE,Name)) of
- undefined ->
- ok;
- Pid ->
- %% We don't want to do supervisor:terminate_child here
- %% since we need to distinguish this explicit stop from a
- %% system termination in order to avoid circular attempts
- %% at removing the handler (implying deadlocks and
- %% timeouts).
- %% And we don't need to do supervisor:delete_child, since
- %% the restart type is temporary, which means that the
- %% child specification is automatically removed from the
- %% supervisor when the process dies.
- _ = gen_server:call(Pid, stop),
- ok
- end.
-
-%%%-----------------------------------------------------------------
-%%% Logging and overload control.
--define(update_dl_sync(C, Interval),
- if C == 0 -> Interval;
- true -> C-1 end).
-
-%% check for overload between every event (and set Mode to async,
-%% sync or drop accordingly), but never flush the whole mailbox
-%% before LogWindowSize events have been handled
-do_log(Bin, CallOrCast, State = #{id:=Name, mode := Mode0}) ->
- T1 = ?timestamp(),
-
- %% check if the handler is getting overloaded, or if it's
- %% recovering from overload (the check must be done for each
- %% event to react quickly to large bursts of events and
- %% to ensure that the handler can never end up in drop mode
- %% with an empty mailbox, which would stop operation)
- {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
-
- if (Mode1 == drop) andalso (Mode0 =/= drop) ->
- log_handler_info(Name, "Handler ~p switched to drop mode",
- [Name], State);
- (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
- log_handler_info(Name, "Handler ~p switched to ~w mode",
- [Name,Mode1], State);
- true ->
- ok
- end,
-
- %% kill the handler if it can't keep up with the load
- logger_h_common:kill_if_choked(Name, QLen, Mem, ?MODULE, State),
-
- if Mode1 == flush ->
- flush(Name, QLen, T1, State1);
- true ->
- write(Name, Mode1, T1, Bin, CallOrCast, State1)
- end.
-
-%% this function is called by do_log/3 after an overload check
-%% has been performed, where QLen > FlushQLen
-flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
- %% flush messages in the mailbox (a limited number in
- %% order to not cause long delays)
- NewFlushed = logger_h_common:flush_log_events(?FLUSH_MAX_N),
-
- %% write info in log about flushed messages
- log_handler_info(Name, "Handler ~p flushed ~w log events",
- [Name,NewFlushed], State),
-
- %% because of the receive loop when flushing messages, the
- %% handler will be scheduled out often and the mailbox could
- %% grow very large, so we'd better check the queue again here
- {_,_QLen1} = process_info(self(), message_queue_len),
- ?observe(Name,{max_qlen,_QLen1}),
-
- %% Add 1 for the current log event
- ?observe(Name,{flushed,NewFlushed+1}),
-
- State1 = ?update_max_time(?diff_time(T1,_T0),State),
- {dropped,?update_other(flushed,FLUSHED,NewFlushed,
- State1#{mode => ?set_mode(ModeTab,async),
- last_qlen => 0,
- last_log_ts => T1})}.
-
-%% this function is called to write to disk_log
-write(Name, Mode, T1, Bin, _CallOrCast,
- State = #{mode_tab := ModeTab,
- dl_sync := DLSync,
- dl_sync_int := DLSyncInt,
- last_qlen := LastQLen,
- last_log_ts := T0}) ->
- %% check if we need to limit the number of writes
- %% during a burst of log events
- {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
-
- %% only send a synhrounous event to the disk_log process
- %% every DLSyncInt time, to give the handler time between
- %% writes so it can keep up with incoming messages
- {Status,LastQLen1,State1} =
- if DoWrite, DLSync == 0 ->
- ?observe(Name,{_CallOrCast,1}),
- NewState = disk_log_write(Name, Bin, State),
- {ok, element(2,process_info(self(),message_queue_len)),
- NewState};
- DoWrite ->
- ?observe(Name,{_CallOrCast,1}),
- NewState = disk_log_write(Name, Bin, State),
- {ok, LastQLen, NewState};
- not DoWrite ->
- ?observe(Name,{flushed,1}),
- {dropped, LastQLen, State}
- end,
-
- %% Check if the time since the previous log event is long enough -
- %% and the queue length small enough - to assume the mailbox has
- %% been emptied, and if so, do filesync operation and reset mode to
- %% async. Note that this is the best we can do to detect an idle
- %% handler without setting a timer after each log call/cast. If the
- %% time between two consecutive log events is fast and no new
- %% event comes in after the last one, idle state won't be detected!
- Time = ?diff_time(T1,T0),
- {Mode1,BurstMsgCount1,State2} =
- if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
- (Time > ?IDLE_DETECT_TIME_USEC) ->
- {?change_mode(ModeTab,Mode,async), 0, disk_log_sync(Name,State1)};
- true ->
- {Mode, BurstMsgCount,State1}
- end,
-
- State3 =
- ?update_calls_or_casts(_CallOrCast,1,State2),
- State4 =
- ?update_max_time(Time,
- State3#{mode => Mode1,
- last_qlen := LastQLen1,
- last_log_ts => T1,
- burst_win_ts => BurstWinT,
- burst_msg_count => BurstMsgCount1,
- dl_sync => ?update_dl_sync(DLSync,DLSyncInt)}),
- {Status,State4}.
-
-
-log_handler_info(Name, Format, Args, State) ->
- Config =
- case logger:get_handler_config(Name) of
- {ok,Conf} -> Conf;
- _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
- end,
- Meta = #{time=>erlang:system_time(microsecond)},
- Bin = logger_h_common:log_to_binary(#{level => notice,
- msg => {Format,Args},
- meta => Meta}, Config),
- _ = disk_log_write(Name, Bin, State),
- ok.
-
-
open_disk_log(Name, File, Type, MaxNoBytes, MaxNoFiles) ->
case filelib:ensure_dir(File) of
ok ->
@@ -681,43 +246,26 @@ close_disk_log(Name, _) ->
_ = disk_log:lclose(Name),
ok.
-disk_log_write(Name, Bin, State) ->
- case ?disk_log_blog(Name, Bin) of
- ok ->
- State#{prev_log_result => ok, last_op => write};
- LogError ->
- _ = case maps:get(prev_log_result, State) of
- LogError ->
- %% don't report same error twice
- ok;
- _ ->
- LogOpts = maps:get(log_opts, State),
- logger_h_common:error_notify({Name,log,
- LogOpts,
- LogError})
- end,
- State#{prev_log_result => LogError}
- end.
-
-disk_log_sync(Name, State) ->
- case ?disk_log_sync(Name) of
- ok ->
- State#{prev_sync_result => ok, last_op => sync};
- SyncError ->
- _ = case maps:get(prev_sync_result, State) of
- SyncError ->
- %% don't report same error twice
- ok;
- _ ->
- LogOpts = maps:get(log_opts, State),
- logger_h_common:error_notify({Name,filesync,
- LogOpts,
- SyncError})
- end,
- State#{prev_sync_result => SyncError}
- end.
+disk_log_write(Name, sync, Bin) ->
+ disk_log:blog(Name, Bin);
+disk_log_write(Name, async, Bin) ->
+ disk_log:balog(Name, Bin).
+
+%%%-----------------------------------------------------------------
+%%% Print error messages, but don't repeat the same message
+maybe_notify_error(Name, Op, Result, Key, #{log_opts:=LogOpts}=State) ->
+ {Result,error_notify_new({Name, Op, LogOpts, Result}, Result, Key, State)}.
-error_notify_new(Info,Info, _Term) ->
+maybe_notify_status(Name, Log, Info, Key, State) ->
+ error_notify_new({disk_log, Name, Log, Info}, Info, Key, State).
+
+error_notify_new(Term, What, Key, State) ->
+ error_notify_new(What, maps:get(Key,State), Term),
+ State#{Key => What}.
+
+error_notify_new(ok,_Prev,_Term) ->
+ ok;
+error_notify_new(Same,Same,_Term) ->
ok;
-error_notify_new(_Info0,_Info1, Term) ->
+error_notify_new(_New,_Prev,Term) ->
logger_h_common:error_notify(Term).
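
For reference, adding a disk_log handler with the write-once parameters handled above could look like the following sketch (handler id, file name and limits are arbitrary examples):

    ok = logger:add_handler(my_disk_log, logger_disk_log_h,
                            #{config => #{file => "./my_disk_log",
                                          type => wrap,
                                          max_no_files => 4,
                                          max_no_bytes => 1000000}}).
    %% file, type, max_no_files and max_no_bytes cannot be changed later;
    %% such a change is rejected with an illegal_config_change error.
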
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
index 38ac7d8ffc..74a2d158fc 100644
--- a/lib/kernel/src/logger_h_common.erl
+++ b/lib/kernel/src/logger_h_common.erl
@@ -18,26 +18,469 @@
%% %CopyrightEnd%
%%
-module(logger_h_common).
+-behaviour(gen_server).
-include("logger_h_common.hrl").
-include("logger_internal.hrl").
--export([log_to_binary/2,
- check_common_config/1,
- call_cast_or_drop/4,
- check_load/1,
- limit_burst/1,
- kill_if_choked/5,
- flush_log_events/0,
- flush_log_events/1,
- handler_exit/2,
- set_restart_flag/2,
- unset_restart_flag/2,
- cancel_timer/1,
- stop_or_restart/3,
- overload_levels_ok/1,
- error_notify/1,
- info_notify/1]).
+%% API
+-export([start_link/1, info/2, filesync/2, reset/2]).
+
+%% gen_server and proc_lib callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
+
+%% Library functions for handlers
+-export([error_notify/1]).
+
+%%%-----------------------------------------------------------------
+-define(CONFIG_KEYS,[sync_mode_qlen,
+ drop_mode_qlen,
+ flush_qlen,
+ burst_limit_enable,
+ burst_limit_max_count,
+ burst_limit_window_time,
+ overload_kill_enable,
+ overload_kill_qlen,
+ overload_kill_mem_size,
+ overload_kill_restart_after,
+ filesync_repeat_interval]).
+-define(READ_ONLY_KEYS,[handler_pid,mode_tab]).
+
+%%%-----------------------------------------------------------------
+%%% API
+
+%% This function is called by the logger_sup supervisor
+start_link(Args) ->
+ proc_lib:start_link(?MODULE,init,[Args]).
+
+filesync(Module, Name) ->
+ call(Module, Name, filesync).
+
+info(Module, Name) ->
+ call(Module, Name, info).
+
+reset(Module, Name) ->
+ call(Module, Name, reset).
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(#{id:=Name,module:=Module}=Config) ->
+ HConfig0 = maps:get(config, Config, #{}),
+ HandlerConfig0 = maps:without(?CONFIG_KEYS,HConfig0),
+ case Module:check_config(Name,set,undefined,HandlerConfig0) of
+ {ok,HandlerConfig} ->
+ ModifiedCommon = maps:with(?CONFIG_KEYS,HandlerConfig),
+ CommonConfig0 = maps:with(?CONFIG_KEYS,HConfig0),
+ CommonConfig = maps:merge(
+ maps:merge(get_default_config(), CommonConfig0),
+ ModifiedCommon),
+ case check_config(CommonConfig) of
+ ok ->
+ HConfig = maps:merge(CommonConfig,HandlerConfig),
+ start(Config#{config => HConfig});
+ {error,Faulty} ->
+ {error,{invalid_config,Module,Faulty}}
+ end;
+ Error ->
+ Error
+ end.
+
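The nested maps:merge/2 above determines precedence for the common keys: values returned by the handler's check_config/4 callback (ModifiedCommon) win over the user-supplied values (CommonConfig0), which in turn win over the built-in defaults. A short shell illustration of that ordering, with made-up keys:

    %% maps:merge/2 lets its second argument win, so after the nested
    %% merge: ModifiedCommon > CommonConfig0 > get_default_config().
    1> maps:merge(maps:merge(#{a => 1, b => 2},   % defaults
                             #{a => 10}),         % user-supplied values
                  #{b => 20}).                    % modified by check_config/4
    #{a => 10,b => 20}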
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(#{id:=Name, module:=Module}) ->
+ case whereis(?name_to_reg_name(Module,Name)) of
+ undefined ->
+ ok;
+ Pid ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ %% And we don't need to do supervisor:delete_child, since
+ %% the restart type is temporary, which means that the
+ %% child specification is automatically removed from the
+ %% supervisor when the process dies.
+ _ = gen_server:call(Pid, stop),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(SetOrUpdate,
+ #{id:=Name,config:=OldHConfig,module:=Module},
+ NewConfig0) ->
+ NewHConfig0 = maps:get(config, NewConfig0, #{}),
+ OldHandlerConfig = maps:without(?CONFIG_KEYS++?READ_ONLY_KEYS,OldHConfig),
+ NewHandlerConfig0 = maps:without(?CONFIG_KEYS++?READ_ONLY_KEYS,NewHConfig0),
+ case Module:check_config(Name, SetOrUpdate,
+ OldHandlerConfig,NewHandlerConfig0) of
+ {ok, NewHandlerConfig} ->
+ ModifiedCommon = maps:with(?CONFIG_KEYS,NewHandlerConfig),
+ NewCommonConfig0 = maps:with(?CONFIG_KEYS,NewHConfig0),
+ CommonDefault =
+ case SetOrUpdate of
+ set ->
+ get_default_config();
+ update ->
+ maps:with(?CONFIG_KEYS,OldHConfig)
+ end,
+ NewCommonConfig = maps:merge(
+ maps:merge(CommonDefault,NewCommonConfig0),
+ ModifiedCommon),
+ case check_config(NewCommonConfig) of
+ ok ->
+ ReadOnly = maps:with(?READ_ONLY_KEYS,OldHConfig),
+ NewHConfig = maps:merge(
+ maps:merge(NewCommonConfig,NewHandlerConfig),
+ ReadOnly),
+ NewConfig = NewConfig0#{config=>NewHConfig},
+ HPid = maps:get(handler_pid,OldHConfig),
+ case call(HPid, {change_config,NewConfig}) of
+ ok -> {ok,NewConfig};
+ Error -> Error
+ end;
+ {error,Faulty} ->
+ {error,{invalid_config,Module,Faulty}}
+ end;
+ Error ->
+ Error
+ end.
+
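The SetOrUpdate argument chooses the base for that merge: set starts from the defaults, so omitted common keys are reset, while update starts from the old configuration, so omitted keys keep their current values. A hedged shell sketch of the user-visible difference, assuming a handler named my_h whose sync_mode_qlen was raised earlier (logger:set_handler_config/2 and logger:update_handler_config/2 are the API calls that end up here):

    %% update: only drop_mode_qlen changes, sync_mode_qlen keeps its value
    1> logger:update_handler_config(my_h, #{config => #{drop_mode_qlen => 500}}).
    ok
    %% set: drop_mode_qlen changes, and every other common key, including
    %% sync_mode_qlen, falls back to its default
    2> logger:set_handler_config(my_h, #{config => #{drop_mode_qlen => 500}}).
    ok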
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config = #{id := Name,
+ config := #{handler_pid := HPid,
+ mode_tab := ModeTab}}) ->
+ %% if the handler has crashed, we must drop this event
+ %% and hope the handler restarts so we can try again
+ true = is_process_alive(HPid),
+ Bin = log_to_binary(LogEvent, Config),
+ call_cast_or_drop(Name, HPid, ModeTab, Bin).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(#{config:=HConfig}=Config) ->
+ Config#{config=>maps:without(?READ_ONLY_KEYS,HConfig)}.
+
+%%%-----------------------------------------------------------------
+%%% Start the handler process
+%%%
+%%% The process must always exist if the handler is registered with
+%%% logger (and must not exist if the handler is not registered).
+%%%
+%%% The handler process is linked to logger_sup, which is part of the
+%%% kernel application's supervision tree.
+start(#{id := Name} = Config0) ->
+ ChildSpec =
+ #{id => Name,
+ start => {?MODULE, start_link, [Config0]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, ChildSpec) of
+ {ok,Pid,Config} ->
+ ok = logger_handler_watcher:register_handler(Name,Pid),
+ {ok,Config};
+ Error ->
+ Error
+ end.
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init(#{id := Name, module := Module,
+ formatter := Formatter, config := HConfig0} = Config0) ->
+ RegName = ?name_to_reg_name(Module,Name),
+ register(RegName, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ case Module:init(Name, HConfig0) of
+ {ok,HState} ->
+ try ets:new(Name, [public]) of
+ ModeTab ->
+ ?set_mode(ModeTab, async),
+ T0 = ?timestamp(),
+ HConfig = HConfig0#{handler_pid => self(),
+ mode_tab => ModeTab},
+ Config = Config0#{config => HConfig},
+ proc_lib:init_ack({ok,self(),Config}),
+ %% Storing common config in state to avoid copying
+ %% (sending) the config data for each log message
+ CommonConfig = maps:with(?CONFIG_KEYS,HConfig),
+ State =
+ ?merge_with_stats(
+ CommonConfig#{id => Name,
+ module => Module,
+ mode_tab => ModeTab,
+ mode => async,
+ ctrl_sync_count =>
+ ?CONTROLLER_SYNC_INTERVAL,
+ last_qlen => 0,
+ last_log_ts => T0,
+ last_op => sync,
+ burst_win_ts => T0,
+ burst_msg_count => 0,
+ formatter => Formatter,
+ handler_state => HState}),
+ State1 = set_repeated_filesync(State),
+ unset_restart_flag(State1),
+ gen_server:enter_loop(?MODULE, [], State1)
+ catch
+ _:Error ->
+ unregister(RegName),
+ error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end;
+ Error ->
+ unregister(RegName),
+ error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+%% This is the synchronous log event.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply,Result, State1};
+
+handle_call(filesync, _From, State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState}) ->
+ {Result,HandlerState1} = Module:filesync(Name,sync,HandlerState),
+ {reply, Result, State#{handler_state=>HandlerState1, last_op=>sync}};
+
+handle_call({change_config, #{formatter:=Formatter, config:=NewHConfig}}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0}) ->
+ %% In the future, if handler_state must be updated due to config
+ %% change, then we need to add a callback to Module here.
+ CommonConfig = maps:with(?CONFIG_KEYS,NewHConfig),
+ State1 = maps:merge(State, CommonConfig),
+ State2 =
+ case maps:get(filesync_repeat_interval, NewHConfig) of
+ FSyncInt0 ->
+ State1;
+ _FSyncInt1 ->
+ set_repeated_filesync(cancel_repeated_filesync(State1))
+ end,
+ {reply, ok, State2#{formatter:=Formatter}};
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From,
+ #{id:=Name,module:=Module,handler_state:=HandlerState}=State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp(),
+ handler_state => Module:reset_state(Name,HandlerState)}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+%% This is the asynchronous log event.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% event, the repeat operation must be active!
+handle_cast(repeated_filesync,State = #{filesync_repeat_interval := no_repeat}) ->
+ %% This clause handles a race condition which may occur when
+ %% config changes filesync_repeat_interval from an integer value
+ %% to no_repeat.
+ {noreply,State};
+handle_cast(repeated_filesync,
+ State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState,
+ last_op := LastOp}) ->
+ State1 =
+ if LastOp == sync ->
+ State;
+ true ->
+ {_,HS} = Module:filesync(Name, async, HandlerState),
+ State#{handler_state => HS, last_op => sync}
+ end,
+ {noreply,set_repeated_filesync(State1)}.
+
+handle_info(Info, #{id := Name, module := Module,
+ handler_state := HandlerState} = State) ->
+ {noreply,State#{handler_state => Module:handle_info(Name,Info,HandlerState)}}.
+
+terminate(Reason, State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState}) ->
+ _ = cancel_repeated_filesync(State),
+ _ = Module:terminate(Name, Reason, HandlerState),
+ ok = stop_or_restart(Name, Reason, State),
+ unregister(?name_to_reg_name(Module, Name)),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+call(Module, Name, Op) when is_atom(Name) ->
+ call(?name_to_reg_name(Module,Name), Op);
+call(_, Name, Op) ->
+ {error,{badarg,{Op,[Name]}}}.
+
+call(Server, Msg) ->
+ try
+ gen_server:call(Server, Msg, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end.
+
+%% check for overload between every event (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% before LogWindowSize events have been handled
+do_log(Bin, CallOrCast, State = #{id:=Name, mode:=Mode0}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% event to react quickly to large bursts of events and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = check_load(State),
+
+ if (Mode1 == drop) andalso (Mode0 =/= drop) ->
+ log_handler_info(Name, "Handler ~p switched to drop mode",
+ [Name], State);
+ (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
+ log_handler_info(Name, "Handler ~p switched to ~w mode",
+ [Name,Mode1], State);
+ true ->
+ ok
+ end,
+
+ %% kill the handler if it can't keep up with the load
+ kill_if_choked(Name, QLen, Mem, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
+
+%% this clause is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ NewFlushed = flush_log_events(?FLUSH_MAX_N),
+
+ %% write info in log about flushed messages
+ log_handler_info(Name, "Handler ~p flushed ~w log events",
+ [Name,NewFlushed], State),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log event
+ ?observe(Name,{flushed,NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ State2 = ?update_max_qlen(_QLen1,State1),
+ {dropped,?update_other(flushed,FLUSHED,NewFlushed,
+ State2#{mode => ?set_mode(ModeTab,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this clause is called to write to file
+write(Name, Mode, T1, Bin, _CallOrCast,
+ State = #{module := Module,
+ handler_state := HandlerState,
+ mode_tab := ModeTab,
+ ctrl_sync_count := CtrlSync,
+ last_qlen := LastQLen,
+ last_log_ts := T0}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log events
+ {DoWrite,State1} = limit_burst(State),
+
+    %% only log synchronously every ?CONTROLLER_SYNC_INTERVAL'th time, to
+ %% give the handler time between writes so it can keep up with
+ %% incoming messages
+ {Result,LastQLen1,HandlerState1} =
+ if DoWrite, CtrlSync == 0 ->
+ ?observe(Name,{_CallOrCast,1}),
+ {_,HS1} = Module:write(Name, sync, Bin, HandlerState),
+ {ok,element(2, process_info(self(), message_queue_len)),HS1};
+ DoWrite ->
+ ?observe(Name,{_CallOrCast,1}),
+ {_,HS1} = Module:write(Name, async, Bin, HandlerState),
+ {ok,LastQLen,HS1};
+ not DoWrite ->
+ ?observe(Name,{flushed,1}),
+ {dropped,LastQLen,HandlerState}
+ end,
+
+ %% Check if the time since the previous log event is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+ %% been emptied, and if so, do filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+ %% time between two consecutive log events is fast and no new
+ %% event comes in after the last one, idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ State2 =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ {_,HS2} = Module:filesync(Name,async,HandlerState),
+ State1#{mode => ?change_mode(ModeTab, Mode, async),
+ burst_msg_count => 0,
+ handler_state => HS2};
+ true ->
+ State1#{mode => Mode, handler_state => HandlerState1}
+ end,
+ State3 = ?update_calls_or_casts(_CallOrCast,1,State2),
+ State4 = ?update_max_qlen(LastQLen1,State3),
+ State5 =
+ ?update_max_time(Time,
+ State4#{last_qlen := LastQLen1,
+ last_log_ts => T1,
+ last_op => write,
+ ctrl_sync_count =>
+ if CtrlSync==0 -> ?CONTROLLER_SYNC_INTERVAL;
+ true -> CtrlSync-1
+ end}),
+ {Result,State5}.
+
+log_handler_info(Name, Format, Args, #{module:=Module,
+ formatter:=Formatter,
+ handler_state:=HandlerState}) ->
+ Config = #{formatter=>Formatter},
+ Meta = #{time=>erlang:system_time(microsecond)},
+ Bin = log_to_binary(#{level => notice,
+ msg => {Format,Args},
+ meta => Meta}, Config),
+ _ = Module:write(Name, async, Bin, HandlerState),
+ ok.
%%%-----------------------------------------------------------------
%%% Convert log data on any form to binary
@@ -94,46 +537,65 @@ string_to_binary(String) ->
throw(Error)
end.
-
%%%-----------------------------------------------------------------
%%% Check that the configuration term is valid
-check_common_config({mode_tab,_Tid}) ->
- valid;
-check_common_config({handler_pid,Pid}) when is_pid(Pid) ->
- valid;
-
-check_common_config({sync_mode_qlen,N}) when is_integer(N) ->
- valid;
-check_common_config({drop_mode_qlen,N}) when is_integer(N) ->
- valid;
-check_common_config({flush_qlen,N}) when is_integer(N) ->
- valid;
-
-check_common_config({burst_limit_enable,Bool}) when Bool == true;
- Bool == false ->
- valid;
-check_common_config({burst_limit_max_count,N}) when is_integer(N) ->
- valid;
-check_common_config({burst_limit_window_time,N}) when is_integer(N) ->
- valid;
-
-check_common_config({overload_kill_enable,Bool}) when Bool == true;
- Bool == false ->
- valid;
-check_common_config({overload_kill_qlen,N}) when is_integer(N) ->
- valid;
-check_common_config({overload_kill_mem_size,N}) when is_integer(N) ->
- valid;
-check_common_config({overload_kill_restart_after,NorA}) when is_integer(NorA);
- NorA == infinity ->
- valid;
-
-check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA);
- NorA == no_repeat ->
- valid;
-check_common_config(_) ->
- invalid.
+check_config(Config) when is_map(Config) ->
+ case check_common_config(maps:to_list(Config)) of
+ ok ->
+ case overload_levels_ok(Config) of
+ true ->
+ ok;
+ false ->
+ Faulty = maps:with([sync_mode_qlen,
+ drop_mode_qlen,
+ flush_qlen],Config),
+ {error,{invalid_levels,Faulty}}
+ end;
+ Error ->
+ Error
+ end.
+
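check_config/1 also rejects queue-length levels that are not increasing (roughly sync_mode_qlen =< drop_mode_qlen =< flush_qlen, enforced by overload_levels_ok/1 further down). A hedged sketch of how that surfaces when adding a handler, with invented values and the error term abbreviated:

    %% sync_mode_qlen (50) > drop_mode_qlen (20), so the handler is refused
    1> logger:add_handler(bad_h, logger_std_h,
                          #{config => #{sync_mode_qlen => 50,
                                        drop_mode_qlen => 20}}).
    %% -> {error, ... {invalid_config,logger_std_h,{invalid_levels,...}} ...}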
+check_common_config([{sync_mode_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{drop_mode_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{flush_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{burst_limit_enable,Bool}|Config]) when is_boolean(Bool) ->
+ check_common_config(Config);
+check_common_config([{burst_limit_max_count,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{burst_limit_window_time,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_enable,Bool}|Config]) when is_boolean(Bool) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_mem_size,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_restart_after,NorA}|Config])
+ when is_integer(NorA); NorA == infinity ->
+ check_common_config(Config);
+check_common_config([{filesync_repeat_interval,NorA}|Config])
+ when is_integer(NorA); NorA == no_repeat ->
+ check_common_config(Config);
+check_common_config([{Key,Value}|_]) ->
+ {error,#{Key=>Value}};
+check_common_config([]) ->
+    ok.
+
+get_default_config() ->
+ #{sync_mode_qlen => ?SYNC_MODE_QLEN,
+ drop_mode_qlen => ?DROP_MODE_QLEN,
+ flush_qlen => ?FLUSH_QLEN,
+ burst_limit_enable => ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
%%%-----------------------------------------------------------------
%%% Overload Protection
@@ -146,15 +608,13 @@ call_cast_or_drop(_Name, HandlerPid, ModeTab, Bin) ->
async ->
gen_server:cast(HandlerPid, {log,Bin});
sync ->
- try gen_server:call(HandlerPid, {log,Bin}, ?DEFAULT_CALL_TIMEOUT) of
- %% if return value from call == dropped, the
- %% message has been flushed by handler and should
- %% therefore not be counted as dropped in stats
- ok -> ok;
- dropped -> ok
- catch
- _:{timeout,_} ->
- ?observe(_Name,{dropped,1})
+ case call(HandlerPid, {log,Bin}) of
+ ok ->
+ ok;
+ _Other ->
+ %% dropped or {error,handler_busy}
+ ?observe(_Name,{dropped,1}),
+ ok
end;
drop ->
?observe(_Name,{dropped,1})
@@ -165,10 +625,8 @@ call_cast_or_drop(_Name, HandlerPid, ModeTab, Bin) ->
end,
ok.
-handler_exit(_Name, Reason) ->
- exit(Reason).
-
-set_restart_flag(Name, Module) ->
+set_restart_flag(#{id := Name, module := Module} = State) ->
+ log_handler_info(Name, "Handler ~p overloaded and stopping", [Name], State),
Flag = list_to_atom(lists:concat([Module,"_",Name,"_restarting"])),
spawn(fun() ->
register(Flag, self()),
@@ -176,14 +634,14 @@ set_restart_flag(Name, Module) ->
end),
ok.
-unset_restart_flag(Name, Module) ->
+unset_restart_flag(#{id := Name, module := Module} = State) ->
Flag = list_to_atom(lists:concat([Module,"_",Name,"_restarting"])),
case whereis(Flag) of
undefined ->
- false;
+ ok;
Pid ->
exit(Pid, kill),
- true
+ log_handler_info(Name, "Handler ~p restarted", [Name], State)
end.
check_load(State = #{id:=_Name, mode_tab := ModeTab, mode := Mode,
@@ -221,46 +679,41 @@ check_load(State = #{id:=_Name, mode_tab := ModeTab, mode := Mode,
?update_other(flushes,FLUSHES,_NewFlushes,
State1#{last_qlen => QLen})}.
-limit_burst(#{burst_limit_enable := false}) ->
- {true,0,0};
+limit_burst(#{burst_limit_enable := false}=State) ->
+ {true,State};
limit_burst(#{burst_win_ts := BurstWinT0,
burst_msg_count := BurstMsgCount,
burst_limit_window_time := BurstLimitWinTime,
- burst_limit_max_count := BurstLimitMaxCnt}) ->
+ burst_limit_max_count := BurstLimitMaxCnt} = State) ->
if (BurstMsgCount >= BurstLimitMaxCnt) ->
%% the limit for allowed messages has been reached
BurstWinT1 = ?timestamp(),
case ?diff_time(BurstWinT1,BurstWinT0) of
BurstCheckTime when BurstCheckTime < (BurstLimitWinTime*1000) ->
%% we're still within the burst time frame
- {false,BurstWinT0,BurstMsgCount};
+ {false,?update_other(burst_drops,BURSTS,1,State)};
_BurstCheckTime ->
%% burst time frame passed, reset counters
- {true,BurstWinT1,0}
+ {true,State#{burst_win_ts => BurstWinT1,
+ burst_msg_count => 0}}
end;
true ->
%% the limit for allowed messages not yet reached
- {true,BurstWinT0,BurstMsgCount+1}
+ {true,State#{burst_win_ts => BurstWinT0,
+ burst_msg_count => BurstMsgCount+1}}
end.
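With the defaults (burst_limit_max_count 500, burst_limit_window_time 1000 ms), the clause above admits at most 500 writes per window. The BurstLimitWinTime*1000 factor converts the window length from milliseconds to the microsecond resolution that ?timestamp()/?diff_time() appear to use. A quick sanity check of the comparison with assumed values:

    %% 600 ms into the window, expressed in microseconds, is still inside
    %% the 1000 ms window, so the event is dropped and counted as a burst drop
    1> BurstCheckTime = 600 * 1000, BurstLimitWinTime = 1000,
       BurstCheckTime < BurstLimitWinTime * 1000.
    true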
-kill_if_choked(Name, QLen, Mem, HandlerMod,
- State = #{overload_kill_enable := KillIfOL,
- overload_kill_qlen := OLKillQLen,
- overload_kill_mem_size := OLKillMem}) ->
+kill_if_choked(Name, QLen, Mem, State = #{overload_kill_enable := KillIfOL,
+ overload_kill_qlen := OLKillQLen,
+ overload_kill_mem_size := OLKillMem}) ->
if KillIfOL andalso
((QLen > OLKillQLen) orelse (Mem > OLKillMem)) ->
- HandlerMod:log_handler_info(Name,
- "Handler ~p overloaded and stopping",
- [Name], State),
- set_restart_flag(Name, HandlerMod),
- handler_exit(Name, {shutdown,{overloaded,Name,QLen,Mem}});
+ set_restart_flag(State),
+ exit({shutdown,{overloaded,Name,QLen,Mem}});
true ->
ok
end.
-flush_log_events() ->
- flush_log_events(-1).
-
flush_log_events(Limit) ->
process_flag(priority, high),
Flushed = flush_log_events(0, Limit),
@@ -283,16 +736,29 @@ flush_log_events(N, Limit) ->
0 -> N
end.
-cancel_timer(TRef) when is_atom(TRef) -> ok;
-cancel_timer(TRef) -> timer:cancel(TRef).
+set_repeated_filesync(#{filesync_repeat_interval:=FSyncInt} = State)
+ when is_integer(FSyncInt) ->
+ {ok,TRef} = timer:apply_after(FSyncInt, gen_server, cast,
+ [self(),repeated_filesync]),
+ State#{rep_sync_tref=>TRef};
+set_repeated_filesync(State) ->
+    State.
+
+cancel_repeated_filesync(State) ->
+ case maps:take(rep_sync_tref,State) of
+ {TRef,State1} ->
+ _ = timer:cancel(TRef),
+ State1;
+ error ->
+ State
+ end.
stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
#{overload_kill_restart_after := RestartAfter}) ->
%% If we're terminating because of an overload situation (see
- %% logger_h_common:kill_if_choked/4), we need to remove the handler
- %% and set a restart timer. A separate process must perform this
- %% in order to avoid deadlock.
+ %% kill_if_choked/4), we need to remove the handler and set a
+ %% restart timer. A separate process must perform this in order to
+ %% avoid deadlock.
HandlerPid = self(),
ConfigResult = logger:get_handler_config(Name),
RemoveAndRestart =
@@ -306,8 +772,11 @@ stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
exit(HandlerPid, kill)
end,
case ConfigResult of
- {ok,#{module:=HMod}=HConfig} when is_integer(RestartAfter) ->
+ {ok,#{module:=HMod}=HConfig0} when is_integer(RestartAfter) ->
_ = logger:remove_handler(Name),
+ HConfig = try HMod:filter_config(HConfig0)
+ catch _:_ -> HConfig0
+ end,
_ = timer:apply_after(RestartAfter, logger, add_handler,
[Name,HMod,HConfig]);
{ok,_} ->
@@ -331,6 +800,3 @@ overload_levels_ok(HandlerConfig) ->
error_notify(Term) ->
?internal_log(error, Term).
-
-info_notify(Term) ->
- ?internal_log(info, Term).
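Taken together, the gen_server above defines an informal contract for the handler modules that follow: the logger callbacks are forwarded to logger_h_common, which in turn calls init/2, check_config/4, write/4, filesync/3, reset_state/2, handle_info/3 and terminate/3 in the handler process. This is not a documented behaviour at this point, so the skeleton below is only a sketch inferred from the calls in this patch; the module name and the standard_error sink are invented:

    -module(my_stderr_h).

    %% logger callbacks, all forwarded to logger_h_common
    -export([adding_handler/1, removing_handler/1, changing_config/3,
             filter_config/1, log/2]).

    %% callbacks invoked by logger_h_common in the handler process
    -export([init/2, check_config/4, write/4, filesync/3,
             reset_state/2, handle_info/3, terminate/3]).

    adding_handler(Config)   -> logger_h_common:adding_handler(Config).
    removing_handler(Config) -> logger_h_common:removing_handler(Config).
    changing_config(SetOrUpdate, Old, New) ->
        logger_h_common:changing_config(SetOrUpdate, Old, New).
    filter_config(Config)    -> logger_h_common:filter_config(Config).
    log(LogEvent, Config)    -> logger_h_common:log(LogEvent, Config).

    %% handler-specific callbacks
    init(_Name, HConfig) -> {ok, HConfig}.               % becomes handler_state
    check_config(_Name, _SetOrUpdate, _Old, New) -> {ok, New}.
    write(_Name, _SyncOrAsync, Bin, HState) ->
        ok = io:put_chars(standard_error, Bin),
        {ok, HState}.
    filesync(_Name, _SyncOrAsync, HState) -> {ok, HState}.
    reset_state(_Name, HState) -> HState.
    handle_info(_Name, _Info, HState) -> HState.
    terminate(_Name, _Reason, _HState) -> ok.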
diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl
index e0a7b6e3ca..261b0a6246 100644
--- a/lib/kernel/src/logger_h_common.hrl
+++ b/lib/kernel/src/logger_h_common.hrl
@@ -137,7 +137,7 @@
ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
- ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_write,ok}),
ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
-define(set_internal_log(MOD_FUNC),
@@ -150,7 +150,7 @@
ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
- ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_write,ok}),
ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
-define(internal_log(TYPE, TERM),
@@ -171,11 +171,11 @@
[{_,ERROR}] -> ERROR
catch _:_ -> file:datasync(DEVICE) end).
- -define(disk_log_blog(LOG, DATA),
- try ets:lookup(?TEST_HOOKS_TAB, disk_log_blog) of
- [{_,ok}] -> disk_log:blog(LOG, DATA);
+ -define(disk_log_write(LOG, MODE, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_write) of
+ [{_,ok}] -> disk_log_write(LOG, MODE, DATA);
[{_,ERROR}] -> ERROR
- catch _:_ -> disk_log:blog(LOG, DATA) end).
+ catch _:_ -> disk_log_write(LOG, MODE, DATA) end).
-define(disk_log_sync(LOG),
try ets:lookup(?TEST_HOOKS_TAB, disk_log_sync) of
@@ -194,7 +194,7 @@
-define(internal_log(TYPE, TERM), logger:internal_log(TYPE, TERM)).
-define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)).
-define(file_datasync(DEVICE), file:datasync(DEVICE)).
- -define(disk_log_blog(LOG, DATA), disk_log:blog(LOG, DATA)).
+ -define(disk_log_write(LOG, MODE, DATA), disk_log_write(LOG, MODE, DATA)).
-define(disk_log_sync(LOG), disk_log:sync(LOG)).
-define(DEFAULT_CALL_TIMEOUT, 10000).
-endif.
@@ -210,7 +210,7 @@
-ifdef(SAVE_STATS).
-define(merge_with_stats(STATE),
STATE#{flushes => 0, flushed => 0, drops => 0,
- casts => 0, calls => 0,
+ burst_drops => 0, casts => 0, calls => 0,
max_qlen => 0, max_time => 0}).
-define(update_max_qlen(QLEN, STATE),
diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl
index a1d40f1123..b7735dbcf7 100644
--- a/lib/kernel/src/logger_server.erl
+++ b/lib/kernel/src/logger_server.erl
@@ -27,9 +27,13 @@
add_filter/2, remove_filter/2,
set_module_level/2, unset_module_level/0,
unset_module_level/1, cache_module_level/1,
- set_config/2, set_config/3, update_config/2,
+ set_config/2, set_config/3,
+ update_config/2, update_config/3,
update_formatter_config/2]).
+%% Helper
+-export([diff_maps/2]).
+
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
terminate/2]).
@@ -105,12 +109,25 @@ cache_module_level(Module) ->
gen_server:cast(?SERVER,{cache_module_level,Module}).
set_config(Owner,Key,Value) ->
- update_config(Owner,#{Key=>Value}).
+ case sanity_check(Owner,Key,Value) of
+ ok ->
+ call({change_config,set,Owner,Key,Value});
+ Error ->
+ Error
+ end.
set_config(Owner,Config) ->
case sanity_check(Owner,Config) of
ok ->
- call({set_config,Owner,Config});
+ call({change_config,set,Owner,Config});
+ Error ->
+ Error
+ end.
+
+update_config(Owner,Key,Value) ->
+ case sanity_check(Owner,Key,Value) of
+ ok ->
+ call({change_config,update,Owner,Key,Value});
Error ->
Error
end.
@@ -118,7 +135,7 @@ set_config(Owner,Config) ->
update_config(Owner, Config) ->
case sanity_check(Owner,Config) of
ok ->
- call({update_config,Owner,Config});
+ call({change_config,update,Owner,Config});
Error ->
Error
end.
@@ -204,46 +221,72 @@ handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) ->
handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) ->
Reply = do_remove_filter(Tid,Id,FilterId),
{reply,Reply,State};
-handle_call({update_config,primary,NewConfig}, _From, #state{tid=Tid}=State) ->
+handle_call({change_config,SetOrUpd,primary,Config0}, _From,
+ #state{tid=Tid}=State) ->
+ {ok,#{handlers:=Handlers}=OldConfig} = logger_config:get(Tid,primary),
+ Default =
+ case SetOrUpd of
+ set -> default_config(primary);
+ update -> OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}),
+ {reply,Reply,State};
+handle_call({change_config,_SetOrUpd,primary,Key,Value}, _From,
+ #state{tid=Tid}=State) ->
{ok,OldConfig} = logger_config:get(Tid,primary),
- Config = maps:merge(OldConfig,NewConfig),
- {reply,logger_config:set(Tid,primary,Config),State};
-handle_call({update_config,HandlerId,NewConfig}, From, #state{tid=Tid}=State) ->
+ Reply = logger_config:set(Tid,primary,OldConfig#{Key=>Value}),
+ {reply,Reply,State};
+handle_call({change_config,SetOrUpd,HandlerId,Config0}, From,
+ #state{tid=Tid}=State) ->
case logger_config:get(Tid,HandlerId) of
{ok,#{module:=Module}=OldConfig} ->
- Config = maps:merge(OldConfig,NewConfig),
- call_h_async(
- fun() ->
- call_h(Module,changing_config,[OldConfig,Config],
- {ok,Config})
- end,
- fun({ok,Config1}) ->
- logger_config:set(Tid,HandlerId,Config1);
- (Error) ->
- Error
- end,From,State);
- Error ->
- {reply,Error,State}
+ Default =
+ case SetOrUpd of
+ set -> default_config(HandlerId,Module);
+ update -> OldConfig
+ end,
+ Config = maps:merge(Default,Config0),
+ case check_config_change(OldConfig,Config) of
+ ok ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,
+ [SetOrUpd,OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ logger_config:set(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ Error ->
+ {reply,Error,State}
+ end;
+ _ ->
+ {reply,{error,{not_found,HandlerId}},State}
end;
-handle_call({set_config,primary,Config0}, _From, #state{tid=Tid}=State) ->
- Config = maps:merge(default_config(primary),Config0),
- {ok,#{handlers:=Handlers}} = logger_config:get(Tid,primary),
- Reply = logger_config:set(Tid,primary,Config#{handlers=>Handlers}),
- {reply,Reply,State};
-handle_call({set_config,HandlerId,Config0}, From, #state{tid=Tid}=State) ->
+handle_call({change_config,SetOrUpd,HandlerId,Key,Value}, From,
+ #state{tid=Tid}=State) ->
case logger_config:get(Tid,HandlerId) of
{ok,#{module:=Module}=OldConfig} ->
- Config = maps:merge(default_config(HandlerId,Module),Config0),
- call_h_async(
- fun() ->
- call_h(Module,changing_config,[OldConfig,Config],
- {ok,Config})
- end,
- fun({ok,Config1}) ->
- logger_config:set(Tid,HandlerId,Config1);
- (Error) ->
- Error
- end,From,State);
+ Config = OldConfig#{Key=>Value},
+ case check_config_change(OldConfig,Config) of
+ ok ->
+ call_h_async(
+ fun() ->
+ call_h(Module,changing_config,
+ [SetOrUpd,OldConfig,Config],
+ {ok,Config})
+ end,
+ fun({ok,Config1}) ->
+ logger_config:set(Tid,HandlerId,Config1);
+ (Error) ->
+ Error
+ end,From,State);
+ Error ->
+ {reply,Error,State}
+ end;
_ ->
{reply,{error,{not_found,HandlerId}},State}
end;
@@ -320,7 +363,7 @@ call(Request) ->
true when
Action == add_handler; Action == remove_handler;
Action == add_filter; Action == remove_filter;
- Action == update_config; Action == set_config ->
+ Action == change_config ->
{error,{attempting_syncronous_call_to_self,Request}};
_ ->
gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT)
@@ -458,6 +501,15 @@ check_formatter({Mod,Config}) ->
check_formatter(Formatter) ->
throw({invalid_formatter,Formatter}).
+%% When changing configuration for a handler, the id and module fields
+%% cannot be changed.
+check_config_change(#{id:=Id,module:=Module},#{id:=Id,module:=Module}) ->
+ ok;
+check_config_change(OldConfig,NewConfig) ->
+ {Old,New} = logger_server:diff_maps(maps:with([id,module],OldConfig),
+ maps:with([id,module],NewConfig)),
+ {error,{illegal_config_change,Old,New}}.
+
call_h(Module, Function, Args, DefRet) ->
%% Not calling code:ensure_loaded + erlang:function_exported here,
%% since in some rare terminal cases, the code_server might not
@@ -466,6 +518,11 @@ call_h(Module, Function, Args, DefRet) ->
catch
C:R:S ->
case {C,R,S} of
+ {error,undef,[{Module,Function=changing_config,Args,_}|_]}
+ when length(Args)=:=3 ->
+                    %% Backwards compatibility: if the handler module does
+                    %% not export changing_config/3, fall back to
+                    %% changing_config/2 (without the SetOrUpdate argument).
+ call_h(Module, Function, tl(Args), DefRet);
{error,undef,[{Module,Function,Args,_}|_]} ->
DefRet;
_ ->
@@ -525,3 +582,14 @@ call_h_reply(Unexpected,State) ->
{process,?SERVER},
{message,Unexpected}]),
{noreply,State}.
+
+%% Return two maps containing only the fields that differ.
+diff_maps(M1,M2) ->
+ diffs(lists:sort(maps:to_list(M1)),lists:sort(maps:to_list(M2)),#{},#{}).
+
+diffs([H|T1],[H|T2],D1,D2) ->
+ diffs(T1,T2,D1,D2);
+diffs([{K,V1}|T1],[{K,V2}|T2],D1,D2) ->
+ diffs(T1,T2,D1#{K=>V1},D2#{K=>V2});
+diffs([],[],D1,D2) ->
+ {D1,D2}.
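diff_maps/2 produces the two maps in the illegal_config_change error above, keeping only the keys whose values differ. For example:

    1> logger_server:diff_maps(#{id => h1, module => logger_std_h},
                               #{id => h1, module => logger_disk_log_h}).
    {#{module => logger_std_h},#{module => logger_disk_log_h}}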
diff --git a/lib/kernel/src/logger_simple_h.erl b/lib/kernel/src/logger_simple_h.erl
index 8b51dd8569..fe181722f3 100644
--- a/lib/kernel/src/logger_simple_h.erl
+++ b/lib/kernel/src/logger_simple_h.erl
@@ -50,7 +50,6 @@ removing_handler(#{id:=simple}) ->
ok;
Pid ->
Ref = erlang:monitor(process,Pid),
- unlink(Pid),
Pid ! stop,
receive {'DOWN',Ref,process,Pid,_} ->
ok
@@ -99,7 +98,11 @@ loop(Buffer) ->
replay_buffer(Buffer);
_ ->
ok
- end;
+ end,
+ %% Before stopping, we unlink the logger process to avoid
+ %% an unexpected EXIT message
+ unlink(whereis(logger)),
+ ok;
{log,#{msg:=_,meta:=#{time:=_}}=Log} ->
do_log(Log),
loop(update_buffer(Buffer,Log));
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index 66fa6b6ab6..63d1dbaba2 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -19,8 +19,6 @@
%%
-module(logger_std_h).
--behaviour(gen_server).
-
-include("logger.hrl").
-include("logger_internal.hrl").
-include("logger_h_common.hrl").
@@ -28,51 +26,28 @@
-include_lib("kernel/include/file.hrl").
%% API
--export([start_link/3, info/1, filesync/1, reset/1]).
+-export([info/1, filesync/1, reset/1]).
-%% gen_server and proc_lib callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+%% logger_h_common callbacks
+-export([init/2, check_config/4, reset_state/2,
+ filesync/3, write/4, handle_info/3, terminate/3]).
%% logger callbacks
--export([log/2, adding_handler/1, removing_handler/1, changing_config/2]).
-
-%% handler internal
--export([log_handler_info/4]).
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
%%%===================================================================
%%% API
%%%===================================================================
%%%-----------------------------------------------------------------
-%%% Start a standard handler process and link to caller.
-%%% This function is called by the kernel supervisor when this
-%%% handler process gets added
--spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
- Name :: atom(),
- Config :: logger:handler_config(),
- HandlerState :: map(),
- Pid :: pid(),
- Reason :: term().
-
-start_link(Name, Config, HandlerState) ->
- proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
-
-%%%-----------------------------------------------------------------
%%%
-spec filesync(Name) -> ok | {error,Reason} when
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-filesync(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- filesync, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
filesync(Name) ->
- {error,{badarg,{filesync,[Name]}}}.
+ logger_h_common:filesync(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -81,15 +56,8 @@ filesync(Name) ->
Info :: term(),
Reason :: handler_busy | {badarg,term()}.
-info(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- info, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
info(Name) ->
- {error,{badarg,{info,[Name]}}}.
+ logger_h_common:info(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -97,116 +65,42 @@ info(Name) ->
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-reset(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- reset, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
reset(Name) ->
- {error,{badarg,{reset,[Name]}}}.
-
+ logger_h_common:reset(?MODULE,Name).
%%%===================================================================
-%%% logger callbacks
+%%% logger callbacks - just forward to logger_h_common
%%%===================================================================
%%%-----------------------------------------------------------------
%%% Handler being added
-adding_handler(#{id:=Name}=Config) ->
- case check_config(adding, Config) of
- {ok, Config1} ->
- %% create initial handler state by merging defaults with config
- HConfig = maps:get(config, Config1, #{}),
- HState = maps:merge(get_init_state(), HConfig),
- case logger_h_common:overload_levels_ok(HState) of
- true ->
- start(Name, Config1, HState);
- false ->
- #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = HState,
- {error,{invalid_levels,{SMQL,DMQL,FQL}}}
- end;
- Error ->
- Error
- end.
+-spec adding_handler(Config) -> {ok,Config} | {error,Reason} when
+ Config :: logger:handler_config(),
+ Reason :: term().
+
+adding_handler(Config) ->
+ logger_h_common:adding_handler(Config).
%%%-----------------------------------------------------------------
%%% Updating handler config
-changing_config(OldConfig=#{id:=Name, config:=OldHConfig},
- NewConfig=#{id:=Name}) ->
- #{type:=Type, handler_pid:=HPid, mode_tab:=ModeTab} = OldHConfig,
- NewHConfig = maps:get(config, NewConfig, #{}),
- case maps:get(type, NewHConfig, Type) of
- Type ->
- NewHConfig1 = NewHConfig#{type=>Type,
- handler_pid=>HPid,
- mode_tab=>ModeTab},
- changing_config1(HPid, OldConfig,
- NewConfig#{config=>NewHConfig1});
- _ ->
- {error,{illegal_config_change,OldConfig,NewConfig}}
- end;
-changing_config(OldConfig, NewConfig) ->
- {error,{illegal_config_change,OldConfig,NewConfig}}.
-
-changing_config1(HPid, OldConfig, NewConfig) ->
- case check_config(changing, NewConfig) of
- Result = {ok,NewConfig1} ->
- try gen_server:call(HPid, {change_config,OldConfig,NewConfig1},
- ?DEFAULT_CALL_TIMEOUT) of
- ok -> Result;
- HError -> HError
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
- Error ->
- Error
- end.
-
-check_config(adding, Config) ->
- %% Merge in defaults on handler level
- HConfig0 = maps:get(config, Config, #{}),
- HConfig = maps:merge(#{type => standard_io},
- HConfig0),
- case check_h_config(maps:to_list(HConfig)) of
- ok ->
- {ok,Config#{config=>HConfig}};
- Error ->
- Error
- end;
-check_config(changing, Config) ->
- HConfig = maps:get(config, Config, #{}),
- case check_h_config(maps:to_list(HConfig)) of
- ok -> {ok,Config};
- Error -> Error
- end.
-
-check_h_config([{type,Type} | Config]) when Type == standard_io;
- Type == standard_error ->
- check_h_config(Config);
-check_h_config([{type,{file,File}} | Config]) when is_list(File) ->
- check_h_config(Config);
-check_h_config([{type,{file,File,Modes}} | Config]) when is_list(File),
- is_list(Modes) ->
- check_h_config(Config);
-check_h_config([Other | Config]) ->
- case logger_h_common:check_common_config(Other) of
- valid ->
- check_h_config(Config);
- invalid ->
- {error,{invalid_config,?MODULE,Other}}
- end;
-check_h_config([]) ->
- ok.
+-spec changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ {ok,Config} | {error,Reason} when
+ SetOrUpdate :: set | update,
+ OldConfig :: logger:handler_config(),
+ NewConfig :: logger:handler_config(),
+ Config :: logger:handler_config(),
+ Reason :: term().
+changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ logger_h_common:changing_config(SetOrUpdate, OldConfig, NewConfig).
%%%-----------------------------------------------------------------
%%% Handler being removed
-removing_handler(#{id:=Name}) ->
- stop(Name).
+-spec removing_handler(Config) -> ok when
+ Config :: logger:handler_config().
+
+removing_handler(Config) ->
+ logger_h_common:removing_handler(Config).
%%%-----------------------------------------------------------------
%%% Log a string or report
@@ -214,192 +108,103 @@ removing_handler(#{id:=Name}) ->
LogEvent :: logger:log_event(),
Config :: logger:handler_config().
-log(LogEvent, Config = #{id := Name,
- config := #{handler_pid := HPid,
- mode_tab := ModeTab}}) ->
- %% if the handler has crashed, we must drop this event
- %% and hope the handler restarts so we can try again
- true = is_process_alive(HPid),
- Bin = logger_h_common:log_to_binary(LogEvent, Config),
- logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
+log(LogEvent, Config) ->
+ logger_h_common:log(LogEvent, Config).
-%%%===================================================================
-%%% gen_server callbacks
-%%%===================================================================
-
-init([Name, Config = #{config := HConfig},
- State0 = #{type := Type, file_ctrl_sync_int := FileCtrlSyncInt}]) ->
- RegName = ?name_to_reg_name(?MODULE,Name),
- register(RegName, self()),
- process_flag(trap_exit, true),
- process_flag(message_queue_data, off_heap),
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+-spec filter_config(Config) -> Config when
+ Config :: logger:handler_config().
- ?init_test_hooks(),
- ?start_observation(Name),
-
- case do_init(Name, Type) of
- {ok,InitState} ->
- try ets:new(Name, [public]) of
- ModeTab ->
- ?set_mode(ModeTab, async),
- State = maps:merge(State0, InitState),
- T0 = ?timestamp(),
- State1 =
- ?merge_with_stats(State#{
- mode_tab => ModeTab,
- mode => async,
- file_ctrl_sync => FileCtrlSyncInt,
- last_qlen => 0,
- last_log_ts => T0,
- last_op => sync,
- burst_win_ts => T0,
- burst_msg_count => 0}),
- Config1 =
- Config#{config => HConfig#{handler_pid => self(),
- mode_tab => ModeTab}},
- proc_lib:init_ack({ok,self(),Config1}),
- gen_server:cast(self(), repeated_filesync),
- gen_server:enter_loop(?MODULE, [], State1)
- catch
- _:Error ->
- unregister(RegName),
- logger_h_common:error_notify({init_handler,Name,Error}),
- proc_lib:init_ack(Error)
- end;
- Error ->
- unregister(RegName),
- logger_h_common:error_notify({init_handler,Name,Error}),
- proc_lib:init_ack(Error)
- end.
+filter_config(Config) ->
+ logger_h_common:filter_config(Config).
-do_init(Name, Type) ->
+%%%===================================================================
+%%% logger_h_common callbacks
+%%%===================================================================
+init(Name, #{type := Type}) ->
case open_log_file(Name, Type) of
{ok,FileCtrlPid} ->
- case logger_h_common:unset_restart_flag(Name, ?MODULE) of
- true ->
- %% inform about restart
- gen_server:cast(self(), {log_handler_info,
- "Handler ~p restarted",
- [Name]});
- false ->
- %% initial start
- ok
- end,
- {ok,#{id=>Name,type=>Type,file_ctrl_pid=>FileCtrlPid}};
+ {ok,#{type=>Type,file_ctrl_pid=>FileCtrlPid}};
Error ->
Error
end.
-%% This is the synchronous log event.
-handle_call({log, Bin}, _From, State) ->
- {Result,State1} = do_log(Bin, call, State),
- %% Result == ok | dropped
- {reply,Result, State1};
-
-handle_call(filesync, _From, State = #{type := Type,
- file_ctrl_pid := FileCtrlPid}) ->
- if is_atom(Type) ->
- {reply, ok, State};
- true ->
- {reply, file_ctrl_filesync_sync(FileCtrlPid), State#{last_op=>sync}}
- end;
+check_config(_Name,set,undefined,NewHConfig) ->
+ check_config(maps:merge(get_default_config(),NewHConfig));
+check_config(_Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
+ WriteOnce = maps:with([type],OldHConfig),
+ Default =
+ case SetOrUpdate of
+ set ->
+ %% Do not reset write-once fields to defaults
+ maps:merge(get_default_config(),WriteOnce);
+ update ->
+ OldHConfig
+ end,
-handle_call({change_config,_OldConfig,NewConfig}, _From,
- State = #{filesync_repeat_interval := FSyncInt0}) ->
- HConfig = maps:get(config, NewConfig, #{}),
- State1 = maps:merge(State, HConfig),
- case logger_h_common:overload_levels_ok(State1) of
- true ->
- _ =
- case maps:get(filesync_repeat_interval, HConfig, undefined) of
- undefined ->
- ok;
- no_repeat ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined));
- FSyncInt0 ->
- ok;
- _FSyncInt1 ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined)),
- gen_server:cast(self(), repeated_filesync)
- end,
- {reply, ok, State1};
- false ->
- #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = State1,
- {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State}
- end;
+ NewHConfig = maps:merge(Default, NewHConfig0),
-handle_call(info, _From, State) ->
- {reply, State, State};
-
-handle_call(reset, _From, State) ->
- State1 = ?merge_with_stats(State),
- {reply, ok, State1#{last_qlen => 0,
- last_log_ts => ?timestamp()}};
-
-handle_call(stop, _From, State) ->
- {stop, {shutdown,stopped}, ok, State}.
-
-%% This is the asynchronous log event.
-handle_cast({log, Bin}, State) ->
- {_,State1} = do_log(Bin, cast, State),
- {noreply, State1};
-
-handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) ->
- log_handler_info(Name, Format, Args, State),
- {noreply, State};
-
-%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
-%% clause gets called repeatedly by the handler. In order to
-%% guarantee that a filesync *always* happens after the last log
-%% event, the repeat operation must be active!
-handle_cast(repeated_filesync,
- State = #{type := Type,
- file_ctrl_pid := FileCtrlPid,
- filesync_repeat_interval := FSyncInt,
- last_op := LastOp}) ->
- State1 =
- if not is_atom(Type), is_integer(FSyncInt) ->
- %% only do filesync if something has been
- %% written since last time we checked
- if LastOp == sync ->
- ok;
- true ->
- file_ctrl_filesync_async(FileCtrlPid)
- end,
- {ok,TRef} =
- timer:apply_after(FSyncInt, gen_server,cast,
- [self(),repeated_filesync]),
- State#{rep_sync_tref => TRef, last_op => sync};
- true ->
- State
- end,
- {noreply,State1}.
-
-handle_info({'EXIT',Pid,Why}, State = #{id := Name, type := FileInfo}) ->
- case maps:get(file_ctrl_pid, State, undefined) of
- Pid ->
- %% file error, terminate handler
- logger_h_common:handler_exit(Name,
- {error,{write_failed,FileInfo,Why}});
- _Other ->
- %% ignore EXIT
- ok
- end,
- {noreply, State};
+ %% Fail if write-once fields are changed
+ case maps:with([type],NewHConfig) of
+ WriteOnce ->
+ check_config(NewHConfig);
+ Other ->
+ {error,{illegal_config_change,?MODULE,WriteOnce,Other}}
+ end.
-handle_info(_Info, State) ->
- {noreply, State}.
+check_config(#{type:=Type}=HConfig) ->
+ case check_h_config(maps:to_list(HConfig)) of
+ ok when is_atom(Type) ->
+ {ok,HConfig#{filesync_repeat_interval=>no_repeat}};
+ ok ->
+ {ok,HConfig};
+ {error,{Key,Value}} ->
+ {error,{invalid_config,?MODULE,#{Key=>Value}}}
+ end.
+
+check_h_config([{type,Type} | Config]) when Type == standard_io;
+ Type == standard_error ->
+ check_h_config(Config);
+check_h_config([{type,{file,File}} | Config]) when is_list(File) ->
+ check_h_config(Config);
+check_h_config([{type,{file,File,Modes}} | Config]) when is_list(File),
+ is_list(Modes) ->
+ check_h_config(Config);
+check_h_config([Other | _]) ->
+ {error,Other};
+check_h_config([]) ->
+ ok.
-terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid,
- type:=_FileInfo}) ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
- undefined)),
+get_default_config() ->
+ #{type => standard_io}.
+
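check_config/4 above treats type as a write-once field: it can be chosen when the handler is added, but a later set or update that changes it is rejected. A hedged shell sketch, with an invented handler id and file name and the error term abbreviated:

    1> logger:add_handler(my_file_h, logger_std_h,
                          #{config => #{type => {file,"./info.log"}}}).
    ok
    %% changing type afterwards is refused by check_config/4:
    2> logger:set_handler_config(my_file_h, config, #{type => standard_io}).
    %% -> {error,{illegal_config_change,logger_std_h,
    %%            #{type => {file,"./info.log"}},#{type => standard_io}}}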
+filesync(_Name, _Mode, #{type := Type}=State) when is_atom(Type) ->
+ {ok,State};
+filesync(_Name, async, #{file_ctrl_pid := FileCtrlPid} = State) ->
+ ok = file_ctrl_filesync_async(FileCtrlPid),
+ {ok,State};
+filesync(_Name, sync, #{file_ctrl_pid := FileCtrlPid} = State) ->
+ Result = file_ctrl_filesync_sync(FileCtrlPid),
+ {Result,State}.
+
+write(_Name, async, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
+ ok = file_write_async(FileCtrlPid, Bin),
+ {ok,State};
+write(_Name, sync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
+ Result = file_write_sync(FileCtrlPid, Bin),
+ {Result,State}.
+
+reset_state(_Name, State) ->
+ State.
+
+handle_info(_Name, {'EXIT',Pid,Why}, #{type := FileInfo, file_ctrl_pid := Pid}) ->
+ %% file_ctrl_pid died, file error, terminate handler
+ exit({error,{write_failed,FileInfo,Why}});
+handle_info(_, _, State) ->
+ State.
+
+terminate(_Name, _Reason, #{file_ctrl_pid:=FWPid}) ->
case is_process_alive(FWPid) of
true ->
unlink(FWPid),
@@ -410,17 +215,12 @@ terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid,
ok
after
?DEFAULT_CALL_TIMEOUT ->
- exit(FWPid, kill)
+ exit(FWPid, kill),
+ ok
end;
false ->
ok
- end,
- ok = logger_h_common:stop_or_restart(Name, Reason, State),
- unregister(?name_to_reg_name(?MODULE, Name)),
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
+ end.
%%%===================================================================
%%% Internal functions
@@ -428,200 +228,6 @@ code_change(_OldVsn, State, _Extra) ->
%%%-----------------------------------------------------------------
%%%
-get_init_state() ->
- #{sync_mode_qlen => ?SYNC_MODE_QLEN,
- drop_mode_qlen => ?DROP_MODE_QLEN,
- flush_qlen => ?FLUSH_QLEN,
- burst_limit_enable => ?BURST_LIMIT_ENABLE,
- burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
- burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
- overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
- overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
- overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
- overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
- file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
- filesync_ok_qlen => ?FILESYNC_OK_QLEN,
- filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
-
-%%%-----------------------------------------------------------------
-%%% Add a standard handler to the logger.
-%%% This starts a dedicated handler process which should always
-%%% exist if the handler is registered with logger (and should not
-%%% exist if the handler is not registered).
-%%%
-%%% Handler specific config should be provided with a sub map associated
-%%% with a key named 'config', e.g:
-%%%
-%%% Config = #{config => #{sync_mode_qlen => 50}
-%%%
-%%% The standard handler process is linked to logger_sup, which is
-%%% part of the kernel application's supervision tree.
-start(Name, Config, HandlerState) ->
- LoggerStdH =
- #{id => Name,
- start => {?MODULE, start_link, [Name,Config,HandlerState]},
- restart => temporary,
- shutdown => 2000,
- type => worker,
- modules => [?MODULE]},
- case supervisor:start_child(logger_sup, LoggerStdH) of
- {ok,Pid,Config1} ->
- ok = logger_handler_watcher:register_handler(Name,Pid),
- {ok,Config1};
- Error ->
- Error
- end.
-
-%%%-----------------------------------------------------------------
-%%% Stop and remove the handler.
-stop(Name) ->
- case whereis(?name_to_reg_name(?MODULE,Name)) of
- undefined ->
- ok;
- Pid ->
- %% We don't want to do supervisor:terminate_child here
- %% since we need to distinguish this explicit stop from a
- %% system termination in order to avoid circular attempts
- %% at removing the handler (implying deadlocks and
- %% timeouts).
- %% And we don't need to do supervisor:delete_child, since
- %% the restart type is temporary, which means that the
- %% child specification is automatically removed from the
- %% supervisor when the process dies.
- _ = gen_server:call(Pid, stop),
- ok
- end.
-
-%%%-----------------------------------------------------------------
-%%% Logging and overload control.
--define(update_file_ctrl_sync(C, Interval),
- if C == 0 -> Interval;
- true -> C-1 end).
-
-%% check for overload between every event (and set Mode to async,
-%% sync or drop accordingly), but never flush the whole mailbox
-%% before LogWindowSize events have been handled
-do_log(Bin, CallOrCast, State = #{id:=Name, mode:=Mode0}) ->
- T1 = ?timestamp(),
-
- %% check if the handler is getting overloaded, or if it's
- %% recovering from overload (the check must be done for each
- %% event to react quickly to large bursts of events and
- %% to ensure that the handler can never end up in drop mode
- %% with an empty mailbox, which would stop operation)
- {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
-
- if (Mode1 == drop) andalso (Mode0 =/= drop) ->
- log_handler_info(Name, "Handler ~p switched to drop mode",
- [Name], State);
- (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
- log_handler_info(Name, "Handler ~p switched to ~w mode",
- [Name,Mode1], State);
- true ->
- ok
- end,
-
- %% kill the handler if it can't keep up with the load
- logger_h_common:kill_if_choked(Name, QLen, Mem, ?MODULE, State),
-
- if Mode1 == flush ->
- flush(Name, QLen, T1, State1);
- true ->
- write(Name, Mode1, T1, Bin, CallOrCast, State1)
- end.
-
-%% this clause is called by do_log/3 after an overload check
-%% has been performed, where QLen > FlushQLen
-flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
- %% flush messages in the mailbox (a limited number in
- %% order to not cause long delays)
- NewFlushed = logger_h_common:flush_log_events(?FLUSH_MAX_N),
-
- %% write info in log about flushed messages
- log_handler_info(Name, "Handler ~p flushed ~w log events",
- [Name,NewFlushed], State),
-
- %% because of the receive loop when flushing messages, the
- %% handler will be scheduled out often and the mailbox could
- %% grow very large, so we'd better check the queue again here
- {_,_QLen1} = process_info(self(), message_queue_len),
- ?observe(Name,{max_qlen,_QLen1}),
-
- %% Add 1 for the current log event
- ?observe(Name,{flushed,NewFlushed+1}),
-
- State1 = ?update_max_time(?diff_time(T1,_T0),State),
- {dropped,?update_other(flushed,FLUSHED,NewFlushed,
- State1#{mode => ?set_mode(ModeTab,async),
- last_qlen => 0,
- last_log_ts => T1})}.
-
-%% this clause is called to write to file
-write(_Name, Mode, T1, Bin, _CallOrCast,
- State = #{mode_tab := ModeTab,
- file_ctrl_pid := FileCtrlPid,
- file_ctrl_sync := FileCtrlSync,
- last_qlen := LastQLen,
- last_log_ts := T0,
- file_ctrl_sync_int := FileCtrlSyncInt}) ->
- %% check if we need to limit the number of writes
- %% during a burst of log events
- {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
-
- %% only send a synhrounous event to the file controller process
- %% every FileCtrlSyncInt time, to give the handler time between
- %% file writes so it can keep up with incoming messages
- {Result,LastQLen1} =
- if DoWrite, FileCtrlSync == 0 ->
- ?observe(_Name,{_CallOrCast,1}),
- file_write_sync(FileCtrlPid, Bin, false),
- {ok,element(2, process_info(self(), message_queue_len))};
- DoWrite ->
- ?observe(_Name,{_CallOrCast,1}),
- file_write_async(FileCtrlPid, Bin),
- {ok,LastQLen};
- not DoWrite ->
- ?observe(_Name,{flushed,1}),
- {dropped,LastQLen}
- end,
-
- %% Check if the time since the previous log event is long enough -
- %% and the queue length small enough - to assume the mailbox has
- %% been emptied, and if so, do filesync operation and reset mode to
- %% async. Note that this is the best we can do to detect an idle
- %% handler without setting a timer after each log call/cast. If the
- %% time between two consecutive log events is fast and no new
- %% event comes in after the last one, idle state won't be detected!
- Time = ?diff_time(T1,T0),
- {Mode1,BurstMsgCount1} =
- if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
- (Time > ?IDLE_DETECT_TIME_USEC) ->
- %% do filesync if necessary
- case maps:get(type, State) of
- Std when is_atom(Std) ->
- ok;
- _File ->
- file_ctrl_filesync_async(FileCtrlPid)
- end,
- {?change_mode(ModeTab, Mode, async),0};
- true ->
- {Mode,BurstMsgCount}
- end,
- State1 =
- ?update_calls_or_casts(_CallOrCast,1,State),
- State2 =
- ?update_max_time(Time,
- State1#{mode => Mode1,
- last_qlen := LastQLen1,
- last_log_ts => T1,
- last_op => write,
- burst_win_ts => BurstWinT,
- burst_msg_count => BurstMsgCount1,
- file_ctrl_sync =>
- ?update_file_ctrl_sync(FileCtrlSync,
- FileCtrlSyncInt)}),
- {Result,State2}.
-
open_log_file(HandlerName, FileInfo) ->
case file_ctrl_start(HandlerName, FileInfo) of
OK = {ok,_FileCtrlPid} -> OK;
@@ -653,19 +259,6 @@ close_log_file(Fd) ->
_ = file:close(Fd).
-log_handler_info(Name, Format, Args, #{file_ctrl_pid := FileCtrlPid}) ->
- Config =
- case logger:get_handler_config(Name) of
- {ok,Conf} -> Conf;
- _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
- end,
- Meta = #{time=>erlang:system_time(microsecond)},
- Bin = logger_h_common:log_to_binary(#{level => notice,
- msg => {Format,Args},
- meta => Meta}, Config),
- _ = file_write_async(FileCtrlPid, Bin),
- ok.
-
%%%-----------------------------------------------------------------
%%% File control process
@@ -692,24 +285,19 @@ file_write_async(Pid, Bin) ->
Pid ! {log,Bin},
ok.
-file_write_sync(Pid, Bin, FileSync) ->
- case file_ctrl_call(Pid, {log,self(),Bin,FileSync}) of
- {error,Reason} ->
- {error,{write_failed,Bin,Reason}};
- Result ->
- Result
- end.
+file_write_sync(Pid, Bin) ->
+ file_ctrl_call(Pid, {log,Bin}).
file_ctrl_filesync_async(Pid) ->
Pid ! filesync,
ok.
file_ctrl_filesync_sync(Pid) ->
- file_ctrl_call(Pid, {filesync,self()}).
+ file_ctrl_call(Pid, filesync).
file_ctrl_call(Pid, Msg) ->
MRef = monitor(process, Pid),
- Pid ! {Msg,MRef},
+ Pid ! {Msg,{self(),MRef}},
receive
{MRef,Result} ->
demonitor(MRef, [flush]),
@@ -727,74 +315,43 @@ file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) ->
case do_open_log_file(FileInfo) of
{ok,Fd} ->
Starter ! {self(),ok},
- file_ctrl_loop(Fd, file, FileName, false, ok, ok, HandlerName);
+ file_ctrl_loop(Fd, FileName, false, ok, ok, HandlerName);
{error,Reason} ->
Starter ! {self(),{error,{open_failed,FileName,Reason}}}
end;
file_ctrl_init(HandlerName, StdDev, Starter) ->
Starter ! {self(),ok},
- file_ctrl_loop(StdDev, standard_io, StdDev, false, ok, ok, HandlerName).
+ file_ctrl_loop(StdDev, StdDev, false, ok, ok, HandlerName).
-file_ctrl_loop(Fd, Type, DevName, Synced,
+file_ctrl_loop(Fd, DevName, Synced,
PrevWriteResult, PrevSyncResult, HandlerName) ->
receive
%% asynchronous event
{log,Bin} ->
- Result = if Type == file ->
- write_to_dev(Fd, Bin, DevName,
- PrevWriteResult, HandlerName);
- true ->
- io:put_chars(Fd, Bin)
- end,
- file_ctrl_loop(Fd, Type, DevName, false,
+ Fd1 = ensure(Fd, DevName),
+ Result = write_to_dev(Fd1, Bin, DevName, PrevWriteResult, HandlerName),
+ file_ctrl_loop(Fd1, DevName, false,
Result, PrevSyncResult, HandlerName);
%% synchronous event
- {{log,From,Bin,FileSync},MRef} ->
- if Type == file ->
- %% check that file hasn't been deleted
- CheckFile =
- fun() -> {ok,_} = file:read_file_info(DevName) end,
- spawn_link(CheckFile),
- WResult = write_to_dev(Fd, Bin, DevName,
- PrevWriteResult, HandlerName),
- {Synced1,SResult} =
- if not FileSync ->
- {false,PrevSyncResult};
- true ->
- case sync_dev(Fd, DevName,
- PrevSyncResult, HandlerName) of
- ok -> {true,ok};
- Error -> {false,Error}
- end
- end,
- From ! {MRef,ok},
- file_ctrl_loop(Fd, Type, DevName, Synced1,
- WResult, SResult, HandlerName);
- true ->
- _ = io:put_chars(Fd, Bin),
- From ! {MRef,ok},
- file_ctrl_loop(Fd, Type, DevName, false,
- ok, PrevSyncResult, HandlerName)
- end;
+ {{log,Bin},{From,MRef}} ->
+ Fd1 = ensure(Fd, DevName),
+ Result = write_to_dev(Fd1, Bin, DevName, PrevWriteResult, HandlerName),
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd1, DevName, false,
+ Result, PrevSyncResult, HandlerName);
- filesync when not Synced ->
- Result = sync_dev(Fd, DevName, PrevSyncResult, HandlerName),
- file_ctrl_loop(Fd, Type, DevName, true,
+ filesync ->
+ Fd1 = ensure(Fd, DevName),
+ Result = sync_dev(Fd1, DevName, Synced, PrevSyncResult, HandlerName),
+ file_ctrl_loop(Fd1, DevName, true,
PrevWriteResult, Result, HandlerName);
- filesync ->
- file_ctrl_loop(Fd, Type, DevName, true,
- PrevWriteResult, PrevSyncResult, HandlerName);
-
- {{filesync,From},MRef} ->
- Result = if not Synced ->
- sync_dev(Fd, DevName, PrevSyncResult, HandlerName);
- true ->
- ok
- end,
+ {filesync,{From,MRef}} ->
+ Fd1 = ensure(Fd, DevName),
+ Result = sync_dev(Fd1, DevName, Synced, PrevSyncResult, HandlerName),
From ! {MRef,ok},
- file_ctrl_loop(Fd, Type, DevName, true,
+ file_ctrl_loop(Fd1, DevName, true,
PrevWriteResult, Result, HandlerName);
stop ->
@@ -802,27 +359,44 @@ file_ctrl_loop(Fd, Type, DevName, Synced,
stopped
end.
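
The reworked call protocol above tags each request with {self(),MRef} and answers with {MRef,Result}, which is the usual monitor-based synchronous call between two plain processes. Below is a minimal standalone sketch of both ends, assuming nothing about the handler itself; the module name demo_file_ctrl_call and the message payloads are made up for illustration.

    -module(demo_file_ctrl_call).
    -export([start/0, call/2]).

    %% Server side: take {From,MRef} out of the request and reply on MRef.
    start() ->
        spawn(fun loop/0).

    loop() ->
        receive
            {Msg, {From, MRef}} ->
                From ! {MRef, {ok, Msg}},
                loop();
            stop ->
                stopped
        end.

    %% Caller side: monitor the server so a crash becomes {error,Reason}
    %% instead of a hang, then match the reply on the same MRef.
    call(Pid, Msg) ->
        MRef = monitor(process, Pid),
        Pid ! {Msg, {self(), MRef}},
        receive
            {MRef, Result} ->
                demonitor(MRef, [flush]),
                Result;
            {'DOWN', MRef, _, _, Reason} ->
                {error, Reason}
        end.
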
-write_to_dev(Fd, Bin, FileName, PrevWriteResult, HandlerName) ->
- case ?file_write(Fd, Bin) of
- ok ->
- ok;
- PrevWriteResult ->
- %% don't report same error twice
- PrevWriteResult;
- Error ->
- logger_h_common:error_notify({HandlerName,write,FileName,Error}),
- Error
+%% In order to play well with tools like logrotate, we need to be able
+%% to re-create the file if it has disappeared (e.g. if rotated by
+%% logrotate)
+ensure(Fd,DevName) when is_atom(DevName) ->
+ Fd;
+ensure(Fd,FileName) ->
+ case file:read_file_info(FileName) of
+ {ok,_} ->
+ Fd;
+ _ ->
+ _ = file:close(Fd),
+ _ = file:close(Fd), % delayed_write causes the first close not to close
+ case do_open_log_file({file,FileName}) of
+ {ok,Fd1} ->
+ Fd1;
+ Error ->
+ exit({could_not_reopen_file,Error})
+ end
end.
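
To make the logrotate interplay above concrete, here is a minimal sketch of the same check-then-reopen idea in isolation. The module name demo_reopen and the open options are assumptions; the handler itself goes through do_open_log_file/1 with its own configured modes.

    -module(demo_reopen).
    -export([ensure_fd/2]).

    %% Return a descriptor that points at FileName, reopening the file if an
    %% external tool (e.g. logrotate) has moved or removed it since last write.
    ensure_fd(Fd, FileName) ->
        case file:read_file_info(FileName) of
            {ok, _Info} ->
                Fd;                     % file still in place, keep descriptor
            {error, _Reason} ->
                _ = file:close(Fd),     % drop the descriptor to the old file
                {ok, NewFd} = file:open(FileName, [append, raw, binary]),
                NewFd
        end.
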
-sync_dev(Fd, DevName, PrevSyncResult, HandlerName) ->
- case ?file_datasync(Fd) of
- ok ->
- ok;
- PrevSyncResult ->
- %% don't report same error twice
- PrevSyncResult;
- Error ->
- logger_h_common:error_notify({HandlerName,filesync,DevName,Error}),
- Error
- end.
+write_to_dev(DevName, Bin, _DevName, _PrevWriteResult, _HandlerName)
+ when is_atom(DevName) ->
+ io:put_chars(DevName, Bin);
+write_to_dev(Fd, Bin, FileName, PrevWriteResult, HandlerName) ->
+ Result = ?file_write(Fd, Bin),
+ maybe_notify_error(write,Result,PrevWriteResult,FileName,HandlerName).
+sync_dev(_Fd, _FileName, true, PrevSyncResult, _HandlerName) ->
+ PrevSyncResult;
+sync_dev(Fd, FileName, false, PrevSyncResult, HandlerName) ->
+ Result = ?file_datasync(Fd),
+ maybe_notify_error(filesync,Result,PrevSyncResult,FileName,HandlerName).
+
+maybe_notify_error(_Op, ok, _PrevResult, _FileName, _HandlerName) ->
+ ok;
+maybe_notify_error(_Op, PrevResult, PrevResult, _FileName, _HandlerName) ->
+ %% don't report same error twice
+ PrevResult;
+maybe_notify_error(Op, Error, _PrevResult, FileName, HandlerName) ->
+ logger_h_common:error_notify({HandlerName,Op,FileName,Error}),
+ Error.
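
The PrevResult clause above is what stops a persistent write or filesync error from being reported on every subsequent operation. A self-contained sketch of the same idea, threading the previous result through a receive loop (module and message names are made up for illustration):

    -module(demo_dedup).
    -export([loop/1]).

    %% Report an error only when it differs from the previous one, then
    %% carry the latest result around as the new "previous result".
    loop(PrevResult) ->
        receive
            {op_result, Result} when Result =:= ok; Result =:= PrevResult ->
                loop(Result);                     % ok, or already reported
            {op_result, Error} ->
                io:format("operation failed: ~p~n", [Error]),
                loop(Error);
            stop ->
                ok
        end.
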
diff --git a/lib/kernel/src/net_kernel.erl b/lib/kernel/src/net_kernel.erl
index 3cf11fd7b1..a9dc77837e 100644
--- a/lib/kernel/src/net_kernel.erl
+++ b/lib/kernel/src/net_kernel.erl
@@ -808,7 +808,8 @@ handle_info({AcceptPid, {accept_pending,MyNode,Node,Address,Type}}, State) ->
_:_ ->
error_logger:error_msg("~n** Cannot get connection id for node ~w~n",
[Node]),
- AcceptPid ! {self(),{accept_pending,nok_pending}}
+ AcceptPid ! {self(),{accept_pending,nok_pending}},
+ {noreply, State}
end
end;
diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl
index 194522c009..244bd7e2a0 100644
--- a/lib/kernel/test/gen_tcp_misc_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl
@@ -52,7 +52,8 @@
several_accepts_in_one_go/1, accept_system_limit/1,
active_once_closed/1, send_timeout/1, send_timeout_active/1,
otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1,
- wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1]).
+ wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1,
+ otp_12242/1]).
%% Internal exports.
-export([sender/3, not_owner/1, passive_sockets_server/2, priority_server/1,
@@ -95,7 +96,8 @@ all() ->
killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit,
active_once_closed, send_timeout, send_timeout_active, otp_7731,
wrapping_oct,
- zombie_sockets, otp_7816, otp_8102, otp_9389].
+ zombie_sockets, otp_7816, otp_8102, otp_9389,
+ otp_12242].
groups() ->
[].
@@ -1967,20 +1969,20 @@ recvtclass(_Config) ->
{skip,{ipv6_not_supported,IFs}}
end.
-%% These version numbers are the highest noted in daily tests
-%% where the test fails for a plausible reason, so
-%% skip on that platform.
+%% These version numbers are above the highest noted
+%% in daily tests where the test fails for a plausible reason,
+%% so skip on platforms of lower version, i.e. they are future
+%% versions on which the test might not fail.
%%
-%% On newer versions it might be fixed, but we'll see about that
-%% when machines with newer versions gets installed...
-%% If the test still fails for a plausible reason these
+%% When machines with newer versions get installed,
+%% if the test still fails for a plausible reason these
%% version numbers simply should be increased.
%% Or maybe we should change to only test on known good
%% platforms - change {unix,_} to false?
%% pktoptions is not supported for IPv4
recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
-recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0});
+recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
%% Using the option returns einval, so it is not implemented.
recvtos_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
@@ -1992,18 +1994,19 @@ recvtos_ok(_, _) -> false.
%% pktoptions is not supported for IPv4
recvttl_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
-recvttl_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0});
+recvttl_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
%% Using the option returns einval, so it is not implemented.
recvttl_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
recvttl_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Does not return any value - not implemented for pktoptions
+recvttl_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,7,0});
%%
-recvttl_ok({unix,linux}, _) -> true;
recvttl_ok({unix,_}, _) -> true;
recvttl_ok(_, _) -> false.
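
The *_ok/2 clauses above only run the test on OS versions at or above the noted cut-off, using a semver_lt/2 helper defined elsewhere in this suite. A plausible sketch of such a three-part version comparison follows; this is not necessarily the suite's exact implementation.

    %% Erlang compares equally sized tuples element by element, so a plain
    %% tuple comparison gives the intended "strictly lower version" test.
    semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) ->
        {X1,Y1,Z1} < {X2,Y2,Z2};
    semver_lt(_OSVer, _Vsn) ->
        false.                     % unrecognized version term: not lower

With this shape, not semver_lt(OSVer, {19,0,0}) is true exactly when OSVer is 19.0.0 or later.
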
%% pktoptions is not supported for IPv6
recvtclass_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
-recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0});
+recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
recvtclass_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
%% Using the option returns einval, so it is not implemented.
recvtclass_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
@@ -3284,3 +3287,143 @@ otp_13939(Config) when is_list(Config) ->
exit(Pid, normal),
ct:fail("Server process blocked on send.")
end.
+
+otp_12242(Config) when is_list(Config) ->
+ case os:type() of
+ {win32,_} ->
+ %% Even if we set sndbuf and recbuf to small sizes,
+ %% Windows either happily accepts sending gigabytes of data
+ %% in no time, so the second send below that is supposed
+ %% to time out just succeeds, or the first send that
+ %% is supposed to fill the inet_drv I/O queue and
+ %% start waiting until more data can be sent
+ %% instead sends all data but suffers a send
+ %% failure that closes the socket
+ {skipped,backpressure_broken_on_win32};
+ _ ->
+ %% Find the IPv4 address of an up and running interface
+ %% that is neither loopback nor pointtopoint
+ {ok,IFList} = inet:getifaddrs(),
+ ct:pal("IFList ~p~n", [IFList]),
+ case
+ lists:flatten(
+ [lists:filtermap(
+ fun ({addr,Addr}) when tuple_size(Addr) =:= 4 ->
+ {true,Addr};
+ (_) ->
+ false
+ end, Opts)
+ || {_,Opts} <- IFList,
+ case lists:keyfind(flags, 1, Opts) of
+ {_,Flags} ->
+ lists:member(up, Flags)
+ andalso
+ lists:member(running, Flags)
+ andalso
+ not lists:member(loopback, Flags)
+ andalso
+ not lists:member(pointtopoint, Flags);
+ false ->
+ false
+ end])
+ of
+ [Addr|_] ->
+ otp_12242(Addr);
+ Other ->
+ {skipped,{no_external_address,Other}}
+ end
+ end;
+%%
+otp_12242(Addr) when tuple_size(Addr) =:= 4 ->
+ ct:timetrap(30000),
+ ct:pal("Using address ~p~n", [Addr]),
+ Bufsize = 16 * 1024,
+ Datasize = 128 * 1024 * 1024, % At least 1 s on GBit interface
+ Blob = binary:copy(<<$x>>, Datasize),
+ LOpts =
+ [{backlog,4},{reuseaddr,true},{ip,Addr},
+ binary,{active,false},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ COpts =
+ [binary,{active,false},{ip,Addr},
+ {linger,{true,1}}, % 1 s
+ {send_timeout,500},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ Dir = filename:dirname(code:which(?MODULE)),
+ {ok,ListenerNode} =
+ test_server:start_node(
+ ?UNIQ_NODE_NAME, slave, [{args,"-pa " ++ Dir}]),
+ Tester = self(),
+ Listener =
+ spawn(
+ ListenerNode,
+ fun () ->
+ {ok,L} = gen_tcp:listen(0, LOpts),
+ {ok,LPort} = inet:port(L),
+ Tester ! {self(),port,LPort},
+ {ok,A} = gen_tcp:accept(L),
+ ok = gen_tcp:close(L),
+ receive
+ {Tester,stop} ->
+ ok = gen_tcp:close(A)
+ end
+ end),
+ ListenerMref = monitor(process, Listener),
+ LPort = receive {Listener,port,P} -> P end,
+ {ok,C} = gen_tcp:connect(Addr, LPort, COpts, infinity),
+ {ok,ReadCOpts} = inet:getopts(C, [recbuf,sndbuf,buffer]),
+ ct:pal("ReadCOpts ~p~n", [ReadCOpts]),
+ %%
+ %% Fill the buffers
+ ct:pal("Sending ~p bytes~n", [Datasize]),
+ ok = gen_tcp:send(C, Blob),
+ ct:pal("Sent ~p bytes~n", [Datasize]),
+ %% Spawn the Closer,
+ %% try to ensure that the close call is in progress
+ %% before the owner proceeds with sending
+ Owner = self(),
+ {_Closer,CloserMref} =
+ spawn_opt(
+ fun () ->
+ Owner ! {tref, erlang:start_timer(50, Owner, closing)},
+ ct:pal("Calling gen_tcp:close(C)~n"),
+ try gen_tcp:close(C) of
+ Result ->
+ ct:pal("gen_tcp:close(C) -> ~p~n", [Result]),
+ ok = Result
+ catch
+ Class:Reason:Stacktrace ->
+ ct:pal(
+ "gen_tcp:close(C) >< ~p:~p~n ~p~n",
+ [Class,Reason,Stacktrace]),
+ erlang:raise(Class, Reason, Stacktrace)
+ end
+ end, [link,monitor]),
+ receive
+ {tref,Tref} ->
+ receive {timeout,Tref,_} -> ok end,
+ ct:pal("Sending ~p bytes again~n", [Datasize]),
+ %% Now the close should be in progress...
+ %% All buffers are full, remote end is not reading,
+ %% and the send timeout is 0.5 s, so this will time out:
+ {error,timeout} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes again timed out~n", [Datasize]),
+ ok = inet:setopts(C, [{send_timeout,10000}]),
+ %% There is a hidden timeout here. Port close is sampled
+ %% every 5 s by prim_inet:send_recv_reply.
+ %% Linger is 1 s, so the Closer will finish this send:
+ ct:pal("Sending ~p bytes with 10 s timeout~n", [Datasize]),
+ {error,closed} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes with 10 s timeout was closed~n",
+ [Datasize]),
+ normal = wait(CloserMref),
+ ct:pal("The Closer has exited~n"),
+ Listener ! {Tester,stop},
+ receive {'DOWN',ListenerMref,_,_,_} -> ok end,
+ ct:pal("The Listener has exited~n"),
+ test_server:stop_node(ListenerNode),
+ ok
+ end.
+
+wait(Mref) ->
+ receive {'DOWN',Mref,_,_,Reason} -> Reason end.
diff --git a/lib/kernel/test/inet_SUITE.erl b/lib/kernel/test/inet_SUITE.erl
index 2e5f8c7d2c..f436eafad3 100644
--- a/lib/kernel/test/inet_SUITE.erl
+++ b/lib/kernel/test/inet_SUITE.erl
@@ -1060,28 +1060,26 @@ getservbyname_overflow(Config) when is_list(Config) ->
getifaddrs(Config) when is_list (Config) ->
{ok,IfAddrs} = inet:getifaddrs(),
io:format("IfAddrs = ~p.~n", [IfAddrs]),
- case
- {os:type(),
- [If ||
- {If,Opts} <- IfAddrs,
- lists:keymember(hwaddr, 1, Opts)]} of
- {{unix,sunos},[]} -> ok;
- {OT,[]} ->
- ct:fail({should_have_hwaddr,OT});
- _ -> ok
+ case [If || {If,Opts} <- IfAddrs, lists:keymember(hwaddr, 1, Opts)] of
+ [] ->
+ case os:type() of
+ {unix,sunos} -> ok;
+ OT ->
+ ct:fail({should_have_hwaddr,OT})
+ end;
+ [_|_] -> ok
end,
- Addrs =
- [element(1, A) || A <- ifaddrs(IfAddrs)],
+ Addrs = ifaddrs(IfAddrs),
io:format("Addrs = ~p.~n", [Addrs]),
[check_addr(Addr) || Addr <- Addrs],
ok.
-check_addr({addr,Addr})
+check_addr(Addr)
when tuple_size(Addr) =:= 8,
element(1, Addr) band 16#FFC0 =:= 16#FE80 ->
io:format("Addr: ~p link local; SKIPPED!~n", [Addr]),
ok;
-check_addr({addr,Addr}) ->
+check_addr(Addr) ->
io:format("Addr: ~p.~n", [Addr]),
Ping = "ping",
Pong = "pong",
@@ -1097,78 +1095,86 @@ check_addr({addr,Addr}) ->
ok = gen_tcp:close(S2),
ok = gen_tcp:close(L).
--record(ifopts, {name,flags,addrs=[],hwaddr}).
-
-ifaddrs([]) -> [];
-ifaddrs([{If,Opts}|IOs]) ->
- #ifopts{flags=F} = Ifopts = check_ifopts(Opts, #ifopts{name=If}),
- case F of
- {flags,Flags} ->
- case lists:member(running, Flags) of
- true -> Ifopts#ifopts.addrs;
- false -> []
- end ++ ifaddrs(IOs);
- undefined ->
- ifaddrs(IOs)
+ifaddrs(IfOpts) ->
+ IfMap = collect_ifopts(IfOpts),
+ ChkFun =
+ fun Self({{_,Flags} = Key, Opts}, ok) ->
+ Broadcast = lists:member(broadcast, Flags),
+ P2P = lists:member(pointtopoint, Flags),
+ case Opts of
+ [{addr,_},{netmask,_},{broadaddr,_}|Os]
+ when Broadcast ->
+ Self({Key, Os}, ok);
+ [{addr,_},{netmask,_},{dstaddr,_}|Os]
+ when P2P ->
+ Self({Key, Os}, ok);
+ [{addr,_},{netmask,_}|Os] ->
+ Self({Key, Os}, ok);
+ [{hwaddr,_}|Os] ->
+ Self({Key, Os}, ok);
+ [] ->
+ ok
+ end
+ end,
+ fold_ifopts(ChkFun, ok, IfMap),
+ AddrsFun =
+ fun ({{_,Flags}, Opts}, Acc) ->
+ case
+ lists:member(running, Flags)
+ andalso (not lists:member(pointtopoint, Flags))
+ of
+ true ->
+ lists:reverse(
+ [Addr || {addr,Addr} <- Opts],
+ Acc);
+ false ->
+ Acc
+ end
+ end,
+ fold_ifopts(AddrsFun, [], IfMap).
+
+collect_ifopts(IfOpts) ->
+ collect_ifopts(IfOpts, #{}).
+%%
+collect_ifopts(IfOpts, IfMap) ->
+ case IfOpts of
+ [{If,[{flags,Flags}|Opts]}|IfOs] ->
+ Key = {If,Flags},
+ case maps:is_key(Key, IfMap) of
+ true ->
+ ct:fail({unexpected_ifopts,IfOpts,IfMap});
+ false ->
+ collect_ifopts(IfOs, IfMap, Opts, Key, [])
+ end;
+ [] ->
+ IfMap;
+ _ ->
+ ct:fail({unexpected_ifopts,IfOpts,IfMap})
+ end.
+%%
+collect_ifopts(IfOpts, IfMap, Opts, Key, R) ->
+ case Opts of
+ [{flags,_}|_] ->
+ {If,_} = Key,
+ collect_ifopts(
+ [{If,Opts}|IfOpts], maps:put(Key, lists:reverse(R), IfMap));
+ [OptVal|Os] ->
+ collect_ifopts(IfOpts, IfMap, Os, Key, [OptVal|R]);
+ [] ->
+ collect_ifopts(IfOpts, maps:put(Key, lists:reverse(R), IfMap))
end.
-check_ifopts([], #ifopts{flags=F,addrs=Raddrs}=Ifopts) ->
- Addrs = lists:reverse(Raddrs),
- R = Ifopts#ifopts{addrs=Addrs},
- io:format("~p.~n", [R]),
- %% See how we did...
- {flags,Flags} = F,
- case lists:member(broadcast, Flags) of
- true ->
- [case A of
- {{addr,_},{netmask,_},{broadaddr,_}} ->
- A;
- {{addr,T},{netmask,_}} when tuple_size(T) =:= 8 ->
- A
- end || A <- Addrs];
- false ->
- case lists:member(pointtopoint, Flags) of
- true ->
- [case A of
- {{addr,_},{netmask,_},{dstaddr,_}} ->
- A
- end || A <- Addrs];
- false ->
- [case A of
- {{addr,_},{netmask,_}} ->
- A
- end || A <- Addrs]
- end
- end,
- R;
-check_ifopts([{flags,_}=F|Opts], #ifopts{flags=undefined}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{flags=F});
-check_ifopts([{flags,_}=F|Opts], #ifopts{flags=Flags}=Ifopts) ->
- case F of
- Flags ->
- check_ifopts(Opts, Ifopts);
- _ ->
- ct:fail({multiple_flags,F,Ifopts})
- end;
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N,{dstaddr,_}=D|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,D}|Addrs]});
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N,{broadaddr,_}=B|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,B}|Addrs]});
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N}|Addrs]});
-check_ifopts([{addr,_}=A|Opts], #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A}|Addrs]});
-check_ifopts([{hwaddr,Hwaddr}=H|Opts], #ifopts{hwaddr=undefined}=Ifopts)
- when is_list(Hwaddr) ->
- check_ifopts(Opts, Ifopts#ifopts{hwaddr=H});
-check_ifopts([{hwaddr,_}=H|_], #ifopts{}=Ifopts) ->
- ct:fail({multiple_hwaddrs,H,Ifopts}).
+fold_ifopts(Fun, Acc, IfMap) ->
+ fold_ifopts(Fun, Acc, IfMap, maps:keys(IfMap)).
+%%
+fold_ifopts(Fun, Acc, IfMap, Keys) ->
+ case Keys of
+ [Key|Ks] ->
+ Opts = maps:get(Key, IfMap),
+ fold_ifopts(Fun, Fun({Key,Opts}, Acc), IfMap, Ks);
+ [] ->
+ Acc
+ end.
%% Works just like lists:member/2, except that any {127,_,_,_} tuple
%% matches any other {127,_,_,_}. We do this to handle Linux systems
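
The helper described above is defined further down in the suite and is not part of this hunk; one way such a membership check could look is sketched below. The name ip_member/2 is made up here and may not match the suite's actual helper.

    %% Like lists:member/2, but any loopback address {127,_,_,_} matches
    %% any other {127,_,_,_} in the list.
    ip_member({127,_,_,_}, [{127,_,_,_}|_]) -> true;
    ip_member(Addr, [Addr|_])               -> true;
    ip_member(Addr, [_|T])                  -> ip_member(Addr, T);
    ip_member(_Addr, [])                    -> false.
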
diff --git a/lib/kernel/test/inet_sockopt_SUITE.erl b/lib/kernel/test/inet_sockopt_SUITE.erl
index ada9c2689c..27ff74e309 100644
--- a/lib/kernel/test/inet_sockopt_SUITE.erl
+++ b/lib/kernel/test/inet_sockopt_SUITE.erl
@@ -110,9 +110,14 @@ simple(Config) when is_list(Config) ->
{S1,S2} = create_socketpair(Opt, Opt),
{ok,Opt} = inet:getopts(S1,OptTags),
{ok,Opt} = inet:getopts(S2,OptTags),
- COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- Opt],
+ NoPushOpt = case os:type() of
+ {unix, Osname} when Osname =:= linux; Osname =:= freebsd -> {nopush, true};
+ {_,_} -> {nopush, false}
+ end,
+ COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- [NoPushOpt|Opt]],
+ COptTags = [X || {X,_} <- COpt],
inet:setopts(S1,COpt),
- {ok,COpt} = inet:getopts(S1,OptTags),
+ {ok,COpt} = inet:getopts(S1,COptTags),
{ok,Opt} = inet:getopts(S2,OptTags),
gen_tcp:close(S1),
gen_tcp:close(S2),
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
index b7ccba8e70..d831d0d108 100644
--- a/lib/kernel/test/logger_SUITE.erl
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -246,6 +246,18 @@ change_config(_Config) ->
{ok,C4} = logger:get_handler_config(h1),
C4 = C3#{custom:=new_custom},
+ %% Change handler config: Id and module can not be changed
+ {error,{illegal_config_change,Old,New}} =
+ logger:set_handler_config(h1,id,newid),
+ %% Check that only the faulty field is included in return
+ [{id,h1}] = maps:to_list(Old),
+ [{id,newid}] = maps:to_list(New),
+ %% Check that both fields are included when both are changed
+ {error,{illegal_config_change,
+ #{id:=h1,module:=?MODULE},
+ #{id:=newid,module:=newmodule}}} =
+ logger:set_handler_config(h1,#{id=>newid,module=>newmodule}),
+
%% Change primary config: Single key
PConfig0 = logger:get_primary_config(),
ok = logger:set_primary_config(level,warning),
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
index a815db14e9..87b8250781 100644
--- a/lib/kernel/test/logger_disk_log_h_SUITE.erl
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -92,6 +92,7 @@ all() ->
disk_log_opts,
default_formatter,
logging,
+ filter_config,
errors,
formatter_fail,
config_fail,
@@ -302,6 +303,20 @@ logging(cleanup, _Config) ->
Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
remove_and_stop(Name).
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([handler_pid,mode_tab],HConfig),
+
+ FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()},
+ #{config:=HConfig} =
+ logger_disk_log_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
errors(Config) ->
PrivDir = ?config(priv_dir,Config),
Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
@@ -316,13 +331,31 @@ errors(Config) ->
%%! TODO:
%%! Check how bad log_opts are handled!
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(Name1,
- config,
- #{file=>LogFile1,
- type=>halt}),
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(Name1,id,new),
+ {error,{illegal_config_change,
+ logger_disk_log_h,
+ #{type:=wrap},
+ #{type:=halt}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{type=>halt,
+ file=>LogFile1}),
+
+ {error,{illegal_config_change,
+ logger_disk_log_h,
+ #{file:=LogFile1},
+ #{file:="newfilename"}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{file=>"newfilename"}),
+
+ %% Read-only fields may (accidentally) be included in the change,
+ %% but it won't take effect
+ {ok,C} = logger:get_handler_config(Name1),
+ ok = logger:set_handler_config(Name1,config,
+ #{handler_pid=>self(),
+ mode_tab=>erlang:make_ref()}),
+ {ok,C} = logger:get_handler_config(Name1),
+
ok = logger:remove_handler(Name1),
{error,{not_found,Name1}} = logger:remove_handler(Name1),
@@ -380,20 +413,25 @@ formatter_fail(cleanup,_Config) ->
ok.
config_fail(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_disk_log_h,{bad,bad}}}} =
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,#{bad:=bad}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{bad => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{handler_not_added,{invalid_levels,{_,1,_}}}} =
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,
+ {invalid_levels,#{drop_mode_qlen:=1}}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{drop_mode_qlen=>1}}),
- {error,{handler_not_added,{invalid_levels,{43,42,_}}}} =
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,
+ {invalid_levels,#{sync_mode_qlen:=43,
+ drop_mode_qlen:=42}}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{sync_mode_qlen=>43,
drop_mode_qlen=>42}}),
- {error,{handler_not_added,{invalid_levels,{_,43,42}}}} =
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,
+ {invalid_levels,#{drop_mode_qlen:=43,
+ flush_qlen:=42}}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{drop_mode_qlen=>43,
flush_qlen=>42}}),
@@ -402,22 +440,19 @@ config_fail(_Config) ->
#{filter_default=>log,
formatter=>{?MODULE,self()}}),
%% can't change the disk log options for a log already in use
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,config,
- #{max_no_files=>2}),
- %% can't change name of an existing handler
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,id,bad),
+ {error,{illegal_config_change,logger_disk_log_h,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{max_no_files=>2}),
%% incorrect values of OP params
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
- {error,{invalid_levels,_}} =
- logger:set_handler_config(?MODULE,config,
- HConfig#{sync_mode_qlen=>100,
- flush_qlen=>99}),
+ {error,{invalid_config,logger_disk_log_h,{invalid_levels,_}}} =
+ logger:update_handler_config(?MODULE,config,
+ HConfig#{sync_mode_qlen=>100,
+ flush_qlen=>99}),
%% invalid name of config parameter
- {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} =
- logger:set_handler_config(?MODULE, config,
- HConfig#{filesync_rep_int => 2000}),
+ {error,{invalid_config,logger_disk_log_h,#{filesync_rep_int:=2000}}} =
+ logger:update_handler_config(?MODULE, config,
+ HConfig#{filesync_rep_int => 2000}),
ok.
config_fail(cleanup,_Config) ->
logger:remove_handler(?MODULE).
@@ -456,13 +491,30 @@ reconfig(Config) ->
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
- log_opts := #{type := ?DISK_LOG_TYPE,
- max_no_files := ?DISK_LOG_MAX_NO_FILES,
- max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
- file := _DiskLogFile}} =
+ handler_state :=
+ #{log_opts := #{type := ?DISK_LOG_TYPE,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ file := DiskLogFile}}} =
logger_disk_log_h:info(?MODULE),
+ {ok,#{config :=
+ #{sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
+ file := DiskLogFile,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ type := wrap} = HConfig0}} =
+ logger:get_handler_config(?MODULE),
- {ok,#{config := HConfig0}} = logger:get_handler_config(?MODULE),
HConfig1 = HConfig0#{sync_mode_qlen => 1,
drop_mode_qlen => 2,
flush_qlen => 3,
@@ -488,6 +540,29 @@ reconfig(Config) ->
overload_kill_restart_after := infinity,
filesync_repeat_interval := no_repeat} =
logger_disk_log_h:info(?MODULE),
+ {ok,#{config:=HConfig1}} = logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConfig1#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = HConfig0#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = HConfig0#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = HConfig0#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
ok = logger:remove_handler(?MODULE),
@@ -502,11 +577,49 @@ reconfig(Config) ->
max_no_files => 1,
max_no_bytes => 1024,
file => File}}),
- #{log_opts := #{type := halt,
- max_no_files := 1,
- max_no_bytes := 1024,
- file := File}} =
+ #{handler_state :=
+ #{log_opts := #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}}} =
logger_disk_log_h:info(?MODULE),
+ {ok,#{config :=
+ #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}=HaltHConfig} = Config2} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, level, notice),
+ {ok,C5} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = Config2#{level => notice},
+
+ ok = logger:set_handler_config(?MODULE, level, info),
+ {ok,C6} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = Config2#{level => info},
+
+ %% You are not allowed to actively set the write-once fields
+ %% (type, max_no_files, max_no_bytes, file) at runtime.
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>wrap}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_files=>2}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}),
+ {ok,C7} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = C6,
+
+ %% ... but if you don't specify the write-once fields, then
+ %% set_handler_config shall NOT reset them to their default value
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C8}} = logger:get_handler_config(?MODULE),
+ ct:log("C8: ~p",[C8]),
+ C8 = HaltHConfig#{sync_mode_qlen=>1},
ok.
reconfig(cleanup, _Config) ->
@@ -523,20 +636,20 @@ sync(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,nl}}),
- start_tracer([{disk_log,blog,2},
- {logger_disk_log_h,disk_log_sync,2}],
- [{disk_log,blog,<<"first\n">>},
- {logger_disk_log_h,disk_log_sync}]),
+ start_tracer([{logger_disk_log_h,disk_log_write,3},
+ {disk_log,sync,1}],
+ [{logger_disk_log_h,disk_log_write,<<"first\n">>},
+ {disk_log,sync}]),
logger:notice("first", ?domain),
%% wait for automatic disk_log_sync
check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
- %% check that if there's no repeated disk_log_sync active,
+ %% check that if there's no repeated filesync active,
%% a disk_log_sync is still performed when handler goes idle
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
- ok = logger:set_handler_config(?MODULE, config, HConfig1),
+ ok = logger:update_handler_config(?MODULE, config, HConfig1),
no_repeat = maps:get(filesync_repeat_interval,
logger_disk_log_h:info(?MODULE)),
@@ -545,12 +658,12 @@ sync(Config) ->
%% triggered by the idle timeout between "fourth" and "fifth".
timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
- start_tracer([{disk_log,blog,2},
- {logger_disk_log_h,disk_log_sync,2}],
- [{disk_log,blog,<<"second\n">>},
- {logger_disk_log_h,disk_log_sync},
- {disk_log,blog,<<"third\n">>},
- {logger_disk_log_h,disk_log_sync}]),
+ start_tracer([{logger_disk_log_h,disk_log_write,3},
+ {disk_log,sync,1}],
+ [{logger_disk_log_h,disk_log_write,<<"second\n">>},
+ {disk_log,sync},
+ {logger_disk_log_h,disk_log_write,<<"third\n">>},
+ {disk_log,sync}]),
logger:notice("second", ?domain),
timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
@@ -560,22 +673,22 @@ sync(Config) ->
try_read_file(Log, {ok,<<"first\nsecond\nthird\n">>}, 1000),
- %% switch repeated disk_log_sync on and verify that the looping works
+ %% switch repeated filesync on and verify that the looping works
SyncInt = 1000,
WaitT = 4500,
- OneSync = {logger_disk_log_h,handle_cast,repeated_disk_log_sync},
- %% receive 1 initial repeated_disk_log_sync, then 1 per sec
- start_tracer([{logger_disk_log_h,handle_cast,2}],
- [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{logger_h_common,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
HConfig2 = HConfig#{filesync_repeat_interval => SyncInt},
- ok = logger:set_handler_config(?MODULE, config, HConfig2),
+ ok = logger:update_handler_config(?MODULE, config, HConfig2),
SyncInt = maps:get(filesync_repeat_interval,
logger_disk_log_h:info(?MODULE)),
timer:sleep(WaitT),
HConfig3 = HConfig#{filesync_repeat_interval => no_repeat},
- ok = logger:set_handler_config(?MODULE, config, HConfig3),
+ ok = logger:update_handler_config(?MODULE, config, HConfig3),
check_tracer(100),
ok.
sync(cleanup,_Config) ->
@@ -609,7 +722,7 @@ disk_log_wrap(Config) ->
end,
{ok,_} = dbg:tracer(process, {TraceFun, Tester}),
{ok,_} = dbg:p(whereis(h_proc_name()), [c]),
- {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)],
ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]),
@@ -627,7 +740,7 @@ disk_log_wrap(Config) ->
timer:sleep(1000),
dbg:stop_clear(),
Received = lists:flatmap(fun({trace,_M,handle_info,
- [{disk_log,_Node,_Name,What},_]}) ->
+ [_,{disk_log,_Node,_Name,What},_]}) ->
[{trace,What}];
({log,_}) ->
[]
@@ -663,7 +776,7 @@ disk_log_full(Config) ->
end,
{ok,_} = dbg:tracer(process, {TraceFun, Tester}),
{ok,_} = dbg:p(whereis(h_proc_name()), [c]),
- {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
NoOfChars = 5,
Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)],
@@ -673,14 +786,18 @@ disk_log_full(Config) ->
timer:sleep(2000),
dbg:stop_clear(),
Received = lists:flatmap(fun({trace,_M,handle_info,
- [{disk_log,_Node,_Name,What},_]}) ->
+ [_,{disk_log,_Node,_Name,What},_]}) ->
[{trace,What}];
({log,_}) ->
[]
end, test_server:messages_get()),
ct:pal("Trace =~n~p", [Received]),
- [{trace,full},
- {trace,{error_status,{error,{full,_}}}}] = Received,
+
+ %% The tail here could be an error_status notification, if the
+ %% last write was synchronous, but in most cases it will not be
+ [{trace,full}|_] = Received,
+ %% [{trace,full},
+ %% {trace,{error_status,{error,{full,_}}}}] = Received,
ok.
disk_log_full(cleanup, _Config) ->
dbg:stop_clear(),
@@ -712,14 +829,14 @@ disk_log_events(Config) ->
end,
{ok,_} = dbg:tracer(process, {TraceFun, Tester}),
{ok,_} = dbg:p(whereis(h_proc_name()), [c]),
- {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
[whereis(h_proc_name()) ! E || E <- Events],
%% wait for trace messages
timer:sleep(2000),
dbg:stop_clear(),
Received = lists:map(fun({trace,_M,handle_info,
- [Got,_]}) -> Got
+ [_,Got,_]}) -> Got
end, test_server:messages_get()),
ct:pal("Trace =~n~p", [Received]),
NoOfEvents = length(Events),
@@ -742,13 +859,15 @@ write_failure(Config) ->
false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]),
HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
- ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]),
+ ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts,
+ maps:get(handler_state,HState))]),
+ %% ?check and ?check_no_log in this test only check for internal log events
ok = log_on_remote_node(Node, "Logged1"),
rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
- ?check_no_log,
+ ?check_no_log, % no internal log when write ok
SyncRepInt = case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
true -> 5500;
@@ -757,24 +876,26 @@ write_failure(Config) ->
try_read_file(Log, {ok,<<"Logged1\n">>}, SyncRepInt),
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,{error,no_such_log}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
+ %% this should have caused an internal log
?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}),
-
+
ok = log_on_remote_node(Node, "No second error printout"),
- ?check_no_log,
+ ?check_no_log, % but don't log same error twice
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,
{error,{full,?STANDARD_HANDLER}}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
+ %% this was a different error, so it should be logged
?check({error,{?STANDARD_HANDLER,log,LogOpts,
{error,{full,?STANDARD_HANDLER}}}}),
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]),
ok = log_on_remote_node(Node, "Logged2"),
rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
- ?check_no_log,
+ ?check_no_log, % no internal log when write ok
try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, SyncRepInt),
ok.
write_failure(cleanup, _Config) ->
@@ -794,10 +915,10 @@ sync_failure(Config) ->
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
- LogOpts = maps:get(log_opts, HState),
+ LogOpts = maps:get(log_opts, maps:get(handler_state,HState)),
SyncInt = 500,
- ok = rpc:call(Node, logger, set_handler_config,
+ ok = rpc:call(Node, logger, update_handler_config,
[?STANDARD_HANDLER, config,
#{filesync_repeat_interval => SyncInt}]),
Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
@@ -872,7 +993,7 @@ op_switch_to_sync(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Lines = count_lines(Log),
NumOfReqs = Lines,
@@ -897,7 +1018,7 @@ op_switch_to_drop(Config) ->
drop_mode_qlen => 2,
flush_qlen => Procs*NumOfReqs*Bursts,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% It sometimes happens that the handler either gets
%% the requests in a slow enough pace so that dropping
%% never occurs. Therefore, lets generate a number of
@@ -943,7 +1064,7 @@ op_switch_to_flush(Config) ->
drop_mode_qlen => 300,
flush_qlen => 300,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1500,
Procs = 10,
Bursts = 10,
@@ -985,7 +1106,7 @@ limit_burst_disabled(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1005,7 +1126,7 @@ limit_burst_enabled_one(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1026,7 +1147,7 @@ limit_burst_enabled_period(Config) ->
burst_limit_window_time => BurstTWin,
drop_mode_qlen => 20000,
flush_qlen => 20001}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Windows = 3,
Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
@@ -1046,7 +1167,7 @@ kill_disabled(Config) ->
HConfig#{config=>DLHConfig#{overload_kill_enable=>false,
overload_kill_qlen=>10,
overload_kill_mem_size=>100}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1068,7 +1189,7 @@ qlen_kill_new(Config) ->
overload_kill_qlen=>10,
overload_kill_mem_size=>Mem0+50000,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1105,7 +1226,7 @@ mem_kill_new(Config) ->
overload_kill_qlen=>50000,
overload_kill_mem_size=>Mem0+500,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1139,7 +1260,7 @@ restart_after(Config) ->
HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>infinity}},
- ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
MRef1 = erlang:monitor(process, whereis(h_proc_name())),
%% kill handler
send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
@@ -1161,7 +1282,7 @@ restart_after(Config) ->
HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
Pid0 = whereis(h_proc_name()),
MRef2 = erlang:monitor(process, Pid0),
%% kill handler
@@ -1194,7 +1315,7 @@ handler_requests_under_load(Config) ->
drop_mode_qlen => 1000,
flush_qlen => 2000,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
{info,[]},
{reset,[]},
@@ -1227,9 +1348,9 @@ send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
Result =
case Req of
change_config ->
- logger:set_handler_config(HName, logger_disk_log_h,
- #{overload_kill_enable =>
- false});
+ logger:update_handler_config(HName, logger_disk_log_h,
+ #{overload_kill_enable =>
+ false});
Func ->
logger_disk_log_h:Func(HName)
end,
@@ -1499,10 +1620,10 @@ tpl([{M,F,A}|Trace]) ->
tpl([]) ->
ok.
-tracer({trace,_,call,{logger_disk_log_h,handle_cast,[Op|_]},Caller},
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]},Caller},
{Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op},Caller);
-tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]},Caller}, {Pid,[{Mod,Func,Data}|Expected]}) ->
+tracer({trace,_,call,{Mod=logger_disk_log_h,Func=disk_log_write,[_,_,Data]},Caller}, {Pid,[{Mod,Func,Data}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Data},Caller);
tracer({trace,_,call,{Mod,Func,_},Caller}, {Pid,[{Mod,Func}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func},Caller);
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index 3426567bbf..eb17a6d857 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -108,6 +108,7 @@ all() ->
add_remove_instance_file1,
add_remove_instance_file2,
default_formatter,
+ filter_config,
errors,
formatter_fail,
config_fail,
@@ -135,11 +136,12 @@ all() ->
mem_kill_new,
mem_kill_std,
restart_after,
- handler_requests_under_load
+ handler_requests_under_load,
+ recreate_deleted_log
].
add_remove_instance_tty(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_std_h,{type,tty}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{type:=tty}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{type => tty},
filter_default=>log,
@@ -204,6 +206,20 @@ default_formatter(_Config) ->
match = re:run(Msg,"=NOTICE REPORT====.*\n"++M1,[{capture,none}]),
ok.
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([handler_pid,mode_tab],HConfig),
+
+ FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()},
+ #{config:=HConfig} =
+ logger_std_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
errors(Config) ->
Dir = ?config(priv_dir,Config),
Log = filename:join(Dir,?FUNCTION_NAME),
@@ -219,7 +235,7 @@ errors(Config) ->
{error,
{handler_not_added,
- {invalid_config,logger_std_h,{type,faulty_type}}}} =
+ {invalid_config,logger_std_h,#{type:=faulty_type}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{type => faulty_type}}),
@@ -293,25 +309,30 @@ formatter_fail(cleanup,_Config) ->
logger:remove_handler(?MODULE).
config_fail(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_std_h,{bad,bad}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{bad:=bad}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{bad => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
{error,{handler_not_added,{invalid_config,logger_std_h,
- {restart_type,bad}}}} =
+ #{restart_type:=bad}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{restart_type => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{handler_not_added,{invalid_levels,{_,1,_}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ {invalid_levels,#{drop_mode_qlen:=1}}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{drop_mode_qlen=>1}}),
- {error,{handler_not_added,{invalid_levels,{43,42,_}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ {invalid_levels,#{sync_mode_qlen:=43,
+ drop_mode_qlen:=42}}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{sync_mode_qlen=>43,
drop_mode_qlen=>42}}),
- {error,{handler_not_added,{invalid_levels,{_,43,42}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ {invalid_levels,#{drop_mode_qlen:=43,
+ flush_qlen:=42}}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{drop_mode_qlen=>43,
flush_qlen=>42}}),
@@ -319,18 +340,26 @@ config_fail(_Config) ->
ok = logger:add_handler(?MODULE,logger_std_h,
#{filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{illegal_config_change,_,_}} =
+ {error,{illegal_config_change,logger_std_h,#{type:=_},#{type:=_}}} =
logger:set_handler_config(?MODULE,config,
#{type=>{file,"file"}}),
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,id,bad),
- {error,{invalid_levels,_}} =
+
+ {error,{invalid_config,logger_std_h,{invalid_levels,_}}} =
logger:set_handler_config(?MODULE,config,
#{sync_mode_qlen=>100,
flush_qlen=>99}),
- {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} =
+ {error,{invalid_config,logger_std_h,#{filesync_rep_int:=2000}}} =
logger:set_handler_config(?MODULE, config,
#{filesync_rep_int => 2000}),
+
+ %% Read-only fields may (accidentally) be included in the change,
+ %% but it won't take effect
+ {ok,C} = logger:get_handler_config(?MODULE),
+ ok = logger:set_handler_config(?MODULE,config,
+ #{handler_pid=>self(),
+ mode_tab=>erlang:make_ref()}),
+ {ok,C} = logger:get_handler_config(?MODULE),
+
ok.
config_fail(cleanup,_Config) ->
@@ -445,8 +474,8 @@ reconfig(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
#{id := ?MODULE,
- type := standard_io,
- file_ctrl_pid := FileCtrlPid,
+ handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
sync_mode_qlen := ?SYNC_MODE_QLEN,
drop_mode_qlen := ?DROP_MODE_QLEN,
flush_qlen := ?FLUSH_QLEN,
@@ -457,9 +486,26 @@ reconfig(Config) ->
overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
+ filesync_repeat_interval := no_repeat} = DefaultInfo =
logger_std_h:info(?MODULE),
+ {ok,
+ #{config:=
+ #{type := standard_io,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := no_repeat} =
+ DefaultHConf}}
+ = logger:get_handler_config(?MODULE),
+
ok = logger:set_handler_config(?MODULE, config,
#{sync_mode_qlen => 1,
drop_mode_qlen => 2,
@@ -471,10 +517,10 @@ reconfig(Config) ->
overload_kill_qlen => 100000,
overload_kill_mem_size => 10000000,
overload_kill_restart_after => infinity,
- filesync_repeat_interval => no_repeat}),
+ filesync_repeat_interval => 5000}),
#{id := ?MODULE,
- type := standard_io,
- file_ctrl_pid := FileCtrlPid,
+ handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
sync_mode_qlen := 1,
drop_mode_qlen := 2,
flush_qlen := 3,
@@ -485,7 +531,77 @@ reconfig(Config) ->
overload_kill_qlen := 100000,
overload_kill_mem_size := 10000000,
overload_kill_restart_after := infinity,
- filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE),
+ filesync_repeat_interval := no_repeat} = Info = logger_std_h:info(?MODULE),
+
+ {ok,#{config :=
+ #{type := standard_io,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ filesync_repeat_interval := no_repeat} = HConf}} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConf#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = DefaultHConf#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = DefaultHConf#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = DefaultHConf#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir,lists:concat([?FUNCTION_NAME,".log"])),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => {file,File}},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ {ok,#{config:=#{filesync_repeat_interval:=FSI}=FileHConfig}} =
+ logger:get_handler_config(?MODULE),
+ ok = logger:update_handler_config(?MODULE,config,
+ #{filesync_repeat_interval=>FSI+2000}),
+ {ok,#{config:=C5}} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = FileHConfig#{filesync_repeat_interval=>FSI+2000},
+
+ %% You are not allowed to actively set 'type' at runtime, since
+ %% this is a write-once field.
+ {error, {illegal_config_change,logger_std_h,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>standard_io}),
+ {ok,#{config:=C6}} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = C5,
+
+ %% ... but if you don't specify 'type', then set_handler_config shall
+ %% NOT reset it to its default value
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C7}} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = FileHConfig#{sync_mode_qlen=>1},
ok.
reconfig(cleanup, _Config) ->
@@ -510,7 +626,7 @@ file_opts(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
- #{type := OkType} = logger_std_h:info(?MODULE),
+ #{handler_state := #{type := OkType}} = logger_std_h:info(?MODULE),
logger:notice(M1=?msg,?domain),
?check(M1),
B1 = ?bin(M1),
@@ -533,10 +649,8 @@ sync(Config) ->
%% check repeated filesync happens
start_tracer([{logger_std_h, write_to_dev, 5},
- {logger_std_h, sync_dev, 4},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"first\n">>},
- {logger_std_h, sync_dev},
{file,datasync}]),
logger:notice("first", ?domain),
@@ -545,10 +659,8 @@ sync(Config) ->
%% check that explicit filesync is only done once
start_tracer([{logger_std_h, write_to_dev, 5},
- {logger_std_h, sync_dev, 4},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"second\n">>},
- {logger_std_h, sync_dev},
{file,datasync},
{no_more,500}
]),
@@ -561,21 +673,18 @@ sync(Config) ->
%% check that if there's no repeated filesync active,
%% a filesync is still performed when handler goes idle
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => no_repeat}),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
%% The following timer is to make sure the time from last log
%% ("second") to next ("third") is long enough, so the a flush is
%% triggered by the idle timeout between "thrid" and "fourth".
timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
start_tracer([{logger_std_h, write_to_dev, 5},
- {logger_std_h, sync_dev, 4},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"third\n">>},
- {logger_std_h, sync_dev},
{file,datasync},
{logger_std_h, write_to_dev, <<"fourth\n">>},
- {logger_std_h, sync_dev},
{file,datasync}]),
logger:notice("third", ?domain),
%% wait for automatic filesync
@@ -587,17 +696,17 @@ sync(Config) ->
%% switch repeated filesync on and verify that the looping works
SyncInt = 1000,
WaitT = 4500,
- OneSync = {logger_std_h,handle_cast,repeated_filesync},
- %% receive 1 initial repeated_filesync, then 1 per sec
- start_tracer([{logger_std_h,handle_cast,2}],
- [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{logger_h_common,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => SyncInt}),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => SyncInt}),
SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
timer:sleep(WaitT),
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => no_repeat}),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
check_tracer(100),
ok.
sync(cleanup, _Config) ->
@@ -652,7 +761,7 @@ sync_failure(Config) ->
rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
SyncInt = 500,
- ok = rpc:call(Node, logger, set_handler_config,
+ ok = rpc:call(Node, logger, update_handler_config,
[?STANDARD_HANDLER, config,
#{filesync_repeat_interval => SyncInt}]),
Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]),
@@ -718,7 +827,7 @@ op_switch_to_sync_file(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% TRecvPid = start_op_trace(),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Lines = count_lines(Log),
@@ -747,7 +856,7 @@ op_switch_to_sync_tty(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
ok.
op_switch_to_sync_tty(cleanup, _Config) ->
@@ -770,7 +879,7 @@ op_switch_to_drop_file(Config) ->
flush_qlen =>
Procs*NumOfReqs*Bursts,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% It sometimes happens that the handler gets the
%% requests at a slow enough pace that dropping
%% never occurs. Therefore, let's generate a number of
@@ -807,7 +916,7 @@ op_switch_to_drop_tty(Config) ->
flush_qlen =>
Procs*NumOfReqs+1,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
ok.
op_switch_to_drop_tty(cleanup, _Config) ->
@@ -832,7 +941,7 @@ op_switch_to_flush_file(Config) ->
drop_mode_qlen => 300,
flush_qlen => 300,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1500,
Procs = 10,
Bursts = 10,
@@ -879,7 +988,7 @@ op_switch_to_flush_tty(Config) ->
drop_mode_qlen => 100,
flush_qlen => 100,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1000,
Procs = 100,
send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
@@ -895,7 +1004,7 @@ limit_burst_disabled(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -915,7 +1024,7 @@ limit_burst_enabled_one(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -936,7 +1045,7 @@ limit_burst_enabled_period(Config) ->
burst_limit_window_time => BurstTWin,
drop_mode_qlen => 20000,
flush_qlen => 20001}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Windows = 3,
Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
@@ -956,7 +1065,7 @@ kill_disabled(Config) ->
HConfig#{config=>StdHConfig#{overload_kill_enable=>false,
overload_kill_qlen=>10,
overload_kill_mem_size=>100}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -977,7 +1086,7 @@ qlen_kill_new(Config) ->
overload_kill_qlen=>10,
overload_kill_mem_size=>Mem0+50000,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1011,7 +1120,7 @@ qlen_kill_std(_Config) ->
%% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
%% Log = filename:join(Dir, File),
%% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
- %% ok = rpc:call(Node, logger, set_handler_config,
+ %% ok = rpc:call(Node, logger, update_handler_config,
%% [?STANDARD_HANDLER, config,
%% #{overload_kill_enable=>true,
%% overload_kill_qlen=>10,
@@ -1028,7 +1137,7 @@ mem_kill_new(Config) ->
overload_kill_qlen=>50000,
overload_kill_mem_size=>Mem0+500,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1067,7 +1176,7 @@ restart_after(Config) ->
HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>infinity}},
- ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
MRef1 = erlang:monitor(process, whereis(h_proc_name())),
%% kill handler
send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
@@ -1082,14 +1191,15 @@ restart_after(Config) ->
ct:pal("Handler state = ~p", [Info1]),
ct:fail("Handler not dead! It should not have survived this!")
end,
-
+
{Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+
NewHConfig2 =
HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
Pid0 = whereis(h_proc_name()),
MRef2 = erlang:monitor(process, Pid0),
%% kill handler
@@ -1123,7 +1233,7 @@ handler_requests_under_load(Config) ->
drop_mode_qlen => 1000,
flush_qlen => 2000,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
{info,[]},
{reset,[]},
@@ -1146,6 +1256,22 @@ handler_requests_under_load(Config) ->
handler_requests_under_load(cleanup, _Config) ->
ok = stop_handler(?MODULE).
+recreate_deleted_log(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ logger:notice("first",?domain),
+ logger_std_h:filesync(?MODULE),
+ ok = file:rename(Log,Log++".old"),
+ logger:notice("second",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,<<"first\n">>} = file:read_file(Log++".old"),
+ {ok,<<"second\n">>} = file:read_file(Log),
+ ok.
+recreate_deleted_log(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%%%-----------------------------------------------------------------
+%%%
send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
receive
{From,finish} ->
@@ -1155,9 +1281,9 @@ send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
Result =
case Req of
change_config ->
- logger:set_handler_config(HName, config,
- #{overload_kill_enable =>
- false});
+ logger:update_handler_config(HName, config,
+ #{overload_kill_enable =>
+ false});
Func ->
logger_std_h:Func(HName)
end,
@@ -1167,8 +1293,8 @@ send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
%%%-----------------------------------------------------------------
%%%
-start_handler(Name, TTY, Config) when TTY == standard_io;
- TTY == standard_error->
+start_handler(Name, TTY, _Config) when TTY == standard_io;
+ TTY == standard_error->
ok = logger:add_handler(Name,
logger_std_h,
#{config => #{type => TTY},
@@ -1422,7 +1548,7 @@ start_op_trace() ->
{ok,_} = dbg:p(self(), [c]),
MS1 = dbg:fun2ms(fun([_]) -> return_trace() end),
- {ok,_} = dbg:tp(logger_h_common, check_load, 1, MS1),
+ {ok,_} = dbg:tpl(logger_h_common, check_load, 1, MS1),
{ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []),
@@ -1496,7 +1622,9 @@ analyse(Msgs) ->
start_tracer(Trace,Expected) ->
Pid = self(),
- FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)),
+ FileCtrlPid = maps:get(file_ctrl_pid,
+ maps:get(handler_state,
+ logger_std_h:info(?MODULE))),
dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
dbg:p(whereis(h_proc_name()),[c]),
dbg:p(FileCtrlPid,[c]),
@@ -1517,7 +1645,7 @@ tpl([{M,F,A}|Trace]) ->
tpl([]) ->
ok.
-tracer({trace,_,call,{logger_std_h,handle_cast,[Op|_]}},
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]}},
{Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}},
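
The hunks above consistently replace logger:set_handler_config/2,3 with
logger:update_handler_config/2,3. Roughly speaking, update merges the given
values into the handler's existing configuration, while set overwrites it,
so update is the safer call when only one parameter, such as
filesync_repeat_interval, should change. A minimal sketch, assuming a
logger_std_h handler with the made-up id my_h has already been added:

    %% Switch repeated filesync on, leaving the rest of the config untouched,
    %% and then off again.
    ok = logger:update_handler_config(my_h, config,
                                      #{filesync_repeat_interval => 1000}),
    ok = logger:update_handler_config(my_h, config,
                                      #{filesync_repeat_interval => no_repeat}).
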
diff --git a/lib/kernel/test/sendfile_SUITE.erl b/lib/kernel/test/sendfile_SUITE.erl
index 0c0b1cbcb6..ad060aa05c 100644
--- a/lib/kernel/test/sendfile_SUITE.erl
+++ b/lib/kernel/test/sendfile_SUITE.erl
@@ -341,7 +341,21 @@ t_sendfile_closeduring(Config) ->
-1
end,
- ok = sendfile_send({127,0,0,1}, Send, 0).
+ ok = sendfile_send({127,0,0,1}, Send, 0, [{active,false}]),
+ [] = flush(),
+ ok = sendfile_send({127,0,0,1}, Send, 0, [{active,true}]),
+ [] = flush(),
+ ok.
+
+flush() ->
+ lists:reverse(flush([])).
+
+flush(Acc) ->
+ receive M ->
+ flush([M | Acc])
+ after 0 ->
+ Acc
+ end.
t_sendfile_crashduring(Config) ->
Filename = proplists:get_value(big_file, Config),
@@ -409,12 +423,16 @@ sendfile_send(Send) ->
sendfile_send(Host, Send) ->
sendfile_send(Host, Send, []).
sendfile_send(Host, Send, Orig) ->
+ sendfile_send(Host, Send, Orig, [{active,false}]).
+
+sendfile_send(Host, Send, Orig, SockOpts) ->
+
SFServer = spawn_link(?MODULE, sendfile_server, [self(), Orig]),
receive
{server, Port} ->
- {ok, Sock} = gen_tcp:connect(Host, Port,
- [binary,{packet,0},
- {active,false}]),
+ Opts = [binary,{packet,0}|SockOpts],
+ io:format("connect with opts = ~p\n", [Opts]),
+ {ok, Sock} = gen_tcp:connect(Host, Port, Opts),
Data = case proplists:get_value(arity,erlang:fun_info(Send)) of
1 ->
Send(Sock);
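
The rewritten test runs the transfer over both a passive and an active
socket and asserts an empty mailbox ([] = flush()) after each round, since
active-mode data is delivered as messages to the controlling process rather
than being pulled with gen_tcp:recv/3. A minimal sketch of the two receive
styles, assuming Sock is an already connected gen_tcp socket:

    %% Passive socket: data is pulled explicitly.
    recv_passive(Sock) ->
        gen_tcp:recv(Sock, 0, 5000).              %% {ok,Data} | {error,Reason}

    %% Active socket: data arrives in the mailbox of the controlling process.
    recv_active(Sock) ->
        ok = inet:setopts(Sock, [{active, true}]),
        receive
            {tcp, Sock, Data}  -> {ok, Data};
            {tcp_closed, Sock} -> {error, closed}
        after 5000 ->
            {error, timeout}
        end.
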
diff --git a/lib/kernel/test/seq_trace_SUITE.erl b/lib/kernel/test/seq_trace_SUITE.erl
index cf4bf11328..ee8f4e94f8 100644
--- a/lib/kernel/test/seq_trace_SUITE.erl
+++ b/lib/kernel/test/seq_trace_SUITE.erl
@@ -25,7 +25,8 @@
-export([token_set_get/1, tracer_set_get/1, print/1,
send/1, distributed_send/1, recv/1, distributed_recv/1,
trace_exit/1, distributed_exit/1, call/1, port/1,
- match_set_seq_token/1, gc_seq_token/1, label_capability_mismatch/1]).
+ match_set_seq_token/1, gc_seq_token/1, label_capability_mismatch/1,
+ send_literal/1]).
%% internal exports
-export([simple_tracer/2, one_time_receiver/0, one_time_receiver/1,
@@ -44,7 +45,7 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [token_set_get, tracer_set_get, print, send,
+ [token_set_get, tracer_set_get, print, send, send_literal,
distributed_send, recv, distributed_recv, trace_exit,
distributed_exit, call, port, match_set_seq_token,
gc_seq_token, label_capability_mismatch].
@@ -158,23 +159,51 @@ do_print(TsType) ->
{0,{print,_,_,[],print3}, Ts1}] = stop_tracer(2),
check_ts(TsType, Ts0),
check_ts(TsType, Ts1).
-
+
send(Config) when is_list(Config) ->
lists:foreach(fun do_send/1, ?TIMESTAMP_MODES).
do_send(TsType) ->
+ do_send(TsType, send).
+
+do_send(TsType, Msg) ->
seq_trace:reset_trace(),
start_tracer(),
Receiver = spawn(?MODULE,one_time_receiver,[]),
Label = make_ref(),
seq_trace:set_token(label,Label),
set_token_flags([send, TsType]),
- Receiver ! send,
+ Receiver ! Msg,
Self = self(),
seq_trace:reset_trace(),
- [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
+ [{Label,{send,_,Self,Receiver,Msg}, Ts}] = stop_tracer(1),
check_ts(TsType, Ts).
+%% This testcase tests that we do not segfault when we have a
+%% literal as the message and the message is copied onto the
+%% heap during a GC.
+send_literal(Config) when is_list(Config) ->
+ lists:foreach(fun do_send_literal/1,
+ [atom, make_ref(), ets:new(hej,[]), 1 bsl 64,
+ "gurka", {tuple,test,with,#{}}, #{}]).
+
+do_send_literal(Msg) ->
+ N = 10000,
+ seq_trace:reset_trace(),
+ start_tracer(),
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
+ set_token_flags([send, 'receive', no_timestamp]),
+ Receiver = spawn_link(fun() -> receive ok -> ok end end),
+ [Receiver ! Msg || _ <- lists:seq(1, N)],
+ erlang:garbage_collect(Receiver),
+ [Receiver ! Msg || _ <- lists:seq(1, N)],
+ erlang:garbage_collect(Receiver),
+ Self = self(),
+ seq_trace:reset_trace(),
+ [{Label,{send,_,Self,Receiver,Msg}, Ts} | _] = stop_tracer(N),
+ check_ts(no_timestamp, Ts).
+
distributed_send(Config) when is_list(Config) ->
lists:foreach(fun do_distributed_send/1, ?TIMESTAMP_MODES).
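
send_literal/1 sends a literal term many times with the seq_trace send flag
set and then garbage-collects the receiver, so that a traced literal message
gets copied onto the heap (the crash scenario described in the comment
above). A minimal sketch of the seq_trace calls involved; the tracer,
receiver and message are placeholders:

    seq_trace_sketch() ->
        Tracer = spawn(fun F() -> receive M -> io:format("~p~n", [M]), F() end end),
        seq_trace:set_system_tracer(Tracer),
        Receiver = spawn(fun() -> receive _ -> ok end end),
        seq_trace:set_token(label, make_ref()),
        seq_trace:set_token(send, true),
        %% The system tracer now gets {seq_trace,Label,{send,Serial,From,To,hello}}.
        Receiver ! hello,
        seq_trace:reset_trace().
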
diff --git a/lib/kernel/vsn.mk b/lib/kernel/vsn.mk
index df95174c9f..35e6a16a49 100644
--- a/lib/kernel/vsn.mk
+++ b/lib/kernel/vsn.mk
@@ -1 +1 @@
-KERNEL_VSN = 6.1
+KERNEL_VSN = 6.1.1
diff --git a/lib/observer/src/Makefile b/lib/observer/src/Makefile
index ff2bcbdb99..f9f239db37 100644
--- a/lib/observer/src/Makefile
+++ b/lib/observer/src/Makefile
@@ -50,6 +50,7 @@ MODULES= \
cdv_mem_cb \
cdv_mod_cb \
cdv_multi_wx \
+ cdv_persistent_cb \
cdv_port_cb \
cdv_proc_cb \
cdv_sched_cb \
diff --git a/lib/observer/src/cdv_html_wx.erl b/lib/observer/src/cdv_html_wx.erl
index d9efa7fc2f..ffef83227c 100644
--- a/lib/observer/src/cdv_html_wx.erl
+++ b/lib/observer/src/cdv_html_wx.erl
@@ -33,13 +33,17 @@
{panel,
app,            %% which tool the user is running
expand_table,
- expand_wins=[]}).
+ expand_wins=[],
+ delayed_fetch,
+ trunc_warn=[]}).
start_link(ParentWin, Info) ->
wx_object:start_link(?MODULE, [ParentWin, Info], []).
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+init([ParentWin, Callback]) when is_atom(Callback) ->
+ init(ParentWin, Callback);
init([ParentWin, {App, Fun}]) when is_function(Fun) ->
init([ParentWin, {App, Fun()}]);
init([ParentWin, {expand,HtmlText,Tab}]) ->
@@ -60,9 +64,29 @@ init(ParentWin, HtmlText, Tab, App) ->
wx_misc:endBusyCursor(),
{HtmlWin, #state{panel=HtmlWin,expand_table=Tab,app=App}}.
+init(ParentWin, Callback) ->
+ {HtmlWin, State} = init(ParentWin, "", undefined, cdv),
+ {HtmlWin, State#state{delayed_fetch=Callback}}.
+
%%%%%%%%%%%%%%%%%%%%%%% Callbacks %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+handle_info(active, #state{panel=HtmlWin,delayed_fetch=Callback}=State)
+ when Callback=/=undefined ->
+ observer_lib:display_progress_dialog(HtmlWin,
+ "Crashdump Viewer",
+ "Reading data"),
+ {{expand,HtmlText,Tab},TW} = Callback:get_info(),
+ observer_lib:sync_destroy_progress_dialog(),
+ wx_misc:beginBusyCursor(),
+ wxHtmlWindow:setPage(HtmlWin,HtmlText),
+ cdv_wx:set_status(TW),
+ wx_misc:endBusyCursor(),
+ {noreply, State#state{expand_table=Tab,
+ delayed_fetch=undefined,
+ trunc_warn=TW}};
+
handle_info(active, State) ->
+ cdv_wx:set_status(State#state.trunc_warn),
{noreply, State};
handle_info(Info, State) ->
diff --git a/lib/observer/src/cdv_persistent_cb.erl b/lib/observer/src/cdv_persistent_cb.erl
new file mode 100644
index 0000000000..d5da18f7fc
--- /dev/null
+++ b/lib/observer/src/cdv_persistent_cb.erl
@@ -0,0 +1,32 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+
+-module(cdv_persistent_cb).
+
+-export([get_info/0]).
+
+-include_lib("wx/include/wx.hrl").
+
+get_info() ->
+ Tab = ets:new(pt_expand,[set,public]),
+ {ok,PT,TW} = crashdump_viewer:persistent_terms(),
+ {{expand,
+ observer_html_lib:expandable_term("Persistent Terms",PT,Tab),
+ Tab},
+ TW}.
diff --git a/lib/observer/src/cdv_wx.erl b/lib/observer/src/cdv_wx.erl
index 78a897111c..1e9cef8952 100644
--- a/lib/observer/src/cdv_wx.erl
+++ b/lib/observer/src/cdv_wx.erl
@@ -51,6 +51,7 @@
-define(DIST_STR, "Nodes").
-define(MOD_STR, "Modules").
-define(MEM_STR, "Memory").
+-define(PERSISTENT_STR, "Persistent Terms").
-define(INT_STR, "Internal Tables").
%% Records
@@ -74,6 +75,7 @@
dist_panel,
mod_panel,
mem_panel,
+ persistent_panel,
int_panel,
active_tab
}).
@@ -193,6 +195,10 @@ setup(#state{frame=Frame, notebook=Notebook}=State) ->
%% Memory Panel
MemPanel = add_page(Notebook, ?MEM_STR, cdv_multi_wx, cdv_mem_cb),
+ %% Persistent Terms Panel
+ PersistentPanel = add_page(Notebook, ?PERSISTENT_STR,
+ cdv_html_wx, cdv_persistent_cb),
+
%% Internal Tables Panel
IntPanel = add_page(Notebook, ?INT_STR, cdv_multi_wx, cdv_int_tab_cb),
@@ -215,6 +221,7 @@ setup(#state{frame=Frame, notebook=Notebook}=State) ->
dist_panel = DistPanel,
mod_panel = ModPanel,
mem_panel = MemPanel,
+ persistent_panel = PersistentPanel,
int_panel = IntPanel,
active_tab = GenPid
}}.
@@ -250,6 +257,7 @@ handle_event(#wx{id = ?wxID_OPEN,
State#state.dist_panel,
State#state.mod_panel,
State#state.mem_panel,
+ State#state.persistent_panel,
State#state.int_panel],
_ = [wx_object:call(Panel,new_dump) || Panel<-Panels],
wxNotebook:setSelection(State#state.notebook,0),
@@ -343,8 +351,8 @@ check_page_title(Notebook) ->
get_active_pid(#state{notebook=Notebook, gen_panel=Gen, pro_panel=Pro,
port_panel=Ports, ets_panel=Ets, timer_panel=Timers,
fun_panel=Funs, atom_panel=Atoms, dist_panel=Dist,
- mod_panel=Mods, mem_panel=Mem, int_panel=Int,
- sched_panel=Sched
+ mod_panel=Mods, mem_panel=Mem, persistent_panel=Persistent,
+ int_panel=Int, sched_panel=Sched
}) ->
Panel = case check_page_title(Notebook) of
?GEN_STR -> Gen;
@@ -358,6 +366,7 @@ get_active_pid(#state{notebook=Notebook, gen_panel=Gen, pro_panel=Pro,
?DIST_STR -> Dist;
?MOD_STR -> Mods;
?MEM_STR -> Mem;
+ ?PERSISTENT_STR -> Persistent;
?INT_STR -> Int
end,
wx_object:get_pid(Panel).
@@ -365,7 +374,7 @@ get_active_pid(#state{notebook=Notebook, gen_panel=Gen, pro_panel=Pro,
pid2panel(Pid, #state{gen_panel=Gen, pro_panel=Pro, port_panel=Ports,
ets_panel=Ets, timer_panel=Timers, fun_panel=Funs,
atom_panel=Atoms, dist_panel=Dist, mod_panel=Mods,
- mem_panel=Mem, int_panel=Int}) ->
+ mem_panel=Mem, persistent_panel=Persistent, int_panel=Int}) ->
case Pid of
Gen -> ?GEN_STR;
Pro -> ?PRO_STR;
@@ -377,6 +386,7 @@ pid2panel(Pid, #state{gen_panel=Gen, pro_panel=Pro, port_panel=Ports,
Dist -> ?DIST_STR;
Mods -> ?MOD_STR;
Mem -> ?MEM_STR;
+ Persistent -> ?PERSISTENT_STR;
Int -> ?INT_STR;
_ -> "unknown"
end.
diff --git a/lib/observer/src/crashdump_viewer.erl b/lib/observer/src/crashdump_viewer.erl
index 14b086ff58..97bb344cbf 100644
--- a/lib/observer/src/crashdump_viewer.erl
+++ b/lib/observer/src/crashdump_viewer.erl
@@ -74,6 +74,7 @@
loaded_modules/0,
loaded_mod_details/1,
memory/0,
+ persistent_terms/0,
allocated_areas/0,
allocator_info/0,
hash_tables/0,
@@ -139,6 +140,7 @@
-define(node,node).
-define(not_connected,not_connected).
-define(old_instr_data,old_instr_data).
+-define(persistent_terms,persistent_terms).
-define(port,port).
-define(proc,proc).
-define(proc_dictionary,proc_dictionary).
@@ -293,6 +295,8 @@ loaded_mod_details(Mod) ->
call({loaded_mod_details,Mod}).
memory() ->
call(memory).
+persistent_terms() ->
+ call(persistent_terms).
allocated_areas() ->
call(allocated_areas).
allocator_info() ->
@@ -471,6 +475,11 @@ handle_call(memory,_From,State=#state{file=File}) ->
Memory=memory(File),
TW = truncated_warning([?memory]),
{reply,{ok,Memory,TW},State};
+handle_call(persistent_terms,_From,State=#state{file=File,dump_vsn=DumpVsn}) ->
+ TW = truncated_warning([?persistent_terms,?literals]),
+ DecodeOpts = get_decode_opts(DumpVsn),
+ Terms = persistent_terms(File, DecodeOpts),
+ {reply,{ok,Terms,TW},State};
handle_call(allocated_areas,_From,State=#state{file=File}) ->
AllocatedAreas=allocated_areas(File),
TW = truncated_warning([?allocated_areas]),
@@ -580,9 +589,9 @@ truncated_here(Tag) ->
case get(truncated) of
true ->
case get(last_tag) of
- Tag -> % Tag == {TagType,Id}
+ {Tag,_Pos} -> % Tag == {TagType,Id}
true;
- {Tag,_Id} ->
+ {{Tag,_Id},_Pos} ->
true;
_LastTag ->
truncated_earlier(Tag)
@@ -837,8 +846,8 @@ do_read_file(File) ->
case check_dump_version(Id) of
{ok,DumpVsn} ->
reset_tables(),
- insert_index(Tag,Id,N1+1),
- put_last_tag(Tag,""),
+ insert_index(Tag,Id,Pos=N1+1),
+ put_last_tag(Tag,"",Pos),
DecodeOpts = get_decode_opts(DumpVsn),
indexify(Fd,DecodeOpts,Rest,N1),
end_progress(),
@@ -906,23 +915,11 @@ indexify(Fd,DecodeOpts,Bin,N) ->
_ ->
insert_index(Tag,Id,NewPos)
end,
- case put_last_tag(Tag,Id) of
- {?proc_heap,LastId} ->
- [{_,LastPos}] = lookup_index(?proc_heap,LastId),
+ case put_last_tag(Tag,Id,NewPos) of
+ {{?proc_heap,LastId},LastPos} ->
ets:insert(cdv_heap_file_chars,{LastId,N+Start+1-LastPos});
- {?literals,[]} ->
- case get(truncated_reason) of
- undefined ->
- [{_,LastPos}] = lookup_index(?literals,[]),
- ets:insert(cdv_heap_file_chars,
- {literals,N+Start+1-LastPos});
- _ ->
- %% Literals are truncated. Make sure we never
- %% attempt to read in the literals. (Heaps that
- %% references literals will show markers for
- %% incomplete heaps, but will otherwise work.)
- delete_index(?literals, [])
- end;
+ {{?literals,[]},LastPos} ->
+ ets:insert(cdv_heap_file_chars,{literals,N+Start+1-LastPos});
_ -> ok
end,
indexify(Fd,DecodeOpts,Rest,N1);
@@ -964,10 +961,18 @@ tag(Fd,<<>>,N,Gat,Di,Now) ->
check_if_truncated() ->
case get(last_tag) of
- {?ende,_} ->
+ {{?ende,_},_} ->
put(truncated,false),
put(truncated_proc,false);
- TruncatedTag ->
+ {{?literals,[]},_} ->
+ put(truncated,true),
+ put(truncated_proc,false),
+ %% Literals are truncated. Make sure we never
+ %% attempt to read in the literals. (Heaps that
+ %% reference literals will show markers for
+ %% incomplete heaps, but will otherwise work.)
+ delete_index(?literals, []);
+ {TruncatedTag,_} ->
put(truncated,true),
find_truncated_proc(TruncatedTag)
end.
@@ -975,7 +980,6 @@ check_if_truncated() ->
find_truncated_proc({Tag,_Id}) when Tag==?atoms;
Tag==?binary;
Tag==?instr_data;
- Tag==?literals;
Tag==?memory_status;
Tag==?memory_map ->
put(truncated_proc,false);
@@ -1444,15 +1448,7 @@ maybe_other_node2(Channel) ->
expand_memory(Fd,Pid,DumpVsn) ->
DecodeOpts = get_decode_opts(DumpVsn),
put(fd,Fd),
- Dict0 = case get(?literals) of
- undefined ->
- Literals = read_literals(Fd,DecodeOpts),
- put(?literals,Literals),
- put(fd,Fd),
- Literals;
- Literals ->
- Literals
- end,
+ Dict0 = get_literals(Fd,DecodeOpts),
Dict = read_heap(Fd,Pid,DecodeOpts,Dict0),
Expanded = {read_stack_dump(Fd,Pid,DecodeOpts,Dict),
read_messages(Fd,Pid,DecodeOpts,Dict),
@@ -1468,6 +1464,18 @@ expand_memory(Fd,Pid,DumpVsn) ->
end,
{Expanded,IncompleteWarning}.
+get_literals(Fd,DecodeOpts) ->
+ case get(?literals) of
+ undefined ->
+ OldFd = put(fd,Fd),
+ Literals = read_literals(Fd,DecodeOpts),
+ put(fd,OldFd),
+ put(?literals,Literals),
+ Literals;
+ Literals ->
+ Literals
+ end.
+
read_literals(Fd,DecodeOpts) ->
case lookup_index(?literals,[]) of
[{_,Start}] ->
@@ -1594,31 +1602,92 @@ read_heap(Fd,Pid,DecodeOpts,Dict0) ->
Dict0
end.
-read_heap(DecodeOpts,Dict0) ->
- %% This function is never called if the dump is truncated in {?proc_heap,Pid}
- case get(fd) of
- end_of_heap ->
+read_heap(DecodeOpts, Dict0) ->
+ %% This function is never called if the dump is truncated in
+ %% {?proc_heap,Pid}.
+ %%
+ %% It is not always possible to reconstruct the heap terms
+ %% in a single pass, especially if maps are involved.
+ %% See crashdump_helper:literal_map/0 for an example.
+ %%
+ %% Therefore, we need two passes. In the first pass
+ %% we collect all lines without parsing them, and in the
+ %% second pass we parse them.
+ %%
+ %% The first pass follows.
+
+ Lines0 = read_heap_lines(),
+
+ %% Save a map of all unprocessed lines so that deref_ptr() can
+ %% access any line when there are references to terms not yet
+ %% built.
+
+ LineMap = maps:from_list(Lines0),
+ put(line_map, LineMap),
+
+ %% Refc binaries (tag "Yc") must be processed before any sub
+ %% binaries (tag "Ys") referencing them, so we make sure to
+ %% process all the refc binaries first.
+ %%
+ %% The other lines can be processed in any order, but processing
+ %% them in the reverse order compared to how they are printed in
+ %% the crash dump seems to minimize the number of references to
+ %% terms that have not yet been built. That happens to be the
+ %% order of the line list as returned by read_heap_lines/0.
+
+ RefcBins = [Refc || {_,<<"Yc",_/binary>>}=Refc <- Lines0],
+ Lines = RefcBins ++ Lines0,
+
+ %% Second pass.
+
+ init_progress("Processing terms", map_size(LineMap)),
+ Dict = parse_heap_terms(Lines, DecodeOpts, Dict0),
+ erase(line_map),
+ end_progress(),
+ Dict.
+
+read_heap_lines() ->
+ read_heap_lines_1(get(fd), []).
+
+read_heap_lines_1(Fd, Acc) ->
+ case bytes(Fd) of
+ "=" ++ _next_tag ->
end_progress(),
- Dict0;
- Fd ->
- case bytes(Fd) of
- "=" ++ _next_tag ->
- end_progress(),
- put(fd, end_of_heap),
- Dict0;
- Line ->
- update_progress(length(Line)+1),
- Dict = parse(Line,DecodeOpts,Dict0),
- read_heap(DecodeOpts,Dict)
- end
+ put(fd, end_of_heap),
+ Acc;
+ Line0 ->
+ update_progress(length(Line0)+1),
+ {Addr,":"++Line1} = get_hex(Line0),
+
+ %% Reduce the memory consumption by converting the
+ %% line to a binary. Measurements show that it may also
+ %% be beneficial for performance, because it makes the
+ %% garbage collections cheaper.
+
+ Line = list_to_binary(Line1),
+ read_heap_lines_1(Fd, [{Addr,Line}|Acc])
end.
-parse(Line0, DecodeOpts, Dict0) ->
- {Addr,":"++Line1} = get_hex(Line0),
- {_Term,Line,Dict} = parse_heap_term(Line1, Addr, DecodeOpts, Dict0),
- [] = skip_blanks(Line),
+parse_heap_terms([{Addr,Line0}|T], DecodeOpts, Dict0) ->
+ case gb_trees:is_defined(Addr, Dict0) of
+ true ->
+ %% Already parsed (by a recursive call from do_deref_ptr()
+ %% to parse_line()). Nothing to do.
+ parse_heap_terms(T, DecodeOpts, Dict0);
+ false ->
+ %% Parse this previously unparsed term.
+ Dict = parse_line(Addr, Line0, DecodeOpts, Dict0),
+ parse_heap_terms(T, DecodeOpts, Dict)
+ end;
+parse_heap_terms([], _DecodeOpts, Dict) ->
Dict.
+parse_line(Addr, Line0, DecodeOpts, Dict0) ->
+ update_progress(1),
+ Line1 = binary_to_list(Line0),
+ {_Term,Line,Dict} = parse_heap_term(Line1, Addr, DecodeOpts, Dict0),
+ [] = skip_blanks(Line), %Assertion.
+ Dict.
%%-----------------------------------------------------------------
%% Page with one port
@@ -2142,6 +2211,56 @@ get_atom(Atom) when is_binary(Atom) ->
{Atom,nq}. % not quoted
%%-----------------------------------------------------------------
+%% Page with list of all persistent terms
+persistent_terms(File, DecodeOpts) ->
+ case lookup_index(?persistent_terms) of
+ [{_Id,Start}] ->
+ Fd = open(File),
+ pos_bof(Fd,Start),
+ Terms = get_persistent_terms(Fd),
+ Dict = get_literals(Fd,DecodeOpts),
+ parse_persistent_terms(Terms,DecodeOpts,Dict);
+ _ ->
+ []
+ end.
+
+parse_persistent_terms([[Name0,Val0]|Terms],DecodeOpts,Dict) ->
+ {Name,_,_} = parse_term(binary_to_list(Name0),DecodeOpts,Dict),
+ {Val,_,_} = parse_term(binary_to_list(Val0),DecodeOpts,Dict),
+ [{Name,Val}|parse_persistent_terms(Terms,DecodeOpts,Dict)];
+parse_persistent_terms([],_,_) -> [].
+
+get_persistent_terms(Fd) ->
+ case get_chunk(Fd) of
+ {ok,Bin} ->
+ get_persistent_terms(Fd,Bin,[]);
+ eof ->
+ []
+ end.
+
+
+%% Persistent terms are written one per line in the crash dump.
+get_persistent_terms(Fd,Bin,PersistentTerms) ->
+ Bins = binary:split(Bin,<<"\n">>,[global]),
+ get_persistent_terms1(Fd,Bins,PersistentTerms).
+
+get_persistent_terms1(_Fd,[<<"=",_/binary>>|_],PersistentTerms) ->
+ PersistentTerms;
+get_persistent_terms1(Fd,[LastBin],PersistentTerms) ->
+ case get_chunk(Fd) of
+ {ok,Bin0} ->
+ get_persistent_terms(Fd,<<LastBin/binary,Bin0/binary>>,PersistentTerms);
+ eof ->
+ [get_persistent_term(LastBin)|PersistentTerms]
+ end;
+get_persistent_terms1(Fd,[Bin|Bins],PersistentTerms) ->
+    get_persistent_terms1(Fd,Bins,[get_persistent_term(Bin)|PersistentTerms]).
+
+get_persistent_term(Bin) ->
+ binary:split(Bin,<<"|">>).
+
+
+%%-----------------------------------------------------------------
%% Page with memory information
memory(File) ->
case lookup_index(?memory) of
@@ -2746,12 +2865,12 @@ parse_heap_term("Yc"++Line0, Addr, DecodeOpts, D0) -> %Reference-counted binary.
SymbolicBin = {'#CDVBin',Start},
Term = cdvbin(Offset, Sz, SymbolicBin),
D1 = gb_trees:insert(Addr, Term, D0),
- D = gb_trees:insert(Binp, SymbolicBin, D1),
+ D = gb_trees:enter(Binp, SymbolicBin, D1),
{Term,Line,D};
[] ->
Term = '#CDVNonexistingBinary',
D1 = gb_trees:insert(Addr, Term, D0),
- D = gb_trees:insert(Binp, Term, D1),
+ D = gb_trees:enter(Binp, Term, D1),
{Term,Line,D}
end;
parse_heap_term("Ys"++Line0, Addr, DecodeOpts, D0) -> %Sub binary.
@@ -2763,12 +2882,17 @@ parse_heap_term("Ys"++Line0, Addr, DecodeOpts, D0) -> %Sub binary.
{Term,Line,D};
parse_heap_term("Mf"++Line0, Addr, DecodeOpts, D0) -> %Flatmap.
{Size,":"++Line1} = get_hex(Line0),
- {Keys,":"++Line2,D1} = parse_term(Line1, DecodeOpts, D0),
- {Values,Line,D2} = parse_tuple(Size, Line2, Addr,DecodeOpts, D1, []),
- Pairs = zip_tuples(tuple_size(Keys), Keys, Values, []),
- Map = maps:from_list(Pairs),
- D = gb_trees:update(Addr, Map, D2),
- {Map,Line,D};
+ case parse_term(Line1, DecodeOpts, D0) of
+ {Keys,":"++Line2,D1} when is_tuple(Keys) ->
+ {Values,Line,D2} = parse_tuple(Size, Line2, Addr,DecodeOpts, D1, []),
+ Pairs = zip_tuples(tuple_size(Keys), Keys, Values, []),
+ Map = maps:from_list(Pairs),
+ D = gb_trees:update(Addr, Map, D2),
+ {Map,Line,D};
+ {Incomplete,_Line,D1} ->
+ D = gb_trees:insert(Addr, Incomplete, D1),
+ {Incomplete,"",D}
+ end;
parse_heap_term("Mh"++Line0, Addr, DecodeOpts, D0) -> %Head node in a hashmap.
{MapSize,":"++Line1} = get_hex(Line0),
{N,":"++Line2} = get_hex(Line1),
@@ -2871,16 +2995,18 @@ parse_atom_translation_table(N, Line0, As) ->
deref_ptr(Ptr, Line, DecodeOpts, D) ->
- Lookup = fun(D0) ->
- gb_trees:lookup(Ptr, D0)
- end,
+ Lookup0 = fun(D0) ->
+ gb_trees:lookup(Ptr, D0)
+ end,
+ Lookup = wrap_line_map(Ptr, Lookup0),
do_deref_ptr(Lookup, Line, DecodeOpts, D).
deref_bin(Binp0, Offset, Sz, Line, DecodeOpts, D) ->
Binp = Binp0 bor DecodeOpts#dec_opts.bin_addr_adj,
- Lookup = fun(D0) ->
- lookup_binary(Binp, Offset, Sz, D0)
- end,
+ Lookup0 = fun(D0) ->
+ lookup_binary(Binp, Offset, Sz, D0)
+ end,
+ Lookup = wrap_line_map(Binp, Lookup0),
do_deref_ptr(Lookup, Line, DecodeOpts, D).
lookup_binary(Binp, Offset, Sz, D) ->
@@ -2899,26 +3025,36 @@ lookup_binary(Binp, Offset, Sz, D) ->
end
end.
+wrap_line_map(Ptr, Lookup) ->
+ wrap_line_map_1(get(line_map), Ptr, Lookup).
+
+wrap_line_map_1(#{}=LineMap, Ptr, Lookup) ->
+ fun(D) ->
+ case Lookup(D) of
+ {value,_}=Res ->
+ Res;
+ none ->
+ case LineMap of
+ #{Ptr:=Line} ->
+ {line,Ptr,Line};
+ #{} ->
+ none
+ end
+ end
+ end;
+wrap_line_map_1(undefined, _Ptr, Lookup) ->
+ Lookup.
+
do_deref_ptr(Lookup, Line, DecodeOpts, D0) ->
case Lookup(D0) of
{value,Term} ->
{Term,Line,D0};
none ->
- case get(fd) of
- end_of_heap ->
- put(incomplete_heap,true),
- {['#CDVIncompleteHeap'],Line,D0};
- Fd ->
- case bytes(Fd) of
- "="++_ ->
- put(fd, end_of_heap),
- do_deref_ptr(Lookup, Line, DecodeOpts, D0);
- L ->
- update_progress(length(L)+1),
- D = parse(L, DecodeOpts, D0),
- do_deref_ptr(Lookup, Line, DecodeOpts, D)
- end
- end
+ put(incomplete_heap, true),
+ {'#CDVIncompleteHeap',Line,D0};
+ {line,Addr,NewLine} ->
+ D = parse_line(Addr, NewLine, DecodeOpts, D0),
+ do_deref_ptr(Lookup, Line, DecodeOpts, D)
end.
get_hex(L) ->
@@ -3119,6 +3255,7 @@ tag_to_atom("literals") -> ?literals;
tag_to_atom("loaded_modules") -> ?loaded_modules;
tag_to_atom("memory") -> ?memory;
tag_to_atom("mod") -> ?mod;
+tag_to_atom("persistent_terms") -> ?persistent_terms;
tag_to_atom("no_distribution") -> ?no_distribution;
tag_to_atom("node") -> ?node;
tag_to_atom("not_connected") -> ?not_connected;
@@ -3138,13 +3275,13 @@ tag_to_atom(UnknownTag) ->
%%%-----------------------------------------------------------------
%%% Store last tag for use when truncated, and reason if aborted
-put_last_tag(?abort,Reason) ->
- %% Don't overwrite the real last tag, and make sure to return
- %% the previous last tag.
- put(truncated_reason,Reason),
- get(last_tag);
-put_last_tag(Tag,Id) ->
- put(last_tag,{Tag,Id}).
+put_last_tag(?abort,Reason,_Pos) ->
+ %% Don't overwrite the real last tag, and don't return it either,
+ %% since that would make the caller of this function believe that
+ %% the tag was complete.
+ put(truncated_reason,Reason);
+put_last_tag(Tag,Id,Pos) ->
+ put(last_tag,{{Tag,Id},Pos}).
%%%-----------------------------------------------------------------
%%% Fetch next chunk from crashdump file
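
The read_heap/2 rewrite above is two-pass: all heap lines are first
collected into a map keyed by address (stored in the process dictionary as
line_map), and only then parsed, with deref_ptr/wrap_line_map falling back
to that map whenever a term references an address that has not been built
yet. A much simplified sketch of the same idea, using a toy line format
({int,N} or {ref,Addr}) instead of real crash-dump lines:

    -module(two_pass_sketch).
    -export([parse_all/1]).

    %% Collect all unparsed "lines" into a map keyed by address, then build
    %% the terms, resolving references to not-yet-built terms by recursing
    %% into the referenced line first.
    parse_all(Lines) ->
        LineMap = maps:from_list(Lines),
        lists:foldl(fun({Addr,_Line}, Dict) -> build(Addr, LineMap, Dict) end,
                    gb_trees:empty(), Lines).

    build(Addr, LineMap, Dict0) ->
        case gb_trees:is_defined(Addr, Dict0) of
            true ->
                Dict0;                 %% already built via a forward reference
            false ->
                case maps:get(Addr, LineMap) of
                    {int,N} ->
                        gb_trees:insert(Addr, N, Dict0);
                    {ref,Other} ->
                        Dict1 = build(Other, LineMap, Dict0),
                        gb_trees:insert(Addr, gb_trees:get(Other, Dict1), Dict1)
                end
        end.

For example, gb_trees:to_list(two_pass_sketch:parse_all([{32,{ref,48}},{48,{int,7}}]))
returns [{32,7},{48,7}]: the forward reference at address 32 is resolved by
building address 48 first.
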
diff --git a/lib/observer/src/observer.app.src b/lib/observer/src/observer.app.src
index d73293a5f9..d48b846ad2 100644
--- a/lib/observer/src/observer.app.src
+++ b/lib/observer/src/observer.app.src
@@ -34,6 +34,7 @@
cdv_mem_cb,
cdv_mod_cb,
cdv_multi_wx,
+ cdv_persistent_cb,
cdv_port_cb,
cdv_proc_cb,
cdv_table_wx,
diff --git a/lib/observer/src/observer_html_lib.erl b/lib/observer/src/observer_html_lib.erl
index 0c4e32af49..c67fa28c6d 100644
--- a/lib/observer/src/observer_html_lib.erl
+++ b/lib/observer/src/observer_html_lib.erl
@@ -62,7 +62,8 @@ expandable_term_body(Heading,[],_Tab) ->
"Dictionary" -> "No dictionary was found";
"ProcState" -> "Information could not be retrieved,"
" system messages may not be handled by this process.";
- "SaslLog" -> "No log entry was found"
+ "SaslLog" -> "No log entry was found";
+ "Persistent Terms" -> "No persistent terms were found"
end];
expandable_term_body(Heading,Expanded,Tab) ->
Attr = "BORDER=0 CELLPADDING=0 CELLSPACING=1 WIDTH=100%",
diff --git a/lib/observer/test/crashdump_helper.erl b/lib/observer/test/crashdump_helper.erl
index 145ff56b71..d5d3649525 100644
--- a/lib/observer/test/crashdump_helper.erl
+++ b/lib/observer/test/crashdump_helper.erl
@@ -21,7 +21,9 @@
-module(crashdump_helper).
-export([n1_proc/2,remote_proc/2,
dump_maps/0,create_maps/0,
- create_binaries/0,create_sub_binaries/1]).
+ create_binaries/0,create_sub_binaries/1,
+ dump_persistent_terms/0,
+ create_persistent_terms/0]).
-compile(r18).
-include_lib("common_test/include/ct.hrl").
@@ -62,6 +64,7 @@ n1_proc(Creator,_N2,Pid2,Port2,_L) ->
put(ref,Ref),
put(pid,Pid),
put(bin,Bin),
+ put(proc_bins,create_proc_bins()),
put(bins,create_binaries()),
put(sub_bin,SubBin),
put(sub_bins,create_sub_binaries(get(bins))),
@@ -117,6 +120,23 @@ create_sub_binary(Bin, Start, LenSub) ->
<<_:Start/bytes,Sub:Len/bytes,_/bytes>> = Bin,
Sub.
+create_proc_bins() ->
+ Parent = self(),
+ Pid =
+ spawn(
+ fun() ->
+ %% Just reverse the list here, so this binary is not
+ %% confused with the one created in n1_proc/5 above,
+ %% which is used for testing truncation (see
+ %% crashdump_viewer_SUITE:truncate_dump_binary/1)
+ Bin = list_to_binary(lists:reverse(lists:seq(1, 255))),
+ <<A:65/bytes,B:65/bytes,C/bytes>> = Bin,
+ Parent ! {self(),{A,B,C}}
+ end),
+ receive
+ {Pid,ProcBins} -> ProcBins
+ end.
+
%%%
%%% Test dumping of maps. Dumping of maps only from OTP 20.2.
%%%
@@ -142,4 +162,47 @@ create_maps() ->
Map3 = lists:foldl(fun(I, A) ->
A#{I=>I*I}
end, Map2, lists:seq(-10, 0)),
- #{a=>Map0,b=>Map1,c=>Map2,d=>Map3,e=>#{}}.
+ #{a=>Map0,b=>Map1,c=>Map2,d=>Map3,e=>#{},literal=>literal_map()}.
+
+literal_map() ->
+ %% A literal map such as the one below will produce a heap dump
+ %% like this:
+ %%
+ %% Address1:t4:H<Address3>,H<Address4>,H<Address5>,H<Address6>
+ %% Address2:Mf4:H<Address1>:I1,I2,I3,I4
+ %% Address3: ... % "one"
+ %% Address4: ... % "two"
+ %% Address5: ... % "three"
+ %% Address6: ... % "four"
+ %%
+ %% The map cannot be reconstructed in a single sequential pass.
+ %%
+ %% To reconstruct the map, first the string keys "one"
+ %% through "four" must be reconstructed, then the tuple at
+ %% Address1, then the map at Address2.
+
+ #{"one"=>1,"two"=>2,"three"=>3,"four"=>4}.
+
+%%%
+%%% Test dumping of persistent terms (from OTP 21.2).
+%%%
+
+dump_persistent_terms() ->
+ Parent = self(),
+ F = fun() ->
+ register(aaaaaaaa_persistent_terms, self()),
+ put(pts, create_persistent_terms()),
+ Parent ! {self(),done},
+ receive _ -> ok end
+ end,
+ Pid = spawn_link(F),
+ receive
+ {Pid,done} ->
+ {ok,Pid}
+ end.
+
+create_persistent_terms() ->
+ persistent_term:put({?MODULE,first}, {pid,42.0}),
+ persistent_term:put({?MODULE,second}, [1,2,3]),
+ persistent_term:get().
+
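
dump_persistent_terms/0 exercises the persistent_term module introduced in
OTP 21.2; a quick illustration of the API it uses, with a made-up key:

    1> persistent_term:put({my_app, limit}, 100).
    ok
    2> persistent_term:get({my_app, limit}).
    100
    3> lists:keyfind({my_app, limit}, 1, persistent_term:get()).
    {{my_app,limit},100}
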
diff --git a/lib/observer/test/crashdump_viewer_SUITE.erl b/lib/observer/test/crashdump_viewer_SUITE.erl
index 864454cdff..8c5e618f4a 100644
--- a/lib/observer/test/crashdump_viewer_SUITE.erl
+++ b/lib/observer/test/crashdump_viewer_SUITE.erl
@@ -345,6 +345,7 @@ browse_file(File) ->
{ok,_AllocINfo,_AllocInfoTW} = crashdump_viewer:allocator_info(),
{ok,_HashTabs,_HashTabsTW} = crashdump_viewer:hash_tables(),
{ok,_IndexTabs,_IndexTabsTW} = crashdump_viewer:index_tables(),
+ {ok,_PTs,_PTsTW} = crashdump_viewer:persistent_terms(),
io:format(" info read",[]),
@@ -399,6 +400,13 @@ special(File,Procs) ->
crashdump_viewer:expand_binary({SOffset,SSize,SPos}),
io:format(" expand binary ok",[]),
+ ProcBins = proplists:get_value(proc_bins,Dict),
+ {['#CDVBin',0,65,ProcBin],
+ ['#CDVBin',65,65,ProcBin],
+ ['#CDVBin',130,125,ProcBin]} = ProcBins,
+ io:format(" ProcBins ok",[]),
+
+
Binaries = crashdump_helper:create_binaries(),
verify_binaries(Binaries, proplists:get_value(bins,Dict)),
io:format(" binaries ok",[]),
@@ -595,6 +603,23 @@ special(File,Procs) ->
Maps = proplists:get_value(maps,Dict),
io:format(" maps ok",[]),
ok;
+ ".persistent_terms" ->
+ %% I registered a process as aaaaaaaa_persistent_terms in
+ %% the dump to make sure it will be the first in the list
+ %% when sorted on names.
+ [#proc{pid=Pid0,name=Name}|_Rest] = lists:keysort(#proc.name,Procs),
+ "aaaaaaaa_persistent_terms" = Name,
+ Pid = pid_to_list(Pid0),
+ {ok,ProcDetails=#proc{},[]} = crashdump_viewer:proc_details(Pid),
+ io:format(" process details ok",[]),
+
+ #proc{dict=Dict} = ProcDetails,
+ %% io:format("~p\n", [Dict]),
+ Pts1 = crashdump_helper:create_persistent_terms(),
+ Pts2 = proplists:get_value(pts,Dict),
+ true = lists:sort(Pts1) =:= lists:sort(Pts2),
+ io:format(" persistent terms ok",[]),
+ ok;
_ ->
ok
end,
@@ -679,9 +704,11 @@ do_create_dumps(DataDir,Rel) ->
CD5 = dump_with_size_limit_reached(DataDir,Rel,"trunc_bytes"),
CD6 = dump_with_unicode_atoms(DataDir,Rel,"unicode"),
CD7 = dump_with_maps(DataDir,Rel,"maps"),
+ CD8 = dump_with_persistent_terms(DataDir,Rel,"persistent_terms"),
TruncDumpMod = truncate_dump_mod(CD1),
TruncatedDumpsBinary = truncate_dump_binary(CD1),
- {[CD1,CD2,CD3,CD4,CD5,CD6,CD7,TruncDumpMod|TruncatedDumpsBinary],
+ {[CD1,CD2,CD3,CD4,CD5,CD6,CD7,CD8,
+ TruncDumpMod|TruncatedDumpsBinary],
DosDump};
_ ->
{[CD1,CD2], DosDump}
@@ -850,6 +877,16 @@ dump_with_maps(DataDir,Rel,DumpName) ->
?t:stop_node(n1),
CD.
+dump_with_persistent_terms(DataDir,Rel,DumpName) ->
+ Opt = rel_opt(Rel),
+ Pz = "-pz \"" ++ filename:dirname(code:which(?MODULE)) ++ "\"",
+ PzOpt = [{args,Pz}],
+ {ok,N1} = ?t:start_node(n1,peer,Opt ++ PzOpt),
+ {ok,_Pid} = rpc:call(N1,crashdump_helper,dump_persistent_terms,[]),
+ CD = dump(N1,DataDir,Rel,DumpName),
+ ?t:stop_node(n1),
+ CD.
+
dump(Node,DataDir,Rel,DumpName) ->
Crashdump = filename:join(DataDir, dump_prefix(Rel)++DumpName),
rpc:call(Node,os,putenv,["ERL_CRASH_DUMP",Crashdump]),
diff --git a/lib/os_mon/c_src/cpu_sup.c b/lib/os_mon/c_src/cpu_sup.c
index 17ef48c26e..c96a5c9f7c 100644
--- a/lib/os_mon/c_src/cpu_sup.c
+++ b/lib/os_mon/c_src/cpu_sup.c
@@ -152,6 +152,8 @@ static void util_measure(unsigned int **result_vec, int *result_sz);
#if defined(__sun__)
static unsigned int misc_measure(char* name);
+#elif defined(__linux__)
+static unsigned int misc_measure(char cmd);
#endif
static void sendi(unsigned int data);
static void sendv(unsigned int data[], int ints);
@@ -231,6 +233,11 @@ int main(int argc, char** argv) {
case AVG1: sendi(misc_measure("avenrun_1min")); break;
case AVG5: sendi(misc_measure("avenrun_5min")); break;
case AVG15: sendi(misc_measure("avenrun_15min")); break;
+#elif defined(__linux__)
+ case NPROCS:
+ case AVG1:
+ case AVG5:
+ case AVG15: sendi(misc_measure(cmd)); break;
#elif defined(__OpenBSD__) || (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__) || defined(__DragonFly__)
case NPROCS: bsd_count_procs(); break;
case AVG1: bsd_loadavg(0); break;
@@ -238,7 +245,7 @@ int main(int argc, char** argv) {
case AVG15: bsd_loadavg(2); break;
#endif
#if defined(__sun__) || defined(__linux__) || (defined(__APPLE__) && defined(__MACH__)) || defined(__FreeBSD__)
- case UTIL: util_measure(&rv,&sz); sendv(rv, sz); break;
+ case UTIL: util_measure(&rv,&sz); sendv(rv, sz); break;
#endif
case QUIT: free((void*)rv); return 0;
default: error("Bad command"); break;
@@ -329,6 +336,22 @@ static void bsd_count_procs(void) {
#if defined(__linux__)
+static unsigned int misc_measure(char cmd) {
+ struct sysinfo info;
+
+ if (sysinfo(&info))
+ error(strerror(errno));
+
+ switch (cmd) {
+ case AVG1: return (unsigned int)(info.loads[0] / 256);
+ case AVG5: return (unsigned int)(info.loads[1] / 256);
+ case AVG15: return (unsigned int)(info.loads[2] / 256);
+ case NPROCS: return info.procs;
+ }
+
+ return -1;
+}
+
static cpu_t *read_procstat(FILE *fp, cpu_t *cpu) {
char buffer[BUFFERSIZE];
@@ -357,8 +380,24 @@ static void util_measure(unsigned int **result_vec, int *result_sz) {
FILE *fp;
unsigned int *rv = NULL;
cpu_t cpu;
-
+
+ rv = *result_vec;
+ rv[0] = no_of_cpus;
+
if ( (fp = fopen(PROCSTAT,"r")) == NULL) {
+ if (errno == EACCES) { /* SELinux */
+ rv[1] = 1; /* just the cpu id */
+ ++rv; /* first value is number of cpus */
+ ++rv; /* second value is number of entries */
+ for (i = 0; i < no_of_cpus; ++i) {
+ rv[0] = CU_CPU_ID;
+ rv[1] = i;
+ rv += 1*2;
+ }
+ *result_sz = 2 + 2*1 * no_of_cpus;
+ return;
+ }
+
/* Check if procfs is mounted,
* otherwise:
* try and try again, bad procsfs.
@@ -367,20 +406,19 @@ static void util_measure(unsigned int **result_vec, int *result_sz) {
return;
}
- /*ignore read*/
+ /*ignore read*/
if (fgets(buffer, BUFFERSIZE, fp) == NULL) {
*result_sz = 0;
return;
}
- rv = *result_vec;
- rv[0] = no_of_cpus;
+
rv[1] = CU_VALUES;
++rv; /* first value is number of cpus */
++rv; /* second value is number of entries */
for (i = 0; i < no_of_cpus; ++i) {
read_procstat(fp, &cpu);
-
+
rv[ 0] = CU_CPU_ID; rv[ 1] = cpu.id;
rv[ 2] = CU_USER; rv[ 3] = cpu.user;
rv[ 4] = CU_NICE_USER; rv[ 5] = cpu.nice_user;
diff --git a/lib/os_mon/src/cpu_sup.erl b/lib/os_mon/src/cpu_sup.erl
index 81e049ef22..ba2d89313e 100644
--- a/lib/os_mon/src/cpu_sup.erl
+++ b/lib/os_mon/src/cpu_sup.erl
@@ -220,17 +220,21 @@ code_change(_OldVsn, State, _Extra) ->
%% internal functions
%%----------------------------------------------------------------------
-get_uint32_measurement(Request, #internal{os_type = {unix, linux}}) ->
- {ok,F} = file:open("/proc/loadavg",[read,raw]),
- {ok,D} = file:read_line(F),
- ok = file:close(F),
- {ok,[Load1,Load5,Load15,_PRun,PTotal],_} = io_lib:fread("~f ~f ~f ~d/~d", D),
- case Request of
- ?avg1 -> sunify(Load1);
- ?avg5 -> sunify(Load5);
- ?avg15 -> sunify(Load15);
- ?ping -> 4711;
- ?nprocs -> PTotal
+get_uint32_measurement(Request, #internal{port = P, os_type = {unix, linux}}) ->
+ case file:open("/proc/loadavg",[read,raw]) of
+ {ok,F} ->
+ {ok,D} = file:read_line(F),
+ ok = file:close(F),
+ {ok,[Load1,Load5,Load15,_PRun,PTotal],_} = io_lib:fread("~f ~f ~f ~d/~d", D),
+ case Request of
+ ?avg1 -> sunify(Load1);
+ ?avg5 -> sunify(Load5);
+ ?avg15 -> sunify(Load15);
+ ?ping -> 4711;
+ ?nprocs -> PTotal
+ end;
+ {error,_} ->
+ port_server_call(P, Request)
end;
get_uint32_measurement(Request, #internal{port = P, os_type = {unix, Sys}}) when
Sys == sunos;
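
Both the /proc/loadavg path and the new port fallback report load averages
in cpu_sup's usual fixed-point form: the returned integer is the load
average multiplied by 256 (sunify/1 above does the scaling for the
file-based path). A small sketch of reading it back; the value shown is
illustrative:

    1> Avg1 = cpu_sup:avg1().
    339
    2> Avg1 / 256.
    1.32421875
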
diff --git a/lib/public_key/asn1/OTP-PKIX.asn1 b/lib/public_key/asn1/OTP-PKIX.asn1
index 37196bb9bf..9bcd99fba3 100644
--- a/lib/public_key/asn1/OTP-PKIX.asn1
+++ b/lib/public_key/asn1/OTP-PKIX.asn1
@@ -326,8 +326,13 @@ PublicKeyAlgorithm ::= SEQUENCE {
OPTIONAL }
SupportedSignatureAlgorithms SIGNATURE-ALGORITHM-CLASS ::= {
- dsa-with-sha1 | dsaWithSHA1 | md2-with-rsa-encryption |
- md5-with-rsa-encryption | sha1-with-rsa-encryption | sha-1with-rsa-encryption |
+ dsa-with-sha1 | dsaWithSHA1 |
+ dsa-with-sha224 |
+ dsa-with-sha256 |
+ md2-with-rsa-encryption |
+ md5-with-rsa-encryption |
+ sha1-with-rsa-encryption |
+ sha-1with-rsa-encryption |
sha224-with-rsa-encryption |
sha256-with-rsa-encryption |
sha384-with-rsa-encryption |
@@ -368,6 +373,21 @@ SupportedPublicKeyAlgorithms PUBLIC-KEY-ALGORITHM-CLASS ::= {
ID id-dsaWithSHA1
TYPE DSAParams }
+ dsa-with-sha224 SIGNATURE-ALGORITHM-CLASS ::= {
+ ID id-dsa-with-sha224
+ TYPE DSAParams }
+
+ dsa-with-sha256 SIGNATURE-ALGORITHM-CLASS ::= {
+ ID id-dsa-with-sha256
+ TYPE DSAParams }
+
+ id-dsa-with-sha224 OBJECT IDENTIFIER ::= {
+ joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+ csor(3) algorithms(4) id-dsa-with-sha2(3) 1 }
+
+ id-dsa-with-sha256 OBJECT IDENTIFIER ::= {
+ joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
+ csor(3) algorithms(4) id-dsa-with-sha2(3) 2 }
--
-- RSA Keys and Signatures
--
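
The two object identifiers added above are 2.16.840.1.101.3.4.3.1
(dsa-with-sha224) and 2.16.840.1.101.3.4.3.2 (dsa-with-sha256). Together
with the clauses added to public_key:pkix_sign_types/1 later in this change
they resolve as follows:

    1> public_key:pkix_sign_types({2,16,840,1,101,3,4,3,1}).
    {sha224,dsa}
    2> public_key:pkix_sign_types({2,16,840,1,101,3,4,3,2}).
    {sha256,dsa}
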
diff --git a/lib/public_key/doc/src/notes.xml b/lib/public_key/doc/src/notes.xml
index 62b4b4ca1b..7ed60ed3ca 100644
--- a/lib/public_key/doc/src/notes.xml
+++ b/lib/public_key/doc/src/notes.xml
@@ -35,6 +35,22 @@
<file>notes.xml</file>
</header>
+<section><title>Public_Key 1.6.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Add DSA SHA2 oids in public_keys ASN1-spec and
+ public_key:pkix_sign_types/1</p>
+ <p>
+ Own Id: OTP-15367</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Public_Key 1.6.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/public_key/doc/src/public_key.xml b/lib/public_key/doc/src/public_key.xml
index a4d7e4a734..ee3877ddd0 100644
--- a/lib/public_key/doc/src/public_key.xml
+++ b/lib/public_key/doc/src/public_key.xml
@@ -109,6 +109,13 @@
</datatype>
<datatype>
+ <name name="ed_public_key"/>
+ <desc>
+ <warning><p>This format of the EdDSA curves is temporary and may change without prior notice!</p></warning>
+ </desc>
+ </datatype>
+
+ <datatype>
<name name="private_key"/>
<name name="rsa_private_key"/>
<name name="dsa_private_key"/>
@@ -118,6 +125,14 @@
</datatype>
<datatype>
+ <name name="ed_private_key"/>
+ <desc>
+ <warning><p>This format of the EdDSA curves is temporary and may change without prior notice!</p></warning>
+ </desc>
+ </datatype>
+
+
+ <datatype>
<name name="key_params"/>
<desc>
</desc>
diff --git a/lib/public_key/src/pubkey_pem.erl b/lib/public_key/src/pubkey_pem.erl
index b92790554f..d7e5bc3ad8 100644
--- a/lib/public_key/src/pubkey_pem.erl
+++ b/lib/public_key/src/pubkey_pem.erl
@@ -222,7 +222,9 @@ pem_start('CertificateList') ->
pem_start('EcpkParameters') ->
<<"-----BEGIN EC PARAMETERS-----">>;
pem_start('ECPrivateKey') ->
- <<"-----BEGIN EC PRIVATE KEY-----">>.
+ <<"-----BEGIN EC PRIVATE KEY-----">>;
+pem_start({no_asn1, new_openssh}) -> %% Temporarily in the prototype of this format
+ <<"-----BEGIN OPENSSH PRIVATE KEY-----">>.
pem_end(<<"-----BEGIN CERTIFICATE-----">>) ->
<<"-----END CERTIFICATE-----">>;
@@ -250,6 +252,8 @@ pem_end(<<"-----BEGIN EC PARAMETERS-----">>) ->
<<"-----END EC PARAMETERS-----">>;
pem_end(<<"-----BEGIN EC PRIVATE KEY-----">>) ->
<<"-----END EC PRIVATE KEY-----">>;
+pem_end(<<"-----BEGIN OPENSSH PRIVATE KEY-----">>) ->
+ <<"-----END OPENSSH PRIVATE KEY-----">>;
pem_end(_) ->
undefined.
@@ -278,7 +282,10 @@ asn1_type(<<"-----BEGIN X509 CRL-----">>) ->
asn1_type(<<"-----BEGIN EC PARAMETERS-----">>) ->
'EcpkParameters';
asn1_type(<<"-----BEGIN EC PRIVATE KEY-----">>) ->
- 'ECPrivateKey'.
+ 'ECPrivateKey';
+asn1_type(<<"-----BEGIN OPENSSH PRIVATE KEY-----">>) ->
+ {no_asn1, new_openssh}. %% Temporarily in the prototype of this format
+
pem_decrypt() ->
<<"Proc-Type: 4,ENCRYPTED">>.
diff --git a/lib/public_key/src/pubkey_ssh.erl b/lib/public_key/src/pubkey_ssh.erl
index 02c061efc9..d0ef4abfb1 100644
--- a/lib/public_key/src/pubkey_ssh.erl
+++ b/lib/public_key/src/pubkey_ssh.erl
@@ -25,7 +25,8 @@
-export([decode/2, encode/2,
dh_gex_group/4,
- dh_gex_group_sizes/0
+ dh_gex_group_sizes/0,
+         pad/2, new_openssh_encode/1, new_openssh_decode/1 % For test and experiments
]).
-define(UINT32(X), X:32/unsigned-big-integer).
@@ -67,6 +68,8 @@ decode(Bin, rfc4716_public_key) ->
rfc4716_decode(Bin);
decode(Bin, ssh2_pubkey) ->
ssh2_pubkey_decode(Bin);
+decode(Bin, new_openssh) ->
+ new_openssh_decode(Bin);
decode(Bin, Type) ->
openssh_decode(Bin, Type).
@@ -177,6 +180,70 @@ join_entry([Line | Lines], Entry) ->
rfc4716_pubkey_decode(BinKey) -> ssh2_pubkey_decode(BinKey).
+%% From https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
+new_openssh_decode(<<"openssh-key-v1",0,
+ ?DEC_BIN(CipherName, _L1),
+ ?DEC_BIN(KdfName, _L2),
+ ?DEC_BIN(KdfOptions, _L3),
+ ?UINT32(N), % number of keys
+ ?DEC_BIN(PublicKey, _L4),
+ ?DEC_BIN(Encrypted, _L5),
+ _Rest/binary
+ >>) ->
+ %%io:format("CipherName = ~p~nKdfName = ~p~nKdfOptions = ~p~nPublicKey = ~p~nN = ~p~nEncrypted = ~p~nRest = ~p~n", [CipherName, KdfName, KdfOptions, PublicKey, N, Encrypted, _Rest]),
+ new_openssh_decode(CipherName, KdfName, KdfOptions, PublicKey, N, Encrypted).
+
+new_openssh_decode(<<"none">>, <<"none">>, <<"">>, _PublicKey, 1,
+ <<?UINT32(CheckInt),
+ ?UINT32(CheckInt),
+ ?DEC_BIN(Type, _Lt),
+ ?DEC_BIN(PubKey, _Lpu),
+ ?DEC_BIN(PrivPubKey, _Lpripub),
+ ?DEC_BIN(_Comment, _C1),
+ _Pad/binary>>) ->
+ case {Type,PrivPubKey} of
+ {<<"ssh-ed25519">>,
+ <<PrivKey:32/binary, PubKey:32/binary>>} ->
+ {ed_pri, ed25519, PubKey, PrivKey};
+
+ {<<"ssh-ed448">>,
+ <<PrivKey:57/binary, PubKey/binary>>} -> % "Intelligent" guess from
+ % https://tools.ietf.org/html/draft-ietf-curdle-ssh-ed25519-ed448
+ {ed_pri, ed448, PubKey, PrivKey}
+ end.
+
+
+new_openssh_encode({ed_pri,_,PubKey,PrivKey}=Key) ->
+ Type = key_type(Key),
+ CheckInt = 17*256+17, %crypto:strong_rand_bytes(4),
+ Comment = <<>>,
+ PublicKey = <<?STRING(Type),?STRING(PubKey)>>,
+ CipherName = <<"none">>,
+ KdfName = <<"none">>,
+ KdfOptions = <<>>,
+ BlockSize = 8, % Crypto dependent
+ NumKeys = 1,
+ Encrypted0 = <<?UINT32(CheckInt),
+ ?UINT32(CheckInt),
+ ?STRING(Type),
+ ?STRING(PubKey),
+ ?STRING(<<PrivKey/binary,PubKey/binary>>),
+ ?STRING(Comment)
+ >>,
+ Pad = pad(size(Encrypted0), BlockSize),
+ Encrypted = <<Encrypted0/binary, Pad/binary>>,
+ <<"openssh-key-v1",0,
+ ?STRING(CipherName),
+ ?STRING(KdfName),
+ ?STRING(KdfOptions),
+ ?UINT32(NumKeys),
+ ?STRING(PublicKey),
+ ?STRING(Encrypted)>>.
+
+pad(N, BlockSize) when N>BlockSize -> pad(N rem BlockSize, BlockSize);
+pad(N, BlockSize) -> list_to_binary(lists:seq(1,BlockSize-N)).
+
+
openssh_decode(Bin, FileType) ->
Lines = binary:split(Bin, <<"\n">>, [global]),
do_openssh_decode(FileType, Lines, []).
@@ -235,6 +302,8 @@ do_openssh_decode(openssh_public_key = FileType, [Line | Lines], Acc) ->
<<"ssh-rsa">> -> true;
<<"ssh-dss">> -> true;
<<"ecdsa-sha2-",Curve/binary>> -> is_ssh_curvename(Curve);
+ <<"ssh-ed25519">> -> true;
+ <<"ssh-ed448">> -> true;
_ -> false
end,
@@ -247,7 +316,9 @@ do_openssh_decode(openssh_public_key = FileType, [Line | Lines], Acc) ->
Comment = string:strip(string_decode(iolist_to_binary(Comment0)), right, $\n),
do_openssh_decode(FileType, Lines,
[{openssh_pubkey_decode(KeyType, Base64Enc),
- [{comment, Comment}]} | Acc])
+ [{comment, Comment}]} | Acc]);
+ _ when KnownKeyType==false ->
+ do_openssh_decode(FileType, Lines, Acc)
end.
@@ -386,6 +457,10 @@ line_end(Comment) ->
key_type(#'RSAPublicKey'{}) -> <<"ssh-rsa">>;
key_type({_, #'Dss-Parms'{}}) -> <<"ssh-dss">>;
+key_type({ed_pub,ed25519,_}) -> <<"ssh-ed25519">>;
+key_type({ed_pub,ed448,_}) -> <<"ssh-ed448">>;
+key_type({ed_pri,ed25519,_,_}) -> <<"ssh-ed25519">>;
+key_type({ed_pri,ed448,_,_}) -> <<"ssh-ed448">>;
key_type({#'ECPoint'{}, {namedCurve,Curve}}) -> <<"ecdsa-sha2-", (public_key:oid2ssh_curvename(Curve))/binary>>.
comma_list_encode([Option], []) ->
@@ -404,7 +479,12 @@ ssh2_pubkey_encode({Y, #'Dss-Parms'{p = P, q = Q, g = G}}) ->
<<?STRING(<<"ssh-dss">>), ?Empint(P), ?Empint(Q), ?Empint(G), ?Empint(Y)>>;
ssh2_pubkey_encode(Key={#'ECPoint'{point = Q}, {namedCurve,OID}}) ->
Curve = public_key:oid2ssh_curvename(OID),
- <<?STRING(key_type(Key)), ?Estring(Curve), ?Estring(Q)>>.
+ <<?STRING(key_type(Key)), ?Estring(Curve), ?Estring(Q)>>;
+ssh2_pubkey_encode({ed_pub, ed25519, Key}) ->
+ <<?STRING(<<"ssh-ed25519">>), ?Estring(Key)>>;
+ssh2_pubkey_encode({ed_pub, ed448, Key}) ->
+ <<?STRING(<<"ssh-ed448">>), ?Estring(Key)>>.
+
ssh2_pubkey_decode(<<?DEC_BIN(Type,_TL), Bin/binary>>) ->
@@ -430,12 +510,23 @@ ssh2_pubkey_decode(<<"ssh-dss">>,
ssh2_pubkey_decode(<<"ecdsa-sha2-",Id/binary>>,
<<?DEC_BIN(Id, _IL),
?DEC_BIN(Q, _QL)>>) ->
- {#'ECPoint'{point = Q}, {namedCurve,public_key:ssh_curvename2oid(Id)}}.
+ {#'ECPoint'{point = Q}, {namedCurve,public_key:ssh_curvename2oid(Id)}};
+
+ssh2_pubkey_decode(<<"ssh-ed25519">>,
+ <<?DEC_BIN(Key, _L)>>) ->
+ {ed_pub, ed25519, Key};
+
+ssh2_pubkey_decode(<<"ssh-ed448">>,
+ <<?DEC_BIN(Key, _L)>>) ->
+ {ed_pub, ed448, Key}.
+
is_key_field(<<"ssh-dss">>) -> true;
is_key_field(<<"ssh-rsa">>) -> true;
+is_key_field(<<"ssh-ed25519">>) -> true;
+is_key_field(<<"ssh-ed448">>) -> true;
is_key_field(<<"ecdsa-sha2-",Id/binary>>) -> is_ssh_curvename(Id);
is_key_field(_) -> false.
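
In the openssh-key-v1 container the private-key section is padded with the
deterministic byte sequence 1,2,3,... up to the cipher block size; pad/2
above produces that padding and new_openssh_decode/1 simply skips it
(_Pad). Two quick examples of pad/2, which is exported for test purposes:

    1> pubkey_ssh:pad(10, 8).
    <<1,2,3,4,5,6>>
    2> pubkey_ssh:pad(8, 8).
    <<>>
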
diff --git a/lib/public_key/src/public_key.erl b/lib/public_key/src/public_key.erl
index 3f609ce6c6..75d40d2e8a 100644
--- a/lib/public_key/src/public_key.erl
+++ b/lib/public_key/src/public_key.erl
@@ -68,8 +68,8 @@
pki_asn1_type/0, asn1_type/0, ssh_file/0, der_encoded/0,
key_params/0, digest_type/0]).
--type public_key() :: rsa_public_key() | dsa_public_key() | ec_public_key().
--type private_key() :: rsa_private_key() | dsa_private_key() | ec_private_key().
+-type public_key() :: rsa_public_key() | dsa_public_key() | ec_public_key() | ed_public_key() .
+-type private_key() :: rsa_private_key() | dsa_private_key() | ec_private_key() | ed_private_key() .
-type rsa_public_key() :: #'RSAPublicKey'{}.
-type rsa_private_key() :: #'RSAPrivateKey'{}.
@@ -79,6 +79,9 @@
-type ecpk_parameters_api() :: ecpk_parameters() | #'ECParameters'{} | {namedCurve, Name::crypto:ec_named_curve()}.
-type ec_public_key() :: {#'ECPoint'{}, ecpk_parameters_api()}.
-type ec_private_key() :: #'ECPrivateKey'{}.
+-type ed_public_key() :: {ed_pub, ed25519|ed448, Key::binary()}.
+-type ed_private_key() :: {ed_pri, ed25519|ed448, Pub::binary(), Priv::binary()}.
+
-type key_params() :: #'DHParameter'{} | {namedCurve, oid()} | #'ECParameters'{} |
{rsa, Size::integer(), PubExp::integer()}.
-type der_encoded() :: binary().
@@ -166,6 +169,8 @@ pem_entry_decode({'SubjectPublicKeyInfo', Der, _}) ->
ECCParams = der_decode('EcpkParameters', Params),
{#'ECPoint'{point = Key0}, ECCParams}
end;
+pem_entry_decode({{no_asn1,new_openssh}, Special, not_encrypted}) ->
+ ssh_decode(Special, new_openssh);
pem_entry_decode({Asn1Type, Der, not_encrypted}) when is_atom(Asn1Type),
is_binary(Der) ->
der_decode(Asn1Type, Der).
@@ -609,6 +614,10 @@ pkix_sign_types(?'id-dsa-with-sha1') ->
{sha, dsa};
pkix_sign_types(?'id-dsaWithSHA1') ->
{sha, dsa};
+pkix_sign_types(?'id-dsa-with-sha224') ->
+ {sha224, dsa};
+pkix_sign_types(?'id-dsa-with-sha256') ->
+ {sha256, dsa};
pkix_sign_types(?'ecdsa-with-SHA1') ->
{sha, ecdsa};
pkix_sign_types(?'ecdsa-with-SHA256') ->
@@ -1066,8 +1075,9 @@ pkix_verify_hostname_match_fun(https) ->
-spec ssh_decode(SshBin, Type) ->
Decoded
when SshBin :: binary(),
- Type :: ssh2_pubkey | OtherType,
+ Type :: ssh2_pubkey | OtherType | InternalType,
OtherType :: public_key | ssh_file(),
+ InternalType :: new_openssh,
Decoded :: Decoded_ssh2_pubkey
| Decoded_OtherType,
Decoded_ssh2_pubkey :: public_key(),
@@ -1086,7 +1096,8 @@ ssh_decode(SshBin, Type) when is_binary(SshBin),
Type == openssh_public_key;
Type == auth_keys;
Type == known_hosts;
- Type == ssh2_pubkey ->
+ Type == ssh2_pubkey;
+ Type == new_openssh ->
pubkey_ssh:decode(SshBin, Type).
%%--------------------------------------------------------------------
@@ -1229,6 +1240,8 @@ format_sign_key(#'DSAPrivateKey'{p = P, q = Q, g = G, x = X}) ->
{dss, [P, Q, G, X]};
format_sign_key(#'ECPrivateKey'{privateKey = PrivKey, parameters = Param}) ->
{ecdsa, [PrivKey, ec_curve_spec(Param)]};
+format_sign_key({ed_pri, Curve, _Pub, Priv}) ->
+ {eddsa, [Priv,Curve]};
format_sign_key(_) ->
badarg.
@@ -1238,6 +1251,8 @@ format_verify_key({#'ECPoint'{point = Point}, Param}) ->
{ecdsa, [Point, ec_curve_spec(Param)]};
format_verify_key({Key, #'Dss-Parms'{p = P, q = Q, g = G}}) ->
{dss, [P, Q, G, Key]};
+format_verify_key({ed_pub, Curve, Key}) ->
+ {eddsa, [Key,Curve]};
%% Convert private keys to public keys
format_verify_key(#'RSAPrivateKey'{modulus = Mod, publicExponent = Exp}) ->
format_verify_key(#'RSAPublicKey'{modulus = Mod, publicExponent = Exp});
diff --git a/lib/public_key/test/public_key_SUITE.erl b/lib/public_key/test/public_key_SUITE.erl
index 1955e9e119..878489eb0f 100644
--- a/lib/public_key/test/public_key_SUITE.erl
+++ b/lib/public_key/test/public_key_SUITE.erl
@@ -44,7 +44,9 @@ all() ->
encrypt_decrypt,
{group, sign_verify},
pkix, pkix_countryname, pkix_emailaddress, pkix_path_validation,
- pkix_iso_rsa_oid, pkix_iso_dsa_oid, pkix_crl, general_name,
+ pkix_iso_rsa_oid, pkix_iso_dsa_oid,
+ pkix_dsa_sha2_oid,
+ pkix_crl, general_name,
pkix_verify_hostname_cn,
pkix_verify_hostname_subjAltName,
pkix_verify_hostname_subjAltName_IP,
@@ -1114,6 +1116,13 @@ pkix_iso_dsa_oid(Config) when is_list(Config) ->
{_, dsa} = public_key:pkix_sign_types(SigAlg#'SignatureAlgorithm'.algorithm).
%%--------------------------------------------------------------------
+pkix_dsa_sha2_oid() ->
+ [{doc, "Test support dsa_sha2 oid"}].
+pkix_dsa_sha2_oid(Config) when is_list(Config) ->
+ {sha224, dsa} = public_key:pkix_sign_types(?'id-dsa-with-sha224'),
+ {sha256, dsa} = public_key:pkix_sign_types(?'id-dsa-with-sha256').
+
+%%--------------------------------------------------------------------
pkix_crl() ->
[{doc, "test pkix_crl_* functions"}].
diff --git a/lib/public_key/vsn.mk b/lib/public_key/vsn.mk
index 4e52028c36..96eaf4f962 100644
--- a/lib/public_key/vsn.mk
+++ b/lib/public_key/vsn.mk
@@ -1 +1 @@
-PUBLIC_KEY_VSN = 1.6.2
+PUBLIC_KEY_VSN = 1.6.3
diff --git a/lib/reltool/src/reltool_utils.erl b/lib/reltool/src/reltool_utils.erl
index 060a0912f9..2afa386cb3 100644
--- a/lib/reltool/src/reltool_utils.erl
+++ b/lib/reltool/src/reltool_utils.erl
@@ -47,6 +47,9 @@
call/2, cast/2, reply/3]).
+%% For testing
+-export([erl_libs/2]).
+
-include_lib("kernel/include/file.hrl").
-include_lib("wx/include/wx.hrl").
-include("reltool.hrl").
@@ -55,7 +58,15 @@ root_dir() ->
code:root_dir().
erl_libs() ->
- string:lexemes(os:getenv("ERL_LIBS", ""), ":;").
+ erl_libs(os:getenv("ERL_LIBS", ""), os:type()).
+
+erl_libs(ErlLibs, OsType) when is_list(ErlLibs) ->
+ Sep =
+ case OsType of
+ {win32, _} -> ";";
+ _ -> ":"
+ end,
+ string:lexemes(ErlLibs, Sep).
lib_dirs(Dir) ->
case erl_prim_loader:list_dir(Dir) of
diff --git a/lib/reltool/test/reltool_server_SUITE.erl b/lib/reltool/test/reltool_server_SUITE.erl
index 4e1937d479..9ee61b595d 100644
--- a/lib/reltool/test/reltool_server_SUITE.erl
+++ b/lib/reltool/test/reltool_server_SUITE.erl
@@ -142,7 +142,8 @@ all() ->
use_selected_vsn,
use_selected_vsn_relative_path,
non_standard_vsn_id,
- undefined_regexp].
+ undefined_regexp,
+ windows_erl_libs].
groups() ->
[].
@@ -2546,10 +2547,21 @@ undefined_regexp(_Config) ->
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Checks that reltool_utils can correctly read Windows ERL_LIBS
+
+windows_erl_libs(_Config) ->
+ WinErlLibs =
+ "C:\\Program Files\\Erlang Libs;C:\\Program Files\\More Erlang Libs",
+ Ret = reltool_utils:erl_libs(WinErlLibs, {win32, nt}),
+ ?m(["C:\\Program Files\\Erlang Libs","C:\\Program Files\\More Erlang Libs"],
+ Ret),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Library functions
erl_libs() ->
- string:lexemes(os:getenv("ERL_LIBS", ""), ":;").
+ reltool_utils:erl_libs().
datadir(Config) ->
%% Removes the trailing slash...
diff --git a/lib/sasl/src/sasl.app.src b/lib/sasl/src/sasl.app.src
index 688aff16f1..5d45af0b50 100644
--- a/lib/sasl/src/sasl.app.src
+++ b/lib/sasl/src/sasl.app.src
@@ -43,5 +43,5 @@
{env, []},
{mod, {sasl, []}},
{runtime_dependencies, ["tools-2.6.14","stdlib-3.4","kernel-5.3",
- "erts-9.0"]}]}.
+ "erts-@OTP-13468@"]}]}.
diff --git a/lib/sasl/src/sasl.appup.src b/lib/sasl/src/sasl.appup.src
index 83ee328af2..d37c5b3d95 100644
--- a/lib/sasl/src/sasl.appup.src
+++ b/lib/sasl/src/sasl.appup.src
@@ -19,10 +19,10 @@
{"%VSN%",
%% Up from - max one major revision back
[{<<"3\\.0\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0
- {<<"3\\.1(\\.[0-2]+)*">>,[restart_new_emulator]}, % OTP-20.1+
- {<<"3\\.1(\\.[3-9]+)*">>,[restart_new_emulator]}], % OTP-21
+ {<<"3\\.1(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+
+ {<<"3\\.2(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-21.*
%% Down to - max one major revision back
[{<<"3\\.0\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.*
- {<<"3\\.1(\\.[0-2]+)*">>,[restart_new_emulator]}, % OTP-20.1+
- {<<"3\\.1(\\.[3-9]+)*">>,[restart_new_emulator]}] % OTP-21
+ {<<"3\\.1(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+
+ {<<"3\\.2(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-21.*
}.
diff --git a/lib/sasl/src/systools_make.erl b/lib/sasl/src/systools_make.erl
index 6916107623..f085246924 100644
--- a/lib/sasl/src/systools_make.erl
+++ b/lib/sasl/src/systools_make.erl
@@ -1562,10 +1562,10 @@ mandatory_modules() ->
preloaded() ->
%% Sorted
- [erl_prim_loader,erl_tracer,erlang,
+ [atomics, counters, erl_prim_loader,erl_tracer,erlang,
erts_code_purger,erts_dirty_process_signal_handler,
erts_internal,erts_literal_area_collector,
- init,otp_ring0,prim_buffer,prim_eval,prim_file,
+ init,otp_ring0,persistent_term,prim_buffer,prim_eval,prim_file,
prim_inet,prim_zip,zlib].
%%______________________________________________________________________
diff --git a/lib/ssh/doc/src/Makefile b/lib/ssh/doc/src/Makefile
index 77fa356092..4e32dd9976 100644
--- a/lib/ssh/doc/src/Makefile
+++ b/lib/ssh/doc/src/Makefile
@@ -45,6 +45,7 @@ XML_REF3_FILES = \
ssh_connection.xml \
ssh_server_channel.xml \
ssh_server_key_api.xml \
+ ssh_file.xml \
ssh_sftp.xml \
ssh_sftpd.xml \
@@ -56,8 +57,8 @@ XML_CHAPTER_FILES = \
notes.xml \
introduction.xml \
using_ssh.xml \
+ terminology.xml \
configure_algos.xml
-# ssh_protocol.xml \
BOOK_FILES = book.xml
diff --git a/lib/ssh/doc/src/notes.xml b/lib/ssh/doc/src/notes.xml
index 7e77c6a457..42bdf667f8 100644
--- a/lib/ssh/doc/src/notes.xml
+++ b/lib/ssh/doc/src/notes.xml
@@ -230,6 +230,22 @@
</section>
</section>
+<section><title>Ssh 4.6.9.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Incompatibility with newer OpenSSH fixed. Previously,
+ OpenSSH versions 7.8 and later could cause Erlang SSH to exit.</p>
+ <p>
+ Own Id: OTP-15413</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Ssh 4.6.9.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
@@ -3869,4 +3885,3 @@
</section>
</chapter>
-
diff --git a/lib/ssh/doc/src/ref_man.xml b/lib/ssh/doc/src/ref_man.xml
index df37b0244f..60572b985b 100644
--- a/lib/ssh/doc/src/ref_man.xml
+++ b/lib/ssh/doc/src/ref_man.xml
@@ -40,6 +40,7 @@
<xi:include href="ssh_connection.xml"/>
<xi:include href="ssh_client_key_api.xml"/>
<xi:include href="ssh_server_key_api.xml"/>
+ <xi:include href="ssh_file.xml"/>
<xi:include href="ssh_sftp.xml"/>
<xi:include href="ssh_sftpd.xml"/>
</application>
diff --git a/lib/ssh/doc/src/specs.xml b/lib/ssh/doc/src/specs.xml
index acdbe2ddfd..a6517f3660 100644
--- a/lib/ssh/doc/src/specs.xml
+++ b/lib/ssh/doc/src/specs.xml
@@ -6,6 +6,7 @@
<xi:include href="../specs/specs_ssh_connection.xml"/>
<xi:include href="../specs/specs_ssh_server_channel.xml"/>
<xi:include href="../specs/specs_ssh_server_key_api.xml"/>
+ <xi:include href="../specs/specs_ssh_file.xml"/>
<xi:include href="../specs/specs_ssh_sftp.xml"/>
<xi:include href="../specs/specs_ssh_sftpd.xml"/>
</specs>
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index f238bf2ca8..8435fced11 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -99,8 +99,8 @@
</p>
<p>The paths could easily be changed by options:
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> and
- <seealso marker="#type-system_dir_daemon_option"><c>system_dir</c></seealso>.
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> and
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>.
</p>
<p>A completly different storage could be interfaced by writing call-back modules
using the behaviours
@@ -123,12 +123,12 @@
<item><c>ssh_host_ecdsa_key</c> and <c>ssh_host_ecdsa_key.pub</c></item>
</list>
<p>The host keys directory could be changed with the option
- <seealso marker="#type-system_dir_daemon_option"><c>system_dir</c></seealso>.</p>
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>.</p>
</item>
<item>Optional: one or more <i>User's public key</i> in case of <c>publickey</c> authorization.
Default is to store them concatenated in the file <c>.ssh/authorized_keys</c> in the user's home directory.
<p>The user keys directory could be changed with the option
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>.</p>
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>.</p>
</item>
</list>
</section>
@@ -138,7 +138,7 @@
<p>The keys and some other data are by default stored in files in the directory <c>.ssh</c>
in the user's home directory.</p>
<p>The directory could be changed with the option
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>.
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>.
</p>
<list>
<item>Optional: a list of <i>Host public key(s)</i> for previously connected hosts. This list
@@ -183,31 +183,6 @@
</datatype>
<datatype>
- <name name="pref_public_key_algs_client_option"/>
- <desc>
- <p>List of user (client) public key algorithms to try to use.</p>
- <p>The default value is the <c>public_key</c> entry in the list returned by
- <seealso marker="#default_algorithms/0">ssh:default_algorithms/0</seealso>.
- </p>
- <p>If there is no public key of a specified type available, the corresponding entry is ignored.
- Note that the available set is dependent on the underlying cryptolib and current user's public keys.
- </p>
- <p>See also the option <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>
- for specifying the path to the user's keys.
- </p>
- </desc>
- </datatype>
-
- <datatype>
- <name name="pubkey_passphrase_client_options"/>
- <desc>
- <p>If the user's DSA, RSA or ECDSA key is protected by a passphrase, it can be
- supplied with thoose options.
- </p>
- </desc>
- </datatype>
-
- <datatype>
<name name="host_accepting_client_options"/>
<name name="accept_hosts"/>
<name name="fp_digest_alg"/>
@@ -220,7 +195,7 @@
<p>This option guides the <c>connect</c> function on how to act when the connected server presents a Host
Key that the client has not seen before. The default is to ask the user with a question on stdio of whether to
accept or reject the new Host Key.
- See the option <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>
+ See the option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
for specifying the path to the file <c>known_hosts</c> where previously accepted Host Keys are recorded.
See also the option
<seealso marker="#type-key_cb_common_option">key_cb</seealso>
@@ -276,7 +251,7 @@
accept question the next time the same host is connected. If the option
<seealso marker="#type-key_cb_common_option"><c>key_cb</c></seealso>
is not present, the key is saved in the file "known_hosts". See option
- <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> for
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> for
the location of that file.
</p>
<p>If <c>false</c>, the key is not saved and the key will still be unknown
@@ -406,9 +381,20 @@
<datatype>
<name name="exec_daemon_option"/>
+ <name name="exec_spec"/>
+ <desc/>
+ </datatype>
+ <datatype>
+ <name name="exec_fun"/>
+ <desc/>
+ </datatype>
+ <datatype>
<name name="'exec_fun/1'"/>
<name name="'exec_fun/2'"/>
<name name="'exec_fun/3'"/>
+ <desc/>
+ </datatype>
+ <datatype>
<name name="exec_result"/>
<desc>
<p>This option changes how the daemon execute exec-requests from clients. The term in the return value
@@ -478,18 +464,6 @@
<name name="pwdfun_4"/>
<desc>
<taglist>
- <tag><marker id="type-system_dir_daemon_option"/><c>system_dir</c></tag>
- <item>
- <p>Sets the system directory, containing the host key files
- that identify the host keys for <c>ssh</c>. Defaults to
- <c>/etc/ssh</c>.</p>
- <p>For security reasons, this directory is normally accessible only to the root user.</p>
- <p>See also the option
- <seealso marker="#type-key_cb_common_option">key_cb</seealso>
- for the general way to handle keys.
- </p>
- </item>
-
<tag><c>auth_method_kb_interactive_data</c></tag>
<item>
<p>Sets the text strings that the daemon sends to the client for presentation to the user when
@@ -502,7 +476,7 @@
</p>
</item>
- <tag><c>user_passwords</c></tag>
+ <tag><marker id="option-user_passwords"/><c>user_passwords</c></tag>
<item>
<p>Provides passwords for password authentication. The passwords are used when someone tries
to connect to the server and public key user-authentication fails. The option provides
@@ -510,7 +484,7 @@
</p>
</item>
- <tag><c>password</c></tag>
+ <tag><marker id="option-password"/><c>password</c></tag>
<item>
<p>Provides a global password that authenticates any user.</p>
<warning>
@@ -519,7 +493,9 @@
</warning>
</item>
- <tag><c>pwdfun</c> with <c>pwdfun_4()</c></tag>
+ <tag><marker id="option-pwdfun"/><c>pwdfun</c> with
+ <seealso marker="#type-pwdfun_4"><c>pwdfun_4()</c></seealso>
+ </tag>
<item>
<p>Provides a function for password validation. This could used for calling an external system or handeling
passwords stored as hash values.
@@ -546,7 +522,9 @@
can be used for this. The return value <c>disconnect</c> is useful for this.</p>
</item>
- <tag><c>pwdfun</c> with <c>pwdfun_2()</c></tag>
+ <tag><c>pwdfun</c> with
+ <seealso marker="#type-pwdfun_2"><c>pwdfun_2()</c></seealso>
+ </tag>
<item>
<p>Provides a function for password validation. This function is called with user and password
as strings, and returns:</p>
@@ -725,21 +703,6 @@
</datatype>
<datatype>
- <name name="user_dir_common_option"/>
- <desc>
- <p>Sets the user directory. That is, the directory containing <c>ssh</c> configuration
- files for the user, such as
- <c>known_hosts</c>, <c>id_rsa</c>, <c>id_dsa</c>>, <c>id_ecdsa</c> and <c>authorized_key</c>.
- Defaults to the directory normally referred to as <c>~/.ssh</c>.
- </p>
- <p>See also the option
- <seealso marker="#type-key_cb_common_option">key_cb</seealso>
- for the general way to handle keys.
- </p>
- </desc>
- </datatype>
-
- <datatype>
<name name="profile_common_option"/>
<desc>
<p>Used together with <c>ip-address</c> and <c>port</c> to
@@ -795,7 +758,8 @@
</p>
<p>The <c>Opts</c> defaults to <c>[]</c> when only the <c>Module</c> is specified.
</p>
- <p>The default value of this option is <c>{ssh_file, []}</c>.
+ <p>The default value of this option is <c>{ssh_file, []}</c>. See also the manpage of
+ <seealso marker="ssh:ssh_file">ssh_file</seealso>.
</p>
<p>A call to the call-back function <c>F</c> will be</p>
<code>
@@ -804,13 +768,32 @@
<p>where <c>...</c> are arguments to <c>F</c> as in
<seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and/or
<seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>.
- The <c>UserOptions</c> are the options given to <c>ssh:connect</c>, <c>ssh:shell</c> or <c>ssh:daemon</c>.
+ The <c>UserOptions</c> are the options given to
+ <seealso marker="ssh:ssh#connect-3">ssh:connect</seealso>,
+ <seealso marker="ssh:ssh#shell-1">ssh:shell</seealso> or
+ <seealso marker="ssh:ssh#daemon-2">ssh:daemon</seealso>.
</p>
</desc>
</datatype>
<datatype>
+ <name name="pref_public_key_algs_common_option"/>
+ <desc>
+ <p>List of user (client) public key algorithms to try to use.</p>
+ <p>The default value is the <c>public_key</c> entry in the list returned by
+ <seealso marker="#default_algorithms/0">ssh:default_algorithms/0</seealso>.
+ </p>
+ <p>If there is no public key of a specified type available, the corresponding entry is ignored.
+ Note that the available set is dependent on the underlying cryptolib and current user's public keys.
+ </p>
+ <p>See also the option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
+ for specifying the path to the user's keys.
+ </p>
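+ <p>As an illustration, a client could restrict the user key algorithms it tries to EdDSA and RSA
+ (the host name below is just an example):
+ </p>
+ <code>
+ssh:connect("host.example.com", 22,
+            [{pref_public_key_algs, ['ssh-ed25519', 'rsa-sha2-256']}]).
+ </code>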
+ </desc>
+ </datatype>
+
+ <datatype>
<name name="disconnectfun_common_option"/>
<desc>
<p>Provides a fun to implement your own logging when the peer disconnects.</p>
diff --git a/lib/ssh/doc/src/ssh_app.xml b/lib/ssh/doc/src/ssh_app.xml
index e80bb1853d..0c22a50c3f 100644
--- a/lib/ssh/doc/src/ssh_app.xml
+++ b/lib/ssh/doc/src/ssh_app.xml
@@ -74,13 +74,18 @@
<c>id_ecdsa_key</c>,
<c>known_hosts</c>, and <c>authorized_keys</c> in ~/.ssh,
and for the host key files in <c>/etc/ssh</c>. These locations can be changed
- by the options <c>user_dir</c> and <c>system_dir</c>.
+ by the options
+ <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso> and
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>.
</p>
<p>Public key handling can also be customized through a callback module that
implements the behaviors
<seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and
<seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>.
</p>
+ <p>See also the default callback module documentation in
+ <seealso marker="ssh_file">ssh_file</seealso>.
+ </p>
</section>
<section>
@@ -170,6 +175,8 @@
<item>ecdsa-sha2-nistp384</item>
<item>ecdsa-sha2-nistp521</item>
<item>ecdsa-sha2-nistp256</item>
+ <item>ssh-ed25519</item>
+ <item>ssh-ed448</item>
<item>ssh-rsa</item>
<item>rsa-sha2-256</item>
<item>rsa-sha2-512</item>
@@ -373,7 +380,11 @@
<item>
<url href="https://tools.ietf.org/html/draft-ietf-curdle-ssh-curves">Secure Shell (SSH) Key Exchange Method using Curve25519 and Curve448 (work in progress)</url>
</item>
-
+
+ <item>
+ <url href="https://tools.ietf.org/html/draft-ietf-curdle-ssh-ed25519-ed448">Ed25519 and Ed448 public key algorithms for the Secure Shell (SSH) protocol (work in progress)</url>
+ </item>
+
</list>
</section>
diff --git a/lib/ssh/doc/src/ssh_file.xml b/lib/ssh/doc/src/ssh_file.xml
new file mode 100644
index 0000000000..6681d9c306
--- /dev/null
+++ b/lib/ssh/doc/src/ssh_file.xml
@@ -0,0 +1,285 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year><year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>ssh_file</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+ <module>ssh_file</module>
+ <modulesummary>Default callback module for the client's and server's database operations in the ssh application</modulesummary>
+ <description>
+ <p>This module is the default callback handler for the client's and the server's user and host "database" operations.
+ All data, for instance key pairs, are stored in files in the normal file system. This page documents the files, where they
+ are stored, and the configuration options for this callback module.
+ </p>
+ <p>The intention is to be compatible with the
+ <url href="http://www.openssh.com">OpenSSH</url>
+ storage in files. Therefore it mimics directories and filenames of
+ <url href="http://www.openssh.com">OpenSSH</url>.
+ </p>
+
+ <p>The <c>ssh_file</c> module implements the <seealso marker="ssh:ssh_server_key_api">ssh_server_key_api</seealso> and
+ the <seealso marker="ssh:ssh_client_key_api">ssh_client_key_api</seealso> behaviours.
+ This enables the user to write their own interface using, for example, a database handler.
+ </p>
+ <p>Such an alternative callback module could be used by setting the option
+ <seealso marker="ssh:ssh#type-key_cb_common_option"><c>key_cb</c></seealso>
+ when starting a client or a server (with for example
+ <seealso marker="ssh:ssh#connect-3">ssh:connect</seealso>,
+ <seealso marker="ssh:ssh#daemon-2">ssh:daemon</seealso> of
+ <seealso marker="ssh:ssh#shell-1">ssh:shell</seealso>
+ ).
+ </p>
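+ <p>As a minimal sketch, a daemon could be started with a hypothetical callback module
+ <c>my_key_store</c> instead of the default <c>ssh_file</c>:
+ </p>
+ <code>
+%% my_key_store is assumed to implement ssh_server_key_api;
+%% the option list is passed on to it as key_cb_private.
+{ok, Daemon} = ssh:daemon(8022, [{key_cb, {my_key_store, [{backend,db}]}},
+                                 {user_passwords, [{"alice","secret"}]}]).
+ </code>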
+
+ <note>
+ <p>The functions are <i>Callbacks</i> for the SSH app. They are not intended to be called from the user's code!
+ </p>
+ </note>
+ </description>
+
+ <section>
+ <title>Files, directories and who uses them</title>
+ <section>
+ <title>Daemons</title>
+ <p>Daemons use all files stored in the <seealso marker="#SYSDIR">SYSDIR</seealso> directory.
+ </p>
+ <p>Optionally, in case of <c>publickey</c> authorization, one or more of the remote user's public keys
+ in the <seealso marker="#USERDIR">USERDIR</seealso> directory are used.
+ See the files
+ <seealso marker="#USERDIR-authorized_keys"><c>USERDIR/authorized_keys</c></seealso> and
+ <seealso marker="#USERDIR-authorized_keys2"><c>USERDIR/authorized_keys2</c></seealso>.
+ </p>
+ </section>
+
+ <section>
+ <title>Clients</title>
+ <p>Clients use all files stored in the <seealso marker="#USERDIR">USERDIR</seealso> directory.
+ </p>
+ </section>
+
+ <section>
+ <title>Directory contents</title>
+ <taglist>
+ <tag><marker id="LOCALUSER"/>LOCALUSER</tag>
+ <item><p>The user name of the OS process running the Erlang virtual machine (emulator).</p>
+ </item>
+
+ <tag><marker id="SYSDIR"/>SYSDIR</tag>
+ <item><p>This is the directory holding the server's files:</p>
+ <list>
+ <item><marker id="SYSDIR-ssh_host_dsa_key"/><c>ssh_host_dsa_key</c> - private dss host key (optional)</item>
+ <item><marker id="SYSDIR-ssh_host_rsa_key"/><c>ssh_host_rsa_key</c> - private rsa host key (optional)</item>
+ <item><marker id="SYSDIR-ssh_host_ecdsa_key"/><c>ssh_host_ecdsa_key</c> - private ecdsa host key (optional)</item>
+ <item><marker id="SYSDIR-ssh_host_ed25519_key"/><c>ssh_host_ed25519_key</c> - private eddsa host key for curve 25519 (optional)</item>
+ <item><marker id="SYSDIR-ssh_host_ed448_key"/><c>ssh_host_ed448_key</c> - private eddsa host key for curve 448 (optional)</item>
+ </list>
+ <p>At least one host key must be defined. The default value of SYSDIR is <marker id="#/etc/ssh"/><c>/etc/ssh</c>.
+ </p>
+ <p>For security reasons, this directory is normally accessible only to the root user.
+ </p>
+ <p>To change the SYSDIR, see the <seealso marker="#type-system_dir_daemon_option">system_dir</seealso> option.
+ </p>
+ </item>
+
+ <tag><marker id="USERDIR"/>USERDIR</tag>
+ <item><p>This is the directory holding the files:</p>
+ <list>
+ <item><marker id="USERDIR-authorized_keys"/><c>authorized_keys</c>
+ and, as second alternative
+ <marker id="USERDIR-authorized_keys2"/><c>authorized_keys2</c> -
+ the user's public keys are stored concatenated in one of those files.
+ </item>
+ <item><marker id="USERDIR-known_hosts"/><c>known_hosts</c> - host keys from hosts visited
+ concatenated. The file is created and used by the client.</item>
+ <item><marker id="USERDIR-id_dsa"/><c>id_dsa</c> - private dss user key (optional)</item>
+ <item><marker id="USERDIR-id_rsa"/><c>id_rsa</c> - private rsa user key (optional)</item>
+ <item><marker id="USERDIR-id_ecdsa"/><c>id_ecdsa</c> - private ecdsa user key (optional)</item>
+ <item><marker id="USERDIR-id_ed25519"/><c>id_ed25519</c> - private eddsa user key for curve 25519 (optional)</item>
+ <item><marker id="USERDIR-id_ed448"/><c>id_ed448</c> - private eddsa user key for curve 448 (optional)</item>
+ </list>
+ <p>The default value of USERDIR is <c>/home/</c><seealso marker="#LOCALUSER"><c>LOCALUSER</c></seealso><c>/.ssh</c>.
+ </p>
+ <p>To change the USERDIR, see the <seealso marker="#type-user_dir_common_option">user_dir</seealso> option.
+ </p>
+ </item>
+ </taglist>
+ </section>
+ </section>
+
+ <datatypes>
+ <datatype_title>Options for the default ssh_file callback module</datatype_title>
+ <datatype>
+ <name name="user_dir_common_option"/>
+ <desc>
+ <p>Sets the <seealso marker="#USERDIR">user directory</seealso>.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="user_dir_fun_common_option"/>
+ <name name="user2dir"/>
+ <desc>
+ <p>Sets the <seealso marker="#USERDIR">user directory</seealso> dynamically
+ by evaluating the <c>user2dir</c> function.
+ </p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="system_dir_daemon_option"/>
+ <desc>
+ <p>Sets the <seealso marker="#SYSDIR">system directory</seealso>.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="pubkey_passphrase_client_options"/>
+ <desc>
+ <p>If the user's DSA, RSA or ECDSA key is protected by a passphrase, it can be
+ supplied with those options.
+ </p>
+ <p>Note that EdDSA passphrases (Curves 25519 and 448) are not implemented.</p>
+ </desc>
+ </datatype>
+
+ </datatypes>
+
+ <funcs>
+ <func>
+ <name>host_key(Algorithm, DaemonOptions) -> {ok, Key} | {error, Reason}</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the api description in
+ <seealso marker="ssh:ssh_server_key_api#Module:host_key-2">ssh_server_key_api, Module:host_key/2</seealso>.
+ </p>
+ <p><strong>Options</strong></p>
+ <list>
+ <item><seealso marker="#type-system_dir_daemon_option">system_dir</seealso></item>
+ <!-- item>dsa_pass_phrase</item -->
+ <!-- item>rsa_pass_phrase</item -->
+ <!-- item>ecdsa_pass_phrase</item -->
+ </list>
+ <p><strong>Files</strong></p>
+ <list>
+ <item><seealso marker="#SYSDIR-ssh_host_rsa_key"><c>SYSDIR/ssh_host_rsa_key</c></seealso></item>
+ <item><seealso marker="#SYSDIR-ssh_host_dsa_key"><c>SYSDIR/ssh_host_dsa_key</c></seealso></item>
+ <item><seealso marker="#SYSDIR-ssh_host_ecdsa_key"><c>SYSDIR/ssh_host_ecdsa_key</c></seealso></item>
+ <item><seealso marker="#SYSDIR-ssh_host_ed25519_key"><c>SYSDIR/ssh_host_ed25519_key</c></seealso></item>
+ <item><seealso marker="#SYSDIR-ssh_host_ed448_key"><c>SYSDIR/ssh_host_ed448_key</c>c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>is_auth_key(PublicUserKey, User, DaemonOptions) -> Result</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the api description in
+ <seealso marker="ssh:ssh_server_key_api#Module:is_auth_key-3">ssh_server_key_api: Module:is_auth_key/3</seealso>.
+ </p>
+ <p><strong>Options</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_fun_common_option">user_dir_fun</seealso></item>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ </list>
+ <p><strong>Files</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-authorized_keys"><c>USERDIR/authorized_keys</c></seealso></item>
+ <item><seealso marker="#USERDIR-authorized_keys2"><c>USERDIR/authorized_keys2</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>add_host_key(HostNames, PublicHostKey, ConnectOptions) -> ok | {error, Reason}</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the api description in
+ <seealso marker="ssh:ssh_client_key_api#Module:add_host_key-3">ssh_client_key_api, Module:add_host_key/3</seealso>.
+ </p>
+ <p><strong>Option</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ </list>
+ <p><strong>File</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-known_hosts"><c>USERDIR/known_hosts</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>is_host_key(Key, Host, Algorithm, ConnectOptions) -> Result</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the api description in
+ <seealso marker="ssh:ssh_client_key_api#Module:is_host_key-4">ssh_client_key_api, Module:is_host_key/4</seealso>.
+ </p>
+ <p><strong>Option</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ </list>
+ <p><strong>File</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-known_hosts"><c>USERDIR/known_hosts</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ <func>
+ <name>user_key(Algorithm, ConnectOptions) -> {ok, PrivateKey} | {error, Reason}</name>
+ <fsummary></fsummary>
+ <desc>
+ <p><strong>Types and description</strong></p>
+ <p>See the api description in
+ <seealso marker="ssh:ssh_client_key_api#Module:user_key-2">ssh_client_key_api, Module:user_key/2</seealso>.
+ </p>
+ <p><strong>Options</strong></p>
+ <list>
+ <item><seealso marker="#type-user_dir_common_option">user_dir</seealso></item>
+ <item><seealso marker="#type-pubkey_passphrase_client_options">dsa_pass_phrase</seealso></item>
+ <item><seealso marker="#type-pubkey_passphrase_client_options">rsa_pass_phrase</seealso></item>
+ <item><seealso marker="#type-pubkey_passphrase_client_options">ecdsa_pass_phrase</seealso></item>
+ </list>
+ <p>Note that EdDSA passphrases (Curves 25519 and 448) are not implemented.</p>
+ <p><strong>Files</strong></p>
+ <list>
+ <item><seealso marker="#USERDIR-id_dsa"><c>USERDIR/id_dsa</c></seealso></item>
+ <item><seealso marker="#USERDIR-id_rsa"><c>USERDIR/id_rsa</c></seealso></item>
+ <item><seealso marker="#USERDIR-id_ecdsa"><c>USERDIR/id_ecdsa</c></seealso></item>
+ <item><seealso marker="#USERDIR-id_ed25519"><c>USERDIR/id_ed25519</c></seealso></item>
+ <item><seealso marker="#USERDIR-id_ed448"><c>USERDIR/id_ed448</c></seealso></item>
+ </list>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
diff --git a/lib/ssh/doc/src/ssh_sftp.xml b/lib/ssh/doc/src/ssh_sftp.xml
index ea55126cb3..8c105147d6 100644
--- a/lib/ssh/doc/src/ssh_sftp.xml
+++ b/lib/ssh/doc/src/ssh_sftp.xml
@@ -425,7 +425,6 @@
<type>
<v>ChannelPid = pid()</v>
<v>Handle = term()</v>
- <v>Position = integer()</v>
<v>Len = integer()</v>
<v>Timeout = timeout()</v>
<v>Data = string() | binary()</v>
diff --git a/lib/ssh/doc/src/terminology.xml b/lib/ssh/doc/src/terminology.xml
new file mode 100644
index 0000000000..db1e08970d
--- /dev/null
+++ b/lib/ssh/doc/src/terminology.xml
@@ -0,0 +1,185 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2018</year>
+ <year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Terminology</title>
+ <prepared></prepared>
+ <docno></docno>
+ <approved></approved>
+ <date></date>
+ <rev></rev>
+ <file>terminology.xml</file>
+ </header>
+
+ <section>
+ <title>General Information</title>
+ <p>In the following, terms that may cause confusion are explained.
+ </p>
+ </section>
+
+ <section>
+ <title>The term "user"</title>
+ <p>A "user" is a term that everyone understands intuitively. However, the understandings may differ which can
+ cause confusion.
+ </p>
+ <p>The term is used differently in <url href="http://www.openssh.com">OpenSSH</url> and SSH in Erlang/OTP.
+ The reason is the different environments and use cases, which are not immediately obvious.
+ </p>
+ <p>This chapter aims at explaining the differences and giving a rationale for why Erlang/OTP handles "user" as
+ it does.
+ </p>
+
+ <section>
+ <title>In OpenSSH</title>
+ <p>Many have been in contact with the command 'ssh' on a Linux machine (or similar) to remotely log in to
+ another machine. One types
+ </p>
+ <code>ssh host</code>
+ <p>to log in to the machine named <c>host</c>. The command prompts for your password on the remote <c>host</c> and
+ then you can read, write and execute with the rights your <i>user name</i> has on the remote <c>host</c>. There are
+ stronger variants with pre-distributed keys or certificates, but those are for now just details in the
+ authentication process.
+ </p>
+ <p>You could log in as the user <c>anotheruser</c> with
+ </p>
+ <code>ssh anotheruser@host</code>
+ <p>and you will then be able to act as <c>anotheruser</c> on the <c>host</c> if authorized correctly.
+ </p>
+ <p>So what does <i>"your user name has rights"</i> mean? In a UNIX/Linux/etc context it means exactly what it does in that context:
+ The <i>user</i> could read, write and execute programs according to the OS rules.
+ In addition, the user has a home directory (<c>$HOME</c>) and there is a <c>$HOME/.ssh/</c> directory
+ with ssh-specific files.
+ </p>
+ <section>
+ <title>SSH password authentication</title>
+ <p>When SSH tries to log in to a host, the ssh protocol communicates the user name (as a string) and a password.
+ The remote ssh server checks that there is such a user defined and that the provided password is acceptable.
+ </p>
+ <p>If so, the user is authorized.
+ </p>
+ </section>
+ <section>
+ <title>SSH public key authentication</title>
+ <p>This is a stronger method where the ssh protocol brings the user name, the user's public key and some
+ cryptographic information which we could ignore here.
+ </p>
+ <p>The ssh server on the remote host checks:
+ </p>
+ <list>
+ <item>That the <i>user</i> has a home directory,</item>
+ <item>that the home directory contains a <c>.ssh/</c> directory, and</item>
+ <item>that the <c>authorized_keys</c> file in that <c>.ssh/</c> directory contains the public key just received</item>
+ </list>
+ <p>If so, the user is authorized.
+ </p>
+ </section>
+ <section>
+ <title>The SSH server on UNIX/Linux/etc after a successful authentication</title>
+ <p>After a successful incoming authentication, a new process runs as the just authenticated user.</p>
+ <p>The next step is to start a service according to the ssh request. In case of a request for a shell,
+ a new one is started which handles the OS commands that arrive from the client (that is, "you").
+ </p>
+ <p>In case of an sftp request, an sftp server is started with the user's rights. It can read, write or delete
+ files if that is allowed for that user.
+ </p>
+ </section>
+ </section>
+
+ <section>
+ <title>In Erlang/OTP SSH</title>
+ <p>For the Erlang/OTP SSH server the situation is different. The server executes in an Erlang process
+ in the Erlang emulator which in turn executes in an OS process. The emulator does not try to change its
+ user when authenticated over the SSH protocol.
+ So the remote user name is only for authentication purposes in the Erlang/OTP SSH application.
+ </p>
+ <section>
+ <title>Password authentication in Erlang SSH</title>
+ <p>The Erlang/OTP SSH server checks the user name and password in the following order:
+ </p>
+ <list type="ordered">
+ <item>If a
+ <seealso marker="ssh:ssh#option-pwdfun"><c>pwdfun</c></seealso>
+ is defined, that one is called and the returned boolean is the authentication result.
+ </item>
+ <item>Else, if the
+ <seealso marker="ssh:ssh#option-user_passwords"><c>user_passwords</c></seealso>
+ option is defined and the username and the password match, the authentication is a success.
+ </item>
+ <item>Else, if the option
+ <seealso marker="ssh:ssh#option-password"><c>password</c></seealso>
+ is defined and matches the password, the authentication is a success.
+ Note that the use of this option is not recommended in non-test code.
+ </item>
+ </list>
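+ <p>For example (the module <c>my_auth</c> and the credentials below are only illustrations),
+ a daemon could be given either a <c>pwdfun</c> or fixed credentials:
+ </p>
+ <code>
+%% 1) A pwdfun alone decides; my_auth:check/2 is a hypothetical external check:
+{ok, D1} = ssh:daemon(8022, [{pwdfun, fun(User, Pwd) -> my_auth:check(User, Pwd) end}]).
+
+%% 2) Without a pwdfun, fixed credentials can be used instead:
+{ok, D2} = ssh:daemon(8023, [{user_passwords, [{"alice","secret"}]}]).
+ </code>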
+ </section>
+ <section>
+ <title>Public key authentication in Erlang SSH</title>
+ <p>The user name, public key and cryptographic data (a signature) that are sent by the client are used as follows
+ (some steps are left out for clarity):
+ </p>
+ <list type="ordered">
+ <item>A callback module is selected using the options
+ <seealso marker="ssh:ssh#type-key_cb_common_option"><c>key_cb</c></seealso>.
+ </item>
+ <item>The callback module is used to check that the provided public key is one of the user's pre-stored keys.
+ In case of the default callback module, the files <c>authorized_keys</c> and <c>authorized_keys2</c>
+ are searched in a directory found in the following order:
+ <list>
+ <item>If the option
+ <seealso marker="ssh:ssh_file#type-user_dir_fun_common_option"><c>user_dir_fun</c></seealso>
+ is defined, that fun is called and the returned directory is used,
+ </item>
+ <item>Else, if the option
+ <seealso marker="ssh:ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
+ is defined, that directory is used,
+ </item>
+ <item>Else the subdirectory <c>.ssh</c> in the home directory of the user executing
+ the OS process of the Erlang emulator is used.
+ </item>
+ </list>
+ If the provided public key is not found, the authentication fails.
+ </item>
+ <item>Finally, if the provided public key is found, the signature provided by the client is checked with
+ the public key.
+ </item>
+ </list>
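+ <p>As an illustration, a daemon could look up each user's <c>authorized_keys</c> in a per-user
+ directory by means of a <c>user_dir_fun</c> (the base path is only an example):
+ </p>
+ <code>
+{ok, D} = ssh:daemon(8022,
+                     [{user_dir_fun,
+                       fun(RemoteUser) ->
+                               %% directory searched for authorized_keys(2)
+                               filename:join("/var/ssh-users", RemoteUser)
+                       end}]).
+ </code>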
+ </section>
+ <section>
+ <title>The Erlang/OTP SSH server after a successful authentication</title>
+ <p>After a successful authentication, an <i>Erlang process</i> handles the service request from the remote
+ ssh client. The rights of that process are those of the user of the OS process running the Erlang emulator.
+ </p>
+ <p>If a shell service request arrives at the server, an <i>Erlang shell</i> is opened in the server's emulator.
+ The rights in that shell are independent of the just authenticated user.
+ </p>
+ <p>In case of an sftp request, an sftp server is started with the rights of the user of the Erlang emulator's OS
+ process. So with sftp the authenticated user does not influence the rights.
+ </p>
+ <p>So after an authentication, the user name is not used anymore and has no influence.
+ </p>
+ </section>
+ </section>
+ </section>
+</chapter>
+
diff --git a/lib/ssh/doc/src/usersguide.xml b/lib/ssh/doc/src/usersguide.xml
index 38ffa48cde..8a4df208d8 100644
--- a/lib/ssh/doc/src/usersguide.xml
+++ b/lib/ssh/doc/src/usersguide.xml
@@ -36,5 +36,6 @@
</description>
<xi:include href="introduction.xml"/>
<xi:include href="using_ssh.xml"/>
+ <xi:include href="terminology.xml"/>
<xi:include href="configure_algos.xml"/>
</part>
diff --git a/lib/ssh/doc/src/using_ssh.xml b/lib/ssh/doc/src/using_ssh.xml
index 80662e9a70..4455d5ecc5 100644
--- a/lib/ssh/doc/src/using_ssh.xml
+++ b/lib/ssh/doc/src/using_ssh.xml
@@ -74,16 +74,17 @@
<marker id="Running an Erlang ssh Daemon"></marker>
<title>Running an Erlang ssh Daemon</title>
- <p>The <c>system_dir</c> option must be a directory containing a host
- key file and it defaults to <c>/etc/ssh</c>. For details, see Section
- Configuration Files in <seealso
- marker="SSH_app">ssh(6)</seealso>.
+ <p>The
+ <seealso marker="ssh_file#type-system_dir_daemon_option"><c>system_dir</c></seealso>
+ option must be a directory containing a host key file and it defaults to <c>/etc/ssh</c>.
+ For details, see Section Configuration Files in <seealso marker="SSH_app">ssh(6)</seealso>.
</p>
<note><p>Normally, the <c>/etc/ssh</c> directory is only readable by root.</p>
</note>
- <p>The option <c>user_dir</c> defaults to directory <c>users ~/.ssh</c>.</p>
+ <p>The option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
+ defaults to the user's <c>~/.ssh</c> directory.</p>
<p><em>Step 1.</em> To run the example without root privileges,
generate new keys and host keys:</p>
diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl
index 94b9f3a196..923e9309f4 100644
--- a/lib/ssh/src/ssh.hrl
+++ b/lib/ssh/src/ssh.hrl
@@ -129,6 +129,8 @@
-type pubkey_alg() :: 'ecdsa-sha2-nistp256' |
'ecdsa-sha2-nistp384' |
'ecdsa-sha2-nistp521' |
+ 'ssh-ed25519' |
+ 'ssh-ed448' |
'rsa-sha2-256' |
'rsa-sha2-512' |
'ssh-dss' |
@@ -173,7 +175,7 @@
-type common_options() :: [ common_option() ].
-type common_option() ::
- user_dir_common_option()
+ ssh_file:user_dir_common_option()
| profile_common_option()
| max_idle_time_common_option()
| key_cb_common_option()
@@ -182,6 +184,7 @@
| ssh_msg_debug_fun_common_option()
| rekey_limit_common_option()
| id_string_common_option()
+ | pref_public_key_algs_common_option()
| preferred_algorithms_common_option()
| modify_algorithms_common_option()
| auth_methods_common_option()
@@ -191,8 +194,6 @@
-define(COMMON_OPTION, common_option()).
-
--type user_dir_common_option() :: {user_dir, false | string()}.
-type profile_common_option() :: {profile, atom() }.
-type max_idle_time_common_option() :: {idle_time, timeout()}.
-type rekey_limit_common_option() :: {rekey_limit, Bytes::limit_bytes() |
@@ -211,6 +212,7 @@
{ssh_msg_debug_fun, fun((ssh:connection_ref(),AlwaysDisplay::boolean(),Msg::binary(),LanguageTag::binary()) -> any()) } .
-type id_string_common_option() :: {id_string, string() | random | {random,Nmin::pos_integer(),Nmax::pos_integer()} }.
+-type pref_public_key_algs_common_option() :: {pref_public_key_algs, [pubkey_alg()] } .
-type preferred_algorithms_common_option():: {preferred_algorithms, algs_list()}.
-type modify_algorithms_common_option() :: {modify_algorithms, modify_algs_list()}.
-type auth_methods_common_option() :: {auth_methods, string() }.
@@ -223,14 +225,13 @@
{transport, {atom(),atom(),atom()} }
| {vsn, {non_neg_integer(),non_neg_integer()} }
| {tstflg, list(term())}
- | {user_dir_fun, fun()}
+ | ssh_file:user_dir_fun_common_option()
| {max_random_length_padding, non_neg_integer()} .
-type client_option() ::
- pref_public_key_algs_client_option()
- | pubkey_passphrase_client_options()
+ ssh_file:pubkey_passphrase_client_options()
| host_accepting_client_options()
| authentication_client_options()
| diffie_hellman_group_exchange_client_option()
@@ -241,15 +242,14 @@
| ?COMMON_OPTION .
-type opaque_client_options() ::
- {keyboard_interact_fun, fun((term(),term(),term()) -> term())}
+ {keyboard_interact_fun, fun((Name::iodata(),
+ Instruction::iodata(),
+ Prompts::[{Prompt::iodata(),Echo::boolean()}]
+ ) ->
+ [Response::iodata()]
+ )}
| opaque_common_options().
--type pref_public_key_algs_client_option() :: {pref_public_key_algs, [pubkey_alg()] } .
-
--type pubkey_passphrase_client_options() :: {dsa_pass_phrase, string()}
- | {rsa_pass_phrase, string()}
- | {ecdsa_pass_phrase, string()} .
-
-type host_accepting_client_options() ::
{silently_accept_hosts, accept_hosts()}
| {user_interaction, boolean()}
@@ -299,8 +299,9 @@
-type 'shell_fun/1'() :: fun((User::string()) -> pid()) .
-type 'shell_fun/2'() :: fun((User::string(), PeerAddr::inet:ip_address()) -> pid()).
--type exec_daemon_option() :: {exec, 'exec_fun/1'() | 'exec_fun/2'() | 'exec_fun/3'() }.
-
+-type exec_daemon_option() :: {exec, exec_spec()} .
+-type exec_spec() :: {direct, exec_fun()} .
+-type exec_fun() :: 'exec_fun/1'() | 'exec_fun/2'() | 'exec_fun/3'().
-type 'exec_fun/1'() :: fun((Cmd::string()) -> exec_result()) .
-type 'exec_fun/2'() :: fun((Cmd::string(), User::string()) -> exec_result()) .
-type 'exec_fun/3'() :: fun((Cmd::string(), User::string(), ClientAddr::ip_port()) -> exec_result()) .
@@ -311,7 +312,7 @@
-type send_ext_info_daemon_option() :: {send_ext_info, boolean()} .
-type authentication_daemon_options() ::
- {system_dir, string()}
+ ssh_file:system_dir_daemon_option()
| {auth_method_kb_interactive_data, prompt_texts() }
| {user_passwords, [{UserName::string(),Pwd::string()}]}
| {password, string()}
@@ -386,9 +387,6 @@
algorithms, %% #alg{}
- key_cb, %% Private/Public key callback module
- io_cb, %% Interaction callback module
-
send_mac = none, %% send MAC algorithm
send_mac_key, %% key used in send MAC algorithm
send_mac_size = 0,
diff --git a/lib/ssh/src/ssh_auth.erl b/lib/ssh/src/ssh_auth.erl
index 4e4aa440de..9632168e65 100644
--- a/lib/ssh/src/ssh_auth.erl
+++ b/lib/ssh/src/ssh_auth.erl
@@ -91,8 +91,10 @@ unique(L) ->
%%%---- userauth_request_msg "callbacks"
-password_msg([#ssh{opts = Opts, io_cb = IoCb,
- user = User, service = Service} = Ssh0]) ->
+password_msg([#ssh{opts = Opts,
+ user = User,
+ service = Service} = Ssh0]) ->
+ IoCb = ?GET_INTERNAL_OPT(io_cb, Opts),
{Password,Ssh} =
case ?GET_OPT(password, Opts) of
undefined when IoCb == ssh_no_io ->
@@ -137,9 +139,7 @@ keyboard_interactive_msg([#ssh{user = User,
get_public_key(SigAlg, #ssh{opts = Opts}) ->
KeyAlg = key_alg(SigAlg),
- {KeyCb,KeyCbOpts} = ?GET_OPT(key_cb, Opts),
- UserOpts = ?GET_OPT(user_options, Opts),
- case KeyCb:user_key(KeyAlg, [{key_cb_private,KeyCbOpts}|UserOpts]) of
+ case ssh_transport:call_KeyCb(user_key, [KeyAlg], Opts) of
{ok, PrivKey} ->
try
%% Check the key - the KeyCb may be a buggy plugin
@@ -387,11 +387,9 @@ handle_userauth_info_request(#ssh_msg_userauth_info_request{name = Name,
instruction = Instr,
num_prompts = NumPrompts,
data = Data},
- #ssh{opts = Opts,
- io_cb = IoCb
- } = Ssh) ->
+ #ssh{opts=Opts} = Ssh) ->
PromptInfos = decode_keyboard_interactive_prompts(NumPrompts,Data),
- case keyboard_interact_get_responses(IoCb, Opts, Name, Instr, PromptInfos) of
+ case keyboard_interact_get_responses(Opts, Name, Instr, PromptInfos) of
not_ok ->
not_ok;
Responses ->
@@ -498,9 +496,7 @@ get_password_option(Opts, User) ->
pre_verify_sig(User, KeyBlob, Opts) ->
try
Key = public_key:ssh_decode(KeyBlob, ssh2_pubkey), % or exception
- {KeyCb,KeyCbOpts} = ?GET_OPT(key_cb, Opts),
- UserOpts = ?GET_OPT(user_options, Opts),
- KeyCb:is_auth_key(Key, User, [{key_cb_private,KeyCbOpts}|UserOpts])
+ ssh_transport:call_KeyCb(is_auth_key, [Key, User], Opts)
catch
_:_ ->
false
@@ -509,10 +505,8 @@ pre_verify_sig(User, KeyBlob, Opts) ->
verify_sig(SessionId, User, Service, AlgBin, KeyBlob, SigWLen, #ssh{opts = Opts} = Ssh) ->
try
Alg = binary_to_list(AlgBin),
- {KeyCb,KeyCbOpts} = ?GET_OPT(key_cb, Opts),
- UserOpts = ?GET_OPT(user_options, Opts),
Key = public_key:ssh_decode(KeyBlob, ssh2_pubkey), % or exception
- true = KeyCb:is_auth_key(Key, User, [{key_cb_private,KeyCbOpts}|UserOpts]),
+ true = ssh_transport:call_KeyCb(is_auth_key, [Key, User], Opts),
PlainText = build_sig_data(SessionId, User, Service, KeyBlob, Alg),
<<?UINT32(AlgSigLen), AlgSig:AlgSigLen/binary>> = SigWLen,
<<?UINT32(AlgLen), _Alg:AlgLen/binary,
@@ -536,56 +530,78 @@ build_sig_data(SessionId, User, Service, KeyBlob, Alg) ->
+key_alg('rsa-sha2-256') -> 'ssh-rsa';
+key_alg('rsa-sha2-512') -> 'ssh-rsa';
+key_alg(Alg) -> Alg.
+
+%%%================================================================
+%%%
+%%% Keyboard-interactive
+%%%
+
decode_keyboard_interactive_prompts(_NumPrompts, Data) ->
ssh_message:decode_keyboard_interactive_prompts(Data, []).
-keyboard_interact_get_responses(IoCb, Opts, Name, Instr, PromptInfos) ->
- NumPrompts = length(PromptInfos),
+keyboard_interact_get_responses(Opts, Name, Instr, PromptInfos) ->
keyboard_interact_get_responses(?GET_OPT(user_interaction, Opts),
?GET_OPT(keyboard_interact_fun, Opts),
- ?GET_OPT(password, Opts), IoCb, Name,
- Instr, PromptInfos, Opts, NumPrompts).
+ ?GET_OPT(password, Opts),
+ Name,
+ Instr,
+ PromptInfos,
+ Opts).
-keyboard_interact_get_responses(_, _, not_ok, _, _, _, _, _, _) ->
+%% Don't re-try an already rejected password. This could happen if both keyboard-interactive
+%% and password methods are tried:
+keyboard_interact_get_responses(_, _, not_ok, _, _, _, _) ->
not_ok;
-keyboard_interact_get_responses(_, undefined, Password, _, _, _, _, _,
- 1) when Password =/= undefined ->
- [Password]; %% Password auth implemented with keyboard-interaction and passwd is known
-keyboard_interact_get_responses(_, _, _, _, _, _, _, _, 0) ->
+
+%% Only one password requested, and we have got one via the 'password' option for the daemon:
+keyboard_interact_get_responses(_, undefined, Pwd, _, _, [_], _) when Pwd =/= undefined ->
+ [Pwd]; %% Password auth implemented with keyboard-interaction and passwd is known
+
+%% No password requested (keyboard-interactive):
+keyboard_interact_get_responses(_, _, _, _, _, [], _) ->
[];
-keyboard_interact_get_responses(false, undefined, undefined, _, _, _, [Prompt|_], Opts, _) ->
- ssh_no_io:read_line(Prompt, Opts); %% Throws error as keyboard interaction is not allowed
-keyboard_interact_get_responses(true, undefined, _,IoCb, Name, Instr, PromptInfos, Opts, _) ->
- keyboard_interact(IoCb, Name, Instr, PromptInfos, Opts);
-keyboard_interact_get_responses(true, Fun, _Pwd, _IoCb, Name, Instr, PromptInfos, _Opts, NumPrompts) ->
- keyboard_interact_fun(Fun, Name, Instr, PromptInfos, NumPrompts).
-
-keyboard_interact(IoCb, Name, Instr, Prompts, Opts) ->
+
+%% user_interaction is forbidden (by option user_interaction) and we have to ask
+%% the user for one or more passwords.
+%% Throw an error:
+keyboard_interact_get_responses(false, undefined, undefined, _, _, [Prompt|_], Opts) ->
+ ssh_no_io:read_line(Prompt, Opts);
+
+%% One or more passwords are requested, we may prompt the user and no fun is used
+%% to get the responses:
+keyboard_interact_get_responses(true, undefined, _, Name, Instr, PromptInfos, Opts) ->
+ prompt_user_for_passwords(Name, Instr, PromptInfos, Opts);
+
+%% The passwords are provided with a fun. Use that one!
+keyboard_interact_get_responses(true, Fun, _Pwd, Name, Instr, PromptInfos, _Opts) ->
+ keyboard_interact_fun(Fun, Name, Instr, PromptInfos).
+
+
+
+prompt_user_for_passwords(Name, Instr, PromptInfos, Opts) ->
+ IoCb = ?GET_INTERNAL_OPT(io_cb, Opts),
write_if_nonempty(IoCb, Name),
write_if_nonempty(IoCb, Instr),
lists:map(fun({Prompt, true}) -> IoCb:read_line(Prompt, Opts);
({Prompt, false}) -> IoCb:read_password(Prompt, Opts)
end,
- Prompts).
+ PromptInfos).
-write_if_nonempty(_, "") -> ok;
-write_if_nonempty(_, <<>>) -> ok;
-write_if_nonempty(IoCb, Text) -> IoCb:format("~s~n",[Text]).
-
-
-keyboard_interact_fun(KbdInteractFun, Name, Instr, PromptInfos, NumPrompts) ->
- Prompts = lists:map(fun({Prompt, _Echo}) -> Prompt end,
- PromptInfos),
- case KbdInteractFun(Name, Instr, Prompts) of
- Rs when length(Rs) == NumPrompts ->
- Rs;
- _Rs ->
+keyboard_interact_fun(KbdInteractFun, Name, Instr, PromptInfos) ->
+ case KbdInteractFun(Name, Instr, PromptInfos) of
+ Responses when is_list(Responses),
+ length(Responses) == length(PromptInfos) ->
+ Responses;
+ _ ->
nok
end.
-key_alg('rsa-sha2-256') -> 'ssh-rsa';
-key_alg('rsa-sha2-512') -> 'ssh-rsa';
-key_alg(Alg) -> Alg.
+write_if_nonempty(_, "") -> ok;
+write_if_nonempty(_, <<>>) -> ok;
+write_if_nonempty(IoCb, Text) -> IoCb:format("~s~n",[Text]).
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 4b41c10cbb..7c87591cf2 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -447,7 +447,6 @@ init_ssh_record(Role, Socket, Opts) ->
init_ssh_record(Role, Socket, PeerAddr, Opts) ->
AuthMethods = ?GET_OPT(auth_methods, Opts),
S0 = #ssh{role = Role,
- key_cb = ?GET_OPT(key_cb, Opts),
opts = Opts,
userauth_supported_methods = AuthMethods,
available_host_keys = available_hkey_algorithms(Role, Opts),
@@ -472,10 +471,11 @@ init_ssh_record(Role, Socket, PeerAddr, Opts) ->
S1 =
S0#ssh{c_vsn = Vsn,
c_version = Version,
- io_cb = case ?GET_OPT(user_interaction, Opts) of
- true -> ssh_io;
- false -> ssh_no_io
- end,
+ opts = ?PUT_INTERNAL_OPT({io_cb, case ?GET_OPT(user_interaction, Opts) of
+ true -> ssh_io;
+ false -> ssh_no_io
+ end},
+ Opts),
userauth_quiet_mode = ?GET_OPT(quiet_mode, Opts),
peer = {PeerName, PeerAddr},
local = LocalName
@@ -488,7 +488,6 @@ init_ssh_record(Role, Socket, PeerAddr, Opts) ->
server ->
S0#ssh{s_vsn = Vsn,
s_version = Version,
- io_cb = ?GET_INTERNAL_OPT(io_cb, Opts, ssh_io),
userauth_methods = string:tokens(AuthMethods, ","),
kb_tries_left = 3,
peer = {undefined, PeerAddr},
@@ -983,6 +982,10 @@ handle_event(_, #ssh_msg_userauth_info_request{}, {userauth_keyboard_interactive
%%% ######## {connected, client|server} ####
+%% Skip ext_info messages in connected state (for example from OpenSSH >= 7.7)
+handle_event(_, #ssh_msg_ext_info{}, {connected,_Role}, D) ->
+ {keep_state, D};
+
handle_event(_, {#ssh_msg_kexinit{},_}, {connected,Role}, D0) ->
{KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D0#data.ssh_params),
D = D0#data{ssh_params = Ssh,
@@ -1682,18 +1685,19 @@ peer_role(client) -> server;
peer_role(server) -> client.
%%--------------------------------------------------------------------
-available_hkey_algorithms(Role, Options) ->
- KeyCb = ?GET_OPT(key_cb, Options),
- case [A || A <- available_hkey_algos(Options),
- (Role==client) orelse available_host_key(KeyCb, A, Options)
- ] of
-
- [] when Role==client ->
- error({shutdown, "No public key algs"});
-
- [] when Role==server ->
- error({shutdown, "No host key available"});
+available_hkey_algorithms(client, Options) ->
+ case available_hkey_algos(Options) of
+ [] ->
+ error({shutdown, "No public key algs"});
+ Algs ->
+ [atom_to_list(A) || A<-Algs]
+ end;
+available_hkey_algorithms(server, Options) ->
+ case [A || A <- available_hkey_algos(Options),
+ is_usable_host_key(A, Options)] of
+ [] ->
+ error({shutdown, "No host key available"});
Algs ->
[atom_to_list(A) || A<-Algs]
end.
@@ -1709,18 +1713,6 @@ available_hkey_algos(Options) ->
AvailableAndSupported.
-%% Alg :: atom()
-available_host_key({KeyCb,KeyCbOpts}, Alg, Opts) ->
- UserOpts = ?GET_OPT(user_options, Opts),
- case KeyCb:host_key(Alg, [{key_cb_private,KeyCbOpts}|UserOpts]) of
- {ok,Key} ->
- %% Check the key - the KeyCb may be a buggy plugin
- ssh_transport:valid_key_sha_alg(Key, Alg);
- _ ->
- false
- end.
-
-
send_msg(Msg, State=#data{ssh_params=Ssh0}) when is_tuple(Msg) ->
{Bytes, Ssh} = ssh_transport:ssh_packet(Msg, Ssh0),
send_bytes(Bytes, State),
@@ -1840,10 +1832,21 @@ ext_info(_, D0) ->
D0.
%%%----------------------------------------------------------------
-is_usable_user_pubkey(A, Ssh) ->
- case ssh_auth:get_public_key(A, Ssh) of
+is_usable_user_pubkey(Alg, Ssh) ->
+ try ssh_auth:get_public_key(Alg, Ssh) of
{ok,_} -> true;
_ -> false
+ catch
+ _:_ -> false
+ end.
+
+%%%----------------------------------------------------------------
+is_usable_host_key(Alg, Opts) ->
+ try ssh_transport:get_host_key(Alg, Opts)
+ of
+ _PrivHostKey -> true
+ catch
+ _:_ -> false
end.
%%%----------------------------------------------------------------
diff --git a/lib/ssh/src/ssh_file.erl b/lib/ssh/src/ssh_file.erl
index 832952ed52..510269bbb1 100644
--- a/lib/ssh/src/ssh_file.erl
+++ b/lib/ssh/src/ssh_file.erl
@@ -39,6 +39,24 @@
is_auth_key/3]).
+-export_type([system_dir_daemon_option/0,
+ user_dir_common_option/0,
+ user_dir_fun_common_option/0,
+ pubkey_passphrase_client_options/0
+ ]).
+
+-type system_dir_daemon_option() :: {system_dir, string()}.
+-type user_dir_common_option() :: {user_dir, string()}.
+-type user_dir_fun_common_option() :: {user_dir_fun, user2dir()}.
+-type user2dir() :: fun((RemoteUserName::string()) -> UserDir :: string()) .
+
+-type pubkey_passphrase_client_options() :: {dsa_pass_phrase, string()}
+ | {rsa_pass_phrase, string()}
+%% Not yet implemented: | {ed25519_pass_phrase, string()}
+%% Not yet implemented: | {ed448_pass_phrase, string()}
+ | {ecdsa_pass_phrase, string()} .
+
+
-define(PERM_700, 8#700).
-define(PERM_644, 8#644).
@@ -103,6 +121,8 @@ file_base_name('ssh-dss' ) -> "ssh_host_dsa_key";
file_base_name('ecdsa-sha2-nistp256') -> "ssh_host_ecdsa_key";
file_base_name('ecdsa-sha2-nistp384') -> "ssh_host_ecdsa_key";
file_base_name('ecdsa-sha2-nistp521') -> "ssh_host_ecdsa_key";
+file_base_name('ssh-ed25519' ) -> "ssh_host_ed25519_key";
+file_base_name('ssh-ed448' ) -> "ssh_host_ed448_key";
file_base_name(_ ) -> "ssh_host_key".
decode(File, Password) ->
@@ -240,6 +260,8 @@ identity_key_filename('ssh-rsa' ) -> "id_rsa";
identity_key_filename('rsa-sha2-256' ) -> "id_rsa";
identity_key_filename('rsa-sha2-384' ) -> "id_rsa";
identity_key_filename('rsa-sha2-512' ) -> "id_rsa";
+identity_key_filename('ssh-ed25519' ) -> "id_ed25519";
+identity_key_filename('ssh-ed448' ) -> "id_ed448";
identity_key_filename('ecdsa-sha2-nistp256') -> "id_ecdsa";
identity_key_filename('ecdsa-sha2-nistp384') -> "id_ecdsa";
identity_key_filename('ecdsa-sha2-nistp521') -> "id_ecdsa".
@@ -249,9 +271,12 @@ identity_pass_phrase("ssh-rsa" ) -> rsa_pass_phrase;
identity_pass_phrase("rsa-sha2-256" ) -> rsa_pass_phrase;
identity_pass_phrase("rsa-sha2-384" ) -> rsa_pass_phrase;
identity_pass_phrase("rsa-sha2-512" ) -> rsa_pass_phrase;
+%% Not yet implemented: identity_pass_phrase("ssh-ed25519" ) -> ed25519_pass_phrase;
+%% Not yet implemented: identity_pass_phrase("ssh-ed448" ) -> ed448_pass_phrase;
identity_pass_phrase("ecdsa-sha2-"++_) -> ecdsa_pass_phrase;
identity_pass_phrase(P) when is_atom(P) ->
- identity_pass_phrase(atom_to_list(P)).
+ identity_pass_phrase(atom_to_list(P));
+identity_pass_phrase(_) -> undefined.
lookup_host_key_fd(Fd, KeyToMatch, Host, KeyType) ->
case io:get_line(Fd, '') of
@@ -301,6 +326,10 @@ key_match({#'ECPoint'{},{namedCurve,Curve}}, Alg) ->
_ ->
false
end;
+key_match({ed_pub,ed25519,_}, 'ssh-ed25519') ->
+ true;
+key_match({ed_pub,ed448,_}, 'ssh-ed448') ->
+ true;
key_match(_, _) ->
false.
diff --git a/lib/ssh/src/ssh_message.erl b/lib/ssh/src/ssh_message.erl
index da4027a763..d95e58c1bb 100644
--- a/lib/ssh/src/ssh_message.erl
+++ b/lib/ssh/src/ssh_message.erl
@@ -611,7 +611,13 @@ encode_signature({_, #'Dss-Parms'{}}, _SigAlg, Signature) ->
<<?Ebinary(<<"ssh-dss">>), ?Ebinary(Signature)>>;
encode_signature({#'ECPoint'{}, {namedCurve,OID}}, _SigAlg, Signature) ->
CurveName = public_key:oid2ssh_curvename(OID),
- <<?Ebinary(<<"ecdsa-sha2-",CurveName/binary>>), ?Ebinary(Signature)>>.
+ <<?Ebinary(<<"ecdsa-sha2-",CurveName/binary>>), ?Ebinary(Signature)>>;
+encode_signature({ed_pub, ed25519,_}, _SigAlg, Signature) ->
+ <<?Ebinary(<<"ssh-ed25519">>), ?Ebinary(Signature)>>;
+encode_signature({ed_pub, ed448,_}, _SigAlg, Signature) ->
+ <<?Ebinary(<<"ssh-ed448">>), ?Ebinary(Signature)>>.
+
+
%%%################################################################
%%%#
diff --git a/lib/ssh/src/ssh_options.erl b/lib/ssh/src/ssh_options.erl
index bc9f2156bc..1010c9be55 100644
--- a/lib/ssh/src/ssh_options.erl
+++ b/lib/ssh/src/ssh_options.erl
@@ -434,6 +434,18 @@ default(client) ->
class => user_options
},
+%%% Not yet implemented {ed25519_pass_phrase, def} =>
+%%% Not yet implemented #{default => undefined,
+%%% Not yet implemented chk => fun check_string/1,
+%%% Not yet implemented class => user_options
+%%% Not yet implemented },
+%%% Not yet implemented
+%%% Not yet implemented {ed448_pass_phrase, def} =>
+%%% Not yet implemented #{default => undefined,
+%%% Not yet implemented chk => fun check_string/1,
+%%% Not yet implemented class => user_options
+%%% Not yet implemented },
+%%% Not yet implemented
{silently_accept_hosts, def} =>
#{default => false,
chk => fun check_silently_accept_hosts/1,
@@ -452,12 +464,6 @@ default(client) ->
class => user_options
},
- {pref_public_key_algs, def} =>
- #{default => ssh_transport:default_algorithms(public_key),
- chk => fun check_pref_public_key_algs/1,
- class => user_options
- },
-
{dh_gex_limits, def} =>
#{default => {1024, 6144, 8192}, % FIXME: Is this true nowadays?
chk => fun({Min,I,Max}) ->
@@ -523,6 +529,12 @@ default(common) ->
class => user_options
},
+ {pref_public_key_algs, def} =>
+ #{default => ssh_transport:default_algorithms(public_key),
+ chk => fun check_pref_public_key_algs/1,
+ class => user_options
+ },
+
{preferred_algorithms, def} =>
#{default => ssh:default_algorithms(),
chk => fun check_preferred_algorithms/1,
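Since pref_public_key_algs is now a common option, it can also be given to a daemon (the server side reads it for the server-sig-algs extension, see the ssh_transport.erl change below). A hedged sketch with illustrative values:

    {ok, _D} = ssh:daemon(2222, [{system_dir, "/etc/ssh"},
                                 {pref_public_key_algs, ['ssh-ed25519','rsa-sha2-256','ssh-rsa']}]).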
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index c5b0704925..9ff20454cd 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -51,7 +51,9 @@
extract_public_key/1,
ssh_packet/2, pack/2,
valid_key_sha_alg/2,
- sha/1, sign/3, verify/5]).
+ sha/1, sign/3, verify/5,
+ get_host_key/2,
+ call_KeyCb/3]).
-export([dbg_trace/3]).
@@ -147,6 +149,8 @@ supported_algorithms(public_key) ->
{'ecdsa-sha2-nistp384', [{public_keys,ecdsa}, {hashs,sha384}, {curves,secp384r1}]},
{'ecdsa-sha2-nistp521', [{public_keys,ecdsa}, {hashs,sha512}, {curves,secp521r1}]},
{'ecdsa-sha2-nistp256', [{public_keys,ecdsa}, {hashs,sha256}, {curves,secp256r1}]},
+ {'ssh-ed25519', [{public_keys,eddsa}, {curves,ed25519} ]},
+ {'ssh-ed448', [{public_keys,eddsa}, {curves,ed448} ]},
{'ssh-rsa', [{public_keys,rsa}, {hashs,sha} ]},
{'rsa-sha2-256', [{public_keys,rsa}, {hashs,sha256} ]},
{'rsa-sha2-512', [{public_keys,rsa}, {hashs,sha512} ]},
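With the two EdDSA entries added to the supported set, a client can select them through preferred_algorithms. A hedged sketch (host and port are placeholders):

    true = lists:member('ssh-ed25519',
                        proplists:get_value(public_key, ssh:default_algorithms())),
    {ok, _C} = ssh:connect("localhost", 22,
                           [{preferred_algorithms, [{public_key, ['ssh-ed25519','ssh-ed448']}]},
                            {silently_accept_hosts, true},
                            {user_interaction, false}]).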
@@ -431,7 +435,8 @@ key_exchange_first_msg(Kex, Ssh0) when Kex == 'ecdh-sha2-nistp256' ;
%%%
handle_kexdh_init(#ssh_msg_kexdh_init{e = E},
Ssh0 = #ssh{algorithms = #alg{kex=Kex,
- hkey=SignAlg} = Algs}) ->
+ hkey=SignAlg} = Algs,
+ opts = Opts}) ->
%% server
{G, P} = dh_group(Kex),
if
@@ -439,7 +444,7 @@ handle_kexdh_init(#ssh_msg_kexdh_init{e = E},
Sz = dh_bits(Algs),
{Public, Private} = generate_key(dh, [P,G,2*Sz]),
K = compute_key(dh, E, Private, [P,G]),
- MyPrivHostKey = get_host_key(Ssh0, SignAlg),
+ MyPrivHostKey = get_host_key(SignAlg, Opts),
MyPubHostKey = extract_public_key(MyPrivHostKey),
H = kex_hash(Ssh0, MyPubHostKey, sha(Kex), {E,Public,K}),
H_SIG = sign(H, sha(SignAlg), MyPrivHostKey),
@@ -578,14 +583,15 @@ handle_kex_dh_gex_init(#ssh_msg_kex_dh_gex_init{e = E},
#ssh{keyex_key = {{Private, Public}, {G, P}},
keyex_info = {Min, Max, NBits},
algorithms = #alg{kex=Kex,
- hkey=SignAlg}} = Ssh0) ->
+ hkey=SignAlg},
+ opts = Opts} = Ssh0) ->
%% server
if
1=<E, E=<(P-1) ->
K = compute_key(dh, E, Private, [P,G]),
if
1<K, K<(P-1) ->
- MyPrivHostKey = get_host_key(Ssh0, SignAlg),
+ MyPrivHostKey = get_host_key(SignAlg, Opts),
MyPubHostKey = extract_public_key(MyPrivHostKey),
H = kex_hash(Ssh0, MyPubHostKey, sha(Kex), {Min,NBits,Max,P,G,E,Public,K}),
H_SIG = sign(H, sha(SignAlg), MyPrivHostKey),
@@ -653,7 +659,8 @@ handle_kex_dh_gex_reply(#ssh_msg_kex_dh_gex_reply{public_host_key = PeerPubHostK
%%%
handle_kex_ecdh_init(#ssh_msg_kex_ecdh_init{q_c = PeerPublic},
Ssh0 = #ssh{algorithms = #alg{kex=Kex,
- hkey=SignAlg}}) ->
+ hkey=SignAlg},
+ opts = Opts}) ->
%% at server
Curve = ecdh_curve(Kex),
{MyPublic, MyPrivate} = generate_key(ecdh, Curve),
@@ -661,7 +668,7 @@ handle_kex_ecdh_init(#ssh_msg_kex_ecdh_init{q_c = PeerPublic},
compute_key(ecdh, PeerPublic, MyPrivate, Curve)
of
K ->
- MyPrivHostKey = get_host_key(Ssh0, SignAlg),
+ MyPrivHostKey = get_host_key(SignAlg, Opts),
MyPubHostKey = extract_public_key(MyPrivHostKey),
H = kex_hash(Ssh0, MyPubHostKey, sha(Curve), {PeerPublic, MyPublic, K}),
H_SIG = sign(H, sha(SignAlg), MyPrivHostKey),
@@ -759,8 +766,7 @@ ext_info_message(#ssh{role=server,
send_ext_info=true,
opts = Opts} = Ssh0) ->
AlgsList = lists:map(fun erlang:atom_to_list/1,
- proplists:get_value(public_key,
- ?GET_OPT(preferred_algorithms, Opts))),
+ ?GET_OPT(pref_public_key_algs, Opts)),
Msg = #ssh_msg_ext_info{nr_extensions = 1,
data = [{"server-sig-algs", string:join(AlgsList,",")}]
},
@@ -778,10 +784,8 @@ sid(#ssh{session_id = Id}, _) -> Id.
%%
%% The host key should be read from storage
%%
-get_host_key(SSH, SignAlg) ->
- #ssh{key_cb = {KeyCb,KeyCbOpts}, opts = Opts} = SSH,
- UserOpts = ?GET_OPT(user_options, Opts),
- case KeyCb:host_key(SignAlg, [{key_cb_private,KeyCbOpts}|UserOpts]) of
+get_host_key(SignAlg, Opts) ->
+ case call_KeyCb(host_key, [SignAlg], Opts) of
{ok, PrivHostKey} ->
%% Check the key - the KeyCb may be a buggy plugin
case valid_key_sha_alg(PrivHostKey, SignAlg) of
@@ -792,6 +796,11 @@ get_host_key(SSH, SignAlg) ->
exit({error, {Result, unsupported_key_type}})
end.
+call_KeyCb(F, Args, Opts) ->
+ {KeyCb,KeyCbOpts} = ?GET_OPT(key_cb, Opts),
+ UserOpts = ?GET_OPT(user_options, Opts),
+ apply(KeyCb, F, Args ++ [[{key_cb_private,KeyCbOpts}|UserOpts]]).
+
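For reference, a hypothetical key_cb module as reached through call_KeyCb/3 above; it only delegates to ssh_file and shows where the key_cb_private entry ends up (module name and logging are assumptions, not part of this change). It would be selected with the daemon option {key_cb, {my_key_cb, KeyCbOpts}}.

    -module(my_key_cb).
    -behaviour(ssh_server_key_api).
    -export([host_key/2, is_auth_key/3]).

    host_key(SignAlg, Opts) ->
        %% Opts = [{key_cb_private, KeyCbOpts} | UserOptions]
        io:format("key_cb_private: ~p~n", [proplists:get_value(key_cb_private, Opts)]),
        ssh_file:host_key(SignAlg, Opts).

    is_auth_key(PubKey, User, Opts) ->
        ssh_file:is_auth_key(PubKey, User, Opts).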
extract_public_key(#'RSAPrivateKey'{modulus = N, publicExponent = E}) ->
#'RSAPublicKey'{modulus = N, publicExponent = E};
extract_public_key(#'DSAPrivateKey'{y = Y, p = P, q = Q, g = G}) ->
@@ -799,6 +808,8 @@ extract_public_key(#'DSAPrivateKey'{y = Y, p = P, q = Q, g = G}) ->
extract_public_key(#'ECPrivateKey'{parameters = {namedCurve,OID},
publicKey = Q}) ->
{#'ECPoint'{point=Q}, {namedCurve,OID}};
+extract_public_key({ed_pri, Alg, Pub, _Priv}) ->
+ {ed_pub, Alg, Pub};
extract_public_key(#{engine:=_, key_id:=_, algorithm:=Alg} = M) ->
case {Alg, crypto:privkey_to_pubkey(Alg, M)} of
{rsa, [E,N]} ->
@@ -858,29 +869,30 @@ accepted_host(Ssh, PeerName, Public, Opts) ->
end.
-yes_no(Ssh, Prompt) ->
- (Ssh#ssh.io_cb):yes_no(Prompt, Ssh#ssh.opts).
+yes_no(#ssh{opts=Opts}, Prompt) ->
+ IoCb = ?GET_INTERNAL_OPT(io_cb, Opts, ssh_io),
+ IoCb:yes_no(Prompt, Opts).
fmt_hostkey('ssh-rsa') -> "RSA";
fmt_hostkey('ssh-dss') -> "DSA";
+fmt_hostkey('ssh-ed25519') -> "ED25519";
+fmt_hostkey('ssh-ed448') -> "ED448";
fmt_hostkey(A) when is_atom(A) -> fmt_hostkey(atom_to_list(A));
fmt_hostkey("ecdsa"++_) -> "ECDSA";
fmt_hostkey(X) -> X.
-known_host_key(#ssh{opts = Opts, key_cb = {KeyCb,KeyCbOpts}, peer = {PeerName,_}} = Ssh,
+known_host_key(#ssh{opts = Opts, peer = {PeerName,_}} = Ssh,
Public, Alg) ->
- UserOpts = ?GET_OPT(user_options, Opts),
- case is_host_key(KeyCb, Public, PeerName, Alg, [{key_cb_private,KeyCbOpts}|UserOpts]) of
- {_,true} ->
+ case call_KeyCb(is_host_key, [Public, PeerName, Alg], Opts) of
+ true ->
ok;
- {_,false} ->
+ false ->
DoAdd = ?GET_OPT(save_accepted_host, Opts),
case accepted_host(Ssh, PeerName, Public, Opts) of
true when DoAdd == true ->
- {_,R} = add_host_key(KeyCb, PeerName, Public, [{key_cb_private,KeyCbOpts}|UserOpts]),
- R;
+ call_KeyCb(add_host_key, [PeerName, Public], Opts);
true when DoAdd == false ->
ok;
false ->
@@ -890,13 +902,6 @@ known_host_key(#ssh{opts = Opts, key_cb = {KeyCb,KeyCbOpts}, peer = {PeerName,_}
end
end.
-is_host_key(KeyCb, Public, PeerName, Alg, Data) ->
- {KeyCb, KeyCb:is_host_key(Public, PeerName, Alg, Data)}.
-
-add_host_key(KeyCb, PeerName, Public, Data) ->
- {KeyCb, KeyCb:add_host_key(PeerName, Public, Data)}.
-
-
%% Each of the algorithm strings MUST be a comma-separated list of
%% algorithm names (see ''Algorithm Naming'' in [SSH-ARCH]). Each
%% supported (allowed) algorithm MUST be listed in order of preference.
@@ -1937,6 +1942,11 @@ valid_key_sha_alg(#'RSAPrivateKey'{}, 'ssh-rsa' ) -> true;
valid_key_sha_alg({_, #'Dss-Parms'{}}, 'ssh-dss') -> true;
valid_key_sha_alg(#'DSAPrivateKey'{}, 'ssh-dss') -> true;
+valid_key_sha_alg({ed_pub, ed25519,_}, 'ssh-ed25519') -> true;
+valid_key_sha_alg({ed_pri, ed25519,_,_},'ssh-ed25519') -> true;
+valid_key_sha_alg({ed_pub, ed448,_}, 'ssh-ed448') -> true;
+valid_key_sha_alg({ed_pri, ed448,_,_}, 'ssh-ed448') -> true;
+
valid_key_sha_alg({#'ECPoint'{},{namedCurve,OID}}, Alg) -> valid_key_sha_alg_ec(OID, Alg);
valid_key_sha_alg(#'ECPrivateKey'{parameters = {namedCurve,OID}}, Alg) -> valid_key_sha_alg_ec(OID, Alg);
valid_key_sha_alg(_, _) -> false.
@@ -1946,12 +1956,17 @@ valid_key_sha_alg_ec(OID, Alg) ->
Alg == list_to_atom("ecdsa-sha2-" ++ binary_to_list(Curve)).
+-dialyzer({no_match, public_algo/1}).
+
public_algo(#'RSAPublicKey'{}) -> 'ssh-rsa'; % FIXME: Not right with draft-curdle-rsa-sha2
public_algo({_, #'Dss-Parms'{}}) -> 'ssh-dss';
+public_algo({ed_pub, ed25519,_}) -> 'ssh-ed25519';
+public_algo({ed_pub, ed448,_}) -> 'ssh-ed448';
public_algo({#'ECPoint'{},{namedCurve,OID}}) ->
Curve = public_key:oid2ssh_curvename(OID),
list_to_atom("ecdsa-sha2-" ++ binary_to_list(Curve)).
+
sha('ssh-rsa') -> sha;
sha('rsa-sha2-256') -> sha256;
sha('rsa-sha2-384') -> sha384;
@@ -1960,6 +1975,8 @@ sha('ssh-dss') -> sha;
sha('ecdsa-sha2-nistp256') -> sha(secp256r1);
sha('ecdsa-sha2-nistp384') -> sha(secp384r1);
sha('ecdsa-sha2-nistp521') -> sha(secp521r1);
+sha('ssh-ed25519') -> undefined; % Included in the spec of ed25519
+sha('ssh-ed448') -> undefined; % Included in the spec of ed448
sha(secp256r1) -> sha256;
sha(secp384r1) -> sha384;
sha(secp521r1) -> sha512;
@@ -2054,7 +2071,6 @@ ecdh_curve('curve448-sha512' ) -> x448;
ecdh_curve('curve25519-sha256' ) -> x25519;
ecdh_curve('[email protected]' ) -> x25519.
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% Utils for default_algorithms/1 and supported_algorithms/1
diff --git a/lib/ssh/test/.gitignore b/lib/ssh/test/.gitignore
new file mode 100644
index 0000000000..c9d5f086b3
--- /dev/null
+++ b/lib/ssh/test/.gitignore
@@ -0,0 +1,5 @@
+
+
+property_test/ssh_eqc_client_server_dirs/system
+property_test/ssh_eqc_client_server_dirs/user
+
diff --git a/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl b/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl
index 6d0d8f5d99..f4b521356f 100644
--- a/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl
+++ b/lib/ssh/test/property_test/ssh_eqc_client_info_timing.erl
@@ -58,6 +58,7 @@
%%% Properties:
prop_seq(Config) ->
+ error_logger:tty(false),
{ok,Pid} = ssh_eqc_event_handler:add_report_handler(),
{_, _, Port} = init_daemon(Config),
numtests(1000,
@@ -66,16 +67,25 @@ prop_seq(Config) ->
send_bad_sequence(Port, Delay, Pid),
not any_relevant_error_report(Pid)
catch
- C:E -> io:format('~p:~p~n',[C,E]),
+ C:E:S -> ct:log("~p:~p~n~p",[C,E,S]),
false
end
)).
send_bad_sequence(Port, Delay, Pid) ->
- {ok,S} = gen_tcp:connect("localhost",Port,[]),
- gen_tcp:send(S,"Illegal info-string\r\n"),
- ssh_test_lib:sleep_microsec(Delay),
- gen_tcp:close(S).
+ send_bad_sequence(Port, Delay, Pid, 10).
+
+send_bad_sequence(Port, Delay, Pid, N) ->
+ case gen_tcp:connect("localhost",Port,[]) of
+ {ok,S} ->
+ gen_tcp:send(S,"Illegal info-string\r\n"),
+ ssh_test_lib:sleep_microsec(Delay),
+ gen_tcp:close(S);
+
+ {error,econnreset} when N>0 ->
+ timer:sleep(1),
+ send_bad_sequence(Port, Delay, Pid, N-1)
+ end.
any_relevant_error_report(Pid) ->
{ok, Reports} = ssh_eqc_event_handler:get_reports(Pid),
diff --git a/lib/ssh/test/property_test/ssh_eqc_client_server.erl b/lib/ssh/test/property_test/ssh_eqc_client_server.erl
index 39d0b4e410..acb0faa0c7 100644
--- a/lib/ssh/test/property_test/ssh_eqc_client_server.erl
+++ b/lib/ssh/test/property_test/ssh_eqc_client_server.erl
@@ -22,25 +22,27 @@
-module(ssh_eqc_client_server).
-compile(export_all).
+
+-proptest([proper]).
--include_lib("common_test/include/ct.hrl").
-
--ifdef(PROPER).
-%% Proper is not supported.
--else.
--ifdef(TRIQ).
-%% Proper is not supported.
+-ifndef(PROPER).
-else.
+%% Only use proper
+%%
+%% Previously only EQC was supported, but the changes to support PROPER are not
+%% just a wrapper. Since we do not have access to eqc we cannot test the changes,
+%% so eqc is disabled.
+%% However, with access to eqc it ought to be quite easy to re-enable it by
+%% studying the diff.
+-include_lib("proper/include/proper.hrl").
+-define(MOD_eqc,proper).
+
+-include_lib("common_test/include/ct.hrl").
%% Limit the testing time on the CI server... this needs to be improved to use a percentage of the total budget.
-define(TESTINGTIME(Prop), eqc:testing_time(30,Prop)).
-
--include_lib("eqc/include/eqc.hrl").
--include_lib("eqc/include/eqc_statem.hrl").
--eqc_group_commands(true).
-
-define(SSH_DIR,"ssh_eqc_client_server_dirs").
-define(sec, *1000).
@@ -51,10 +53,6 @@
port
}).
--record(conn,{ref,
- srvr_ref
- }).
-
-record(chan, {ref,
conn_ref,
subsystem,
@@ -65,7 +63,7 @@
initialized = false,
servers = [], % [#srvr{}]
clients = [],
- connections = [], % [#conn{}]
+ connections = [],
channels = [], % [#chan{}]
data_dir
}).
@@ -80,9 +78,8 @@
-define(SUBSYSTEMS, ["echo1", "echo2", "echo3", "echo4"]).
--define(SERVER_ADDRESS, { {127,1,0,choose(1,254)}, % IP
- choose(1024,65535) % Port
- }).
+-define(SERVER_ADDRESS, {127,0,0,1}). % Server listening IP. Darwin, Solaris & FreeBSD
+ % dislike all others in 127.0.0.0/24
-define(SERVER_EXTRA_OPTIONS, [{parallel_login,bool()}] ).
@@ -104,10 +101,12 @@
%% To be called as eqc:quickcheck( ssh_eqc_client_server:prop_seq() ).
prop_seq() ->
- ?TESTINGTIME(do_prop_seq(?SSH_DIR)).
+ error_logger:tty(false),
+ ?TESTINGTIME(do_prop_seq(?SSH_DIR)).
%% To be called from a common_test test suite
prop_seq(CT_Config) ->
+ error_logger:tty(false),
do_prop_seq(full_path(?SSH_DIR, CT_Config)).
@@ -124,10 +123,12 @@ full_path(SSHdir, CT_Config) ->
SSHdir).
%%%----
prop_parallel() ->
+ error_logger:tty(false),
?TESTINGTIME(do_prop_parallel(?SSH_DIR)).
%% To be called from a common_test test suite
prop_parallel(CT_Config) ->
+ error_logger:tty(false),
do_prop_parallel(full_path(?SSH_DIR, CT_Config)).
do_prop_parallel(DataDir) ->
@@ -139,22 +140,22 @@ do_prop_parallel(DataDir) ->
end).
%%%----
-prop_parallel_multi() ->
- ?TESTINGTIME(do_prop_parallel_multi(?SSH_DIR)).
-
-%% To be called from a common_test test suite
-prop_parallel_multi(CT_Config) ->
- do_prop_parallel_multi(full_path(?SSH_DIR, CT_Config)).
-
-do_prop_parallel_multi(DataDir) ->
- setup_rsa(DataDir),
- ?FORALL(Repetitions,?SHRINK(1,[10]),
- ?FORALL(Cmds,parallel_commands(?MODULE),
- ?ALWAYS(Repetitions,
- begin
- {H,Sf,Result} = run_parallel_commands(?MODULE,Cmds,[{data_dir,DataDir}]),
- present_result(?MODULE, Cmds, {H,Sf,Result}, Result==ok)
- end))).
+%% prop_parallel_multi() ->
+%% ?TESTINGTIME(do_prop_parallel_multi(?SSH_DIR)).
+
+%% %% To be called from a common_test test suite
+%% prop_parallel_multi(CT_Config) ->
+%% do_prop_parallel_multi(full_path(?SSH_DIR, CT_Config)).
+
+%% do_prop_parallel_multi(DataDir) ->
+%% setup_rsa(DataDir),
+%% ?FORALL(Repetitions,?SHRINK(1,[10]),
+%% ?FORALL(Cmds,parallel_commands(?MODULE),
+%% ?ALWAYS(Repetitions,
+%% begin
+%% {H,Sf,Result} = run_parallel_commands(?MODULE,Cmds,[{data_dir,DataDir}]),
+%% present_result(?MODULE, Cmds, {H,Sf,Result}, Result==ok)
+%% end))).
%%%================================================================
%%% State machine spec
@@ -169,13 +170,50 @@ initial_state(DataDir) ->
ssh:start().
%%%----------------
-weight(S, ssh_send) -> 5*length([C || C<-S#state.channels, has_subsyst(C)]);
-weight(S, ssh_start_subsyst) -> 3*length([C || C<-S#state.channels, no_subsyst(C)]);
+weight(S, ssh_send) -> 20*length([C || C<-S#state.channels, has_subsyst(C)]);
+weight(S, ssh_start_subsyst) -> 10*length([C || C<-S#state.channels, no_subsyst(C)]);
weight(S, ssh_close_channel) -> 2*length([C || C<-S#state.channels, has_subsyst(C)]);
-weight(S, ssh_open_channel) -> length(S#state.connections);
+weight(S, ssh_open_channel) -> 2*length(S#state.connections);
weight(_S, _) -> 1.
%%%----------------
+fns() -> [initial_state,
+ ssh_server,
+ ssh_client,
+ ssh_open_connection,
+ ssh_close_connection,
+ ssh_open_channel,
+ ssh_close_channel,
+ ssh_start_subsyst,
+ ssh_send
+ ].
+
+call_f(Name, Sfx) ->
+ case get({Name,Sfx}) of
+ undefined -> F = list_to_atom(lists:concat([Name,"_",Sfx])),
+ put({Name,Sfx}, F),
+ F;
+ F when is_atom(F) -> F
+ end.
+
+-define(call(Name, What, Args), apply(?MODULE, call_f(Name,What), Args)).
+
+symbolic_call(S,Name) -> {call, ?MODULE, Name, ?call(Name,args,[S])}.
+
+may_generate(S, F) -> ?call(F,pre,[S]).
+
+command(S) ->
+ frequency([{weight(S,F), symbolic_call(S,F)} || F <- fns(),
+ may_generate(S, F)]
+ ).
+
+precondition(S, {call,_M,F,As}) -> try ?call(F, pre, [S,As])
+ catch _:undef -> try ?call(F,pre,[S]) catch _:undef -> true end
+ end.
+next_state(S, Res, {call,_M,F,As}) -> try ?call(F, next, [S,Res,As]) catch _:undef -> S end.
+postcondition(S, {call,_M,F,As}, Res) -> try ?call(F, post, [S,As,Res]) catch _:undef -> true end.
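As an illustration (not part of the diff), the dispatcher above expects every command Name listed in fns() to provide Name_pre/1 and Name_args/1, while Name_pre/2, Name_next/3 and Name_post/3 are optional. A hypothetical extra command would look like:

    ssh_noop_pre(S)            -> S#state.initialized.
    ssh_noop_args(_S)          -> [].
    ssh_noop()                 -> ok.
    ssh_noop_next(S, _Res, _A) -> S.
    ssh_noop_post(_S, _A, Res) -> Res == ok.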
+
+%%%----------------
%%% Initialize
initial_state_pre(S) -> not S#state.initialized.
@@ -200,24 +238,34 @@ ssh_server_pre(S) -> S#state.initialized andalso
ssh_server_args(_) -> [?SERVER_ADDRESS, {var,data_dir}, ?SERVER_EXTRA_OPTIONS].
-ssh_server({IP,Port}, DataDir, ExtraOptions) ->
- ok(ssh:daemon(IP, Port,
- [
- {system_dir, system_dir(DataDir)},
- {user_dir, user_dir(DataDir)},
- {subsystems, [{SS, {ssh_eqc_subsys, [SS]}} || SS <- ?SUBSYSTEMS]}
- | ExtraOptions
- ])).
-
-ssh_server_post(_S, _Args, {error,eaddrinuse}) -> true;
-ssh_server_post(_S, _Args, Result) -> is_ok(Result).
-
-ssh_server_next(S, {error,eaddrinuse}, _) -> S;
-ssh_server_next(S, Result, [{IP,Port},_,_]) ->
- S#state{servers=[#srvr{ref = Result,
- address = IP,
- port = Port}
- | S#state.servers]}.
+ssh_server(IP0, DataDir, ExtraOptions) ->
+ case ssh:daemon(IP0, 0,
+ [
+ {system_dir, system_dir(DataDir)},
+ {user_dir, user_dir(DataDir)},
+ {subsystems, [{SS, {ssh_eqc_subsys, [SS]}} || SS <- ?SUBSYSTEMS]}
+ | ExtraOptions
+ ]) of
+ {ok,DaemonRef} ->
+ case ssh:daemon_info(DaemonRef) of
+ {ok, Props} ->
+ Port = proplists:get_value(port,Props),
+ IP = proplists:get_value(ip,Props),
+ #srvr{ref = DaemonRef,
+ address = IP,
+ port = Port};
+ Other ->
+ Other
+ end;
+ Other ->
+ Other
+ end.
+
+ssh_server_post(_S, _Args, #srvr{port=Port}) -> (0 < Port) andalso (Port < 65536);
+ssh_server_post(_S, _Args, _) -> false.
+
+ssh_server_next(S, Srvr, _) ->
+ S#state{servers=[Srvr | S#state.servers]}.
%%%----------------
%%% Start a new client
@@ -271,8 +319,7 @@ ssh_open_connection(#srvr{address=Ip, port=Port}, DataDir) ->
ssh_open_connection_post(_S, _Args, Result) -> is_ok(Result).
-ssh_open_connection_next(S, ConnRef, [#srvr{ref=SrvrRef},_]) ->
- S#state{connections=[#conn{ref=ConnRef, srvr_ref=SrvrRef}|S#state.connections]}.
+ssh_open_connection_next(S, ConnRef, [_,_]) -> S#state{connections=[ConnRef|S#state.connections]}.
%%%----------------
%%% Stop a new connection
@@ -282,12 +329,12 @@ ssh_close_connection_pre(S) -> S#state.connections /= [].
ssh_close_connection_args(S) -> [oneof(S#state.connections)].
-ssh_close_connection(#conn{ref=ConnectionRef}) -> ssh:close(ConnectionRef).
+ssh_close_connection(ConnectionRef) -> ssh:close(ConnectionRef).
-ssh_close_connection_next(S, _, [Conn=#conn{ref=ConnRef}]) ->
- S#state{connections = S#state.connections--[Conn],
- channels = [C || C <- S#state.channels,
- C#chan.conn_ref /= ConnRef]
+ssh_close_connection_next(S, _, [ConnRef]) ->
+ S#state{connections = S#state.connections--[ConnRef],
+ channels = [C || C <- S#state.channels,
+ C#chan.conn_ref /= ConnRef]
}.
%%%----------------
@@ -299,14 +346,14 @@ ssh_open_channel_pre(S) -> S#state.connections /= [].
ssh_open_channel_args(S) -> [oneof(S#state.connections)].
%%% For re-arrangement in parallel tests.
-ssh_open_channel_pre(S,[C]) -> lists:member(C,S#state.connections).
+ssh_open_channel_pre(S,[C]) when is_record(S,state) -> lists:member(C,S#state.connections).
-ssh_open_channel(#conn{ref=ConnectionRef}) ->
+ssh_open_channel(ConnectionRef) ->
ok(ssh_connection:session_channel(ConnectionRef, 20?sec)).
ssh_open_channel_post(_S, _Args, Result) -> is_ok(Result).
-ssh_open_channel_next(S, ChannelRef, [#conn{ref=ConnRef}]) ->
+ssh_open_channel_next(S, ChannelRef, [ConnRef]) ->
S#state{channels=[#chan{ref=ChannelRef,
conn_ref=ConnRef}
| S#state.channels]}.
@@ -326,9 +373,7 @@ ssh_close_channel_next(S, _, [C]) ->
S#state{channels = [Ci || Ci <- S#state.channels,
sig(C) /= sig(Ci)]}.
-
sig(C) -> {C#chan.ref, C#chan.conn_ref}.
-
%%%----------------
%%% Start a sub system on a channel
@@ -361,9 +406,10 @@ ssh_start_subsyst_next(S, _Result, [C,SS,Pid|_]) ->
ssh_send_pre(S) -> lists:any(fun has_subsyst/1, S#state.channels).
-ssh_send_args(S) -> [oneof(lists:filter(fun has_subsyst/1, S#state.channels)),
- choose(0,1),
- message()].
+ssh_send_args(S) ->
+ [oneof(lists:filter(fun has_subsyst/1, S#state.channels)),
+ choose(0,1),
+ message()].
%% For re-arrangement in parallel tests.
ssh_send_pre(S, [C|_]) -> lists:member(C, S#state.channels).
@@ -388,17 +434,17 @@ ssh_send(C=#chan{conn_ref=ConnectionRef, ref=ChannelRef, client_pid=Pid}, Type,
end).
ssh_send_blocking(_S, _Args) ->
- true.
+ true.
ssh_send_post(_S, [C,_,Msg], Response) when is_binary(Response) ->
- Expected = ssh_eqc_subsys:response(modify_msg(C,Msg), C#chan.subsystem),
+ Expected = ssh_eqc_subsys:response(modify_msg(C,Msg), C#chan.subsystem),
case Response of
Expected -> true;
_ -> {send_failed, size(Response), size(Expected)}
end;
ssh_send_post(_S, _Args, Response) ->
- {error,Response}.
+ {error,Response}.
modify_msg(_, <<>>) -> <<>>;
@@ -440,7 +486,11 @@ present_result(_Module, Cmds, _Triple, true) ->
true)))));
present_result(Module, Cmds, Triple, false) ->
- pretty_commands(Module, Cmds, Triple, [{show_states,true}], false).
+ pretty_comands(Module, Cmds, Triple, [{show_states,true}], false),
+ false. % Proper dislikes non-boolean results while eqc treats non-true as false.
+
+pretty_comands(Module, Cmds, Triple, Opts, Bool) ->
+ ct:log("Module = ~p,~n Cmds = ~p,~n Triple = ~p,~n Opts = ~p,~n Bool = ~p",[Module, Cmds, Triple, Opts, Bool]).
@@ -476,23 +526,35 @@ traverse_commands(Fseq, Fpar, {Seq, ParLs}) -> lists:append([Fseq(Seq)|Fpar(ParL
print_frequencies() -> print_frequencies(10).
print_frequencies(Ngroups) -> fun([]) -> io:format('Empty list!~n',[]);
- (L ) -> print_frequencies(L,Ngroups,0,element(1,lists:last(L)))
+ (L ) ->
+ try
+ M = lists:last(L),
+ Max = if is_integer(M) -> M;
+ is_tuple(M) -> element(1,M)
+ end,
+ print_frequencies(L,Ngroups,0,Max)
+ catch
+ C:E:S ->
+ ct:pal("~p:~p ~p:~p~n~p~n~p",[?MODULE,?LINE,C,E,S,L])
+ end
end.
+
print_frequencies(Ngroups, MaxValue) -> fun(L) -> print_frequencies(L,Ngroups,0,MaxValue) end.
print_frequencies(L, N, Min, Max) when N>Max -> print_frequencies(L++[{N,0}], N, Min, N);
-print_frequencies(L, N, Min, Max) ->
-%%io:format('L=~p~n',[L]),
+print_frequencies(L, N, Min, Max0) ->
try
+ Interval = round((Max0-Min)/N),
+ Max = Max0 + (Max0 rem Interval),
IntervalUpperLimits =
lists:reverse(
- [Max | tl(lists:reverse(lists:seq(Min,Max,round((Max-Min)/N))))]
+ [Max | tl(lists:reverse(lists:seq(Min,Max,Interval)))]
),
{Acc0,_} = lists:mapfoldl(fun(Upper,Lower) ->
{{{Lower,Upper},0}, Upper+1}
end, hd(IntervalUpperLimits), tl(IntervalUpperLimits)),
- Fs0 = get_frequencies(L, Acc0),
+ Fs0 = get_frequencies(L, Acc0),
SumVal = lists:sum([V||{_,V}<-Fs0]),
Fs = with_percentage(Fs0, SumVal),
Mean = mean(L),
@@ -517,7 +579,6 @@ print_frequencies(L, N, Min, Max) ->
|| {Interval={Rlow,Rhigh},Val,Percent} <- Fs],
io:format('~*c ~*c~n',[2*Npos_range,32,Npos_value+2,$-]),
io:format('~*c ~*w~n',[2*Npos_range,32,Npos_value,SumVal])
- %%,io:format('L=~p~n',[L])
catch
C:E ->
io:format('*** Failed printing (~p:~p) for~n~p~n',[C,E,L])
@@ -527,6 +588,8 @@ get_frequencies([{I,Num}|T], [{{Lower,Upper},Cnt}|Acc]) when Lower=<I,I=<Upper -
get_frequencies(T, [{{Lower,Upper},Cnt+Num}|Acc]);
get_frequencies(L=[{I,_Num}|_], [Ah={{_Lower,Upper},_Cnt}|Acc]) when I>Upper ->
[Ah | get_frequencies(L,Acc)];
+get_frequencies([I|T], Acc) when is_integer(I) ->
+ get_frequencies([{I,1}|T], Acc);
get_frequencies([], Acc) ->
Acc.
@@ -616,4 +679,3 @@ erase_dir(Dir) ->
file:del_dir(Dir).
-endif.
--endif.
diff --git a/lib/ssh/test/ssh_algorithms_SUITE.erl b/lib/ssh/test/ssh_algorithms_SUITE.erl
index 5e589e585f..02e5f40c38 100644
--- a/lib/ssh/test/ssh_algorithms_SUITE.erl
+++ b/lib/ssh/test/ssh_algorithms_SUITE.erl
@@ -184,12 +184,15 @@ init_per_testcase(TC, {public_key,Alg}, Config) ->
| ExtraOpts],
[{extra_daemon,true}|Config]);
{{ok,_}, {error,Err}} ->
+ ct:log("Alg = ~p~nOpts = ~p",[Alg,Opts]),
{skip, io_lib:format("No host key: ~p",[Err])};
{{error,Err}, {ok,_}} ->
+ ct:log("Alg = ~p~nOpts = ~p",[Alg,Opts]),
{skip, io_lib:format("No user key: ~p",[Err])};
_ ->
+ ct:log("Alg = ~p~nOpts = ~p",[Alg,Opts]),
{skip, "Neither host nor user key"}
end;
@@ -470,7 +473,9 @@ setup_pubkey(Alg, Config) ->
'rsa-sha2-512' -> ssh_test_lib:setup_rsa(DataDir, UserDir);
'ecdsa-sha2-nistp256' -> ssh_test_lib:setup_ecdsa("256", DataDir, UserDir);
'ecdsa-sha2-nistp384' -> ssh_test_lib:setup_ecdsa("384", DataDir, UserDir);
- 'ecdsa-sha2-nistp521' -> ssh_test_lib:setup_ecdsa("521", DataDir, UserDir)
+ 'ecdsa-sha2-nistp521' -> ssh_test_lib:setup_ecdsa("521", DataDir, UserDir);
+ 'ssh-ed25519' -> ssh_test_lib:setup_eddsa(ed25519, DataDir, UserDir);
+ 'ssh-ed448' -> ssh_test_lib:setup_eddsa(ed448, DataDir, UserDir)
end,
Config.
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519 b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519
new file mode 100644
index 0000000000..401a3e4a9a
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnwAAAJg3+6xpN/us
+aQAAAAtzc2gtZWQyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnw
+AAAEBzC/Z2WGJhZ3l3tIBnUc6DCbp+lXY2yc2RRpWQTdf8sub0/z+ALQg4qbActK+SYS1L
+ZfHRFGpQOoLrBjpSANWfAAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519.pub b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519.pub
new file mode 100644
index 0000000000..a5c03b19c1
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOb0/z+ALQg4qbActK+SYS1LZfHRFGpQOoLrBjpSANWf uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448 b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448
new file mode 100644
index 0000000000..8ecfd710dc
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA53OqeePNaG/NJmoMbELhskKrAHNhLZ6AQm1WjbpMoseNl/OFh
+1xznExpUPqTLX36fHYsAaWRHABQAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtf
+fp8diwBpZEcAFAAAAAByzSPST3FCdOdENDI3uTKQ9RH2Ql+Y5kRZ/yA+iYUIP/32
+BQBVOrwOBc0CGEvbicTM1n4YeVEmfrMo3OqeePNaG/NJmoMbELhskKrAHNhLZ6AQ
+m1WjbpMoseNl/OFh1xznExpUPqTLX36fHYsAaWRHABQAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448.pub b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448.pub
new file mode 100644
index 0000000000..cec0765a5d
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtffp8diwBpZEcAFAA= uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key
new file mode 100644
index 0000000000..13a8fcf491
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQAAAJi+h4O7voeD
+uwAAAAtzc2gtZWQyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQ
+AAAEBaOcJfGPNemKc1wPHTCmM4Kwvh6dZ0CqY14UT361UnN0lI66JgZZo72XJ7wGBp+h3W
+TDw/pxXddvaomAIHrIl9AAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key.pub b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key.pub
new file mode 100644
index 0000000000..156ef4045c
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIElI66JgZZo72XJ7wGBp+h3WTDw/pxXddvaomAIHrIl9 uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key
new file mode 100644
index 0000000000..31a7e4e8c3
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA5X9dEm1m0Yf0s54fsYWrUah2hNCSFpw4fig6nXYDpZ3jt8SR2
+m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHl
+D2zR+hq+r+glYYAAAABybIKlYsuAjRDWMr6JyFE+v2ySnzTd+oyfY8mWDvbjSKNS
+jIo/zC8ETjmj/FuUSS+PAy51SaIAmPlbX9dEm1m0Yf0s54fsYWrUah2hNCSFpw4f
+ig6nXYDpZ3jt8SR2m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key.pub b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key.pub
new file mode 100644
index 0000000000..8c390dcb58
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHlD2zR+hq+r+glYYA=
diff --git a/lib/ssh/test/ssh_basic_SUITE.erl b/lib/ssh/test/ssh_basic_SUITE.erl
index 778ae1e7b6..da94b5722f 100644
--- a/lib/ssh/test/ssh_basic_SUITE.erl
+++ b/lib/ssh/test/ssh_basic_SUITE.erl
@@ -56,6 +56,8 @@ groups() ->
{group, ecdsa_sha2_nistp256_key},
{group, ecdsa_sha2_nistp384_key},
{group, ecdsa_sha2_nistp521_key},
+ {group, ed25519_key},
+ {group, ed448_key},
{group, dsa_pass_key},
{group, rsa_pass_key},
{group, ecdsa_sha2_nistp256_pass_key},
@@ -94,6 +96,8 @@ groups() ->
{ecdsa_sha2_nistp256_key, [], [{group, basic}]},
{ecdsa_sha2_nistp384_key, [], [{group, basic}]},
{ecdsa_sha2_nistp521_key, [], [{group, basic}]},
+ {ed25519_key, [], [{group, basic}]},
+ {ed448_key, [], [{group, basic}]},
{rsa_host_key_is_actualy_ecdsa, [], [fail_daemon_start]},
{host_user_key_differs, [parallel], [exec_key_differs1,
exec_key_differs2,
@@ -222,6 +226,28 @@ init_per_group(ecdsa_sha2_nistp521_key, Config) ->
false ->
{skip, unsupported_pub_key}
end;
+init_per_group(ed25519_key, Config) ->
+ case lists:member('ssh-ed25519',
+ ssh_transport:default_algorithms(public_key)) of
+ true ->
+ DataDir = proplists:get_value(data_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ ssh_test_lib:setup_eddsa(ed25519, DataDir, PrivDir),
+ Config;
+ false ->
+ {skip, unsupported_pub_key}
+ end;
+init_per_group(ed448_key, Config) ->
+ case lists:member('ssh-ed448',
+ ssh_transport:default_algorithms(public_key)) of
+ true ->
+ DataDir = proplists:get_value(data_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ ssh_test_lib:setup_eddsa(ed448, DataDir, PrivDir),
+ Config;
+ false ->
+ {skip, unsupported_pub_key}
+ end;
init_per_group(rsa_pass_key, Config) ->
case lists:member('ssh-rsa',
ssh_transport:default_algorithms(public_key)) of
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519 b/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519
new file mode 100644
index 0000000000..401a3e4a9a
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnwAAAJg3+6xpN/us
+aQAAAAtzc2gtZWQyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnw
+AAAEBzC/Z2WGJhZ3l3tIBnUc6DCbp+lXY2yc2RRpWQTdf8sub0/z+ALQg4qbActK+SYS1L
+ZfHRFGpQOoLrBjpSANWfAAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519.pub b/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519.pub
new file mode 100644
index 0000000000..a5c03b19c1
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOb0/z+ALQg4qbActK+SYS1LZfHRFGpQOoLrBjpSANWf uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/id_ed448 b/lib/ssh/test/ssh_basic_SUITE_data/id_ed448
new file mode 100644
index 0000000000..8ecfd710dc
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/id_ed448
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA53OqeePNaG/NJmoMbELhskKrAHNhLZ6AQm1WjbpMoseNl/OFh
+1xznExpUPqTLX36fHYsAaWRHABQAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtf
+fp8diwBpZEcAFAAAAAByzSPST3FCdOdENDI3uTKQ9RH2Ql+Y5kRZ/yA+iYUIP/32
+BQBVOrwOBc0CGEvbicTM1n4YeVEmfrMo3OqeePNaG/NJmoMbELhskKrAHNhLZ6AQ
+m1WjbpMoseNl/OFh1xznExpUPqTLX36fHYsAaWRHABQAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/id_ed448.pub b/lib/ssh/test/ssh_basic_SUITE_data/id_ed448.pub
new file mode 100644
index 0000000000..cec0765a5d
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/id_ed448.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtffp8diwBpZEcAFAA= uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key
new file mode 100644
index 0000000000..13a8fcf491
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQAAAJi+h4O7voeD
+uwAAAAtzc2gtZWQyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQ
+AAAEBaOcJfGPNemKc1wPHTCmM4Kwvh6dZ0CqY14UT361UnN0lI66JgZZo72XJ7wGBp+h3W
+TDw/pxXddvaomAIHrIl9AAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key.pub b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key.pub
new file mode 100644
index 0000000000..156ef4045c
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIElI66JgZZo72XJ7wGBp+h3WTDw/pxXddvaomAIHrIl9 uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key
new file mode 100644
index 0000000000..31a7e4e8c3
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA5X9dEm1m0Yf0s54fsYWrUah2hNCSFpw4fig6nXYDpZ3jt8SR2
+m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHl
+D2zR+hq+r+glYYAAAABybIKlYsuAjRDWMr6JyFE+v2ySnzTd+oyfY8mWDvbjSKNS
+jIo/zC8ETjmj/FuUSS+PAy51SaIAmPlbX9dEm1m0Yf0s54fsYWrUah2hNCSFpw4f
+ig6nXYDpZ3jt8SR2m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key.pub b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key.pub
new file mode 100644
index 0000000000..8c390dcb58
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHlD2zR+hq+r+glYYA=
diff --git a/lib/ssh/test/ssh_compat_SUITE.erl b/lib/ssh/test/ssh_compat_SUITE.erl
index 1c607bebe8..f4eef2dc77 100644
--- a/lib/ssh/test/ssh_compat_SUITE.erl
+++ b/lib/ssh/test/ssh_compat_SUITE.erl
@@ -648,6 +648,7 @@ setup_remote_priv_and_local_auth_keys(KeyAlg, IP, Port, UserDir, Config) ->
{silently_accept_hosts,true},
{user_interaction,false}
]),
+ rm_id_in_remote_dir(Ch, ".ssh"),
_ = ssh_sftp:make_dir(Ch, ".ssh"),
DstFile = filename:join(".ssh", dst_filename(user,KeyAlg)),
ok = ssh_sftp:write_file(Ch, DstFile, Priv),
@@ -658,6 +659,18 @@ setup_remote_priv_and_local_auth_keys(KeyAlg, IP, Port, UserDir, Config) ->
ok = ssh:close(Cc),
UserDir.
+rm_id_in_remote_dir(Ch, Dir) ->
+ case ssh_sftp:list_dir(Ch, Dir) of
+ {error,_Error} ->
+ ok;
+ {ok,FileNames} ->
+ lists:foreach(fun("id_"++_ = F) ->
+ ok = ssh_sftp:delete(Ch, filename:join(Dir,F));
+ (_) ->
+ leave
+ end, FileNames)
+ end.
+
user_priv_pub_keys(Config, KeyAlg) -> priv_pub_keys("users_keys", user, Config, KeyAlg).
host_priv_pub_keys(Config, KeyAlg) -> priv_pub_keys("host_keys", host, Config, KeyAlg).
@@ -673,6 +686,8 @@ src_filename(user, 'ssh-rsa' ) -> "id_rsa";
src_filename(user, 'rsa-sha2-256' ) -> "id_rsa";
src_filename(user, 'rsa-sha2-512' ) -> "id_rsa";
src_filename(user, 'ssh-dss' ) -> "id_dsa";
+src_filename(user, 'ssh-ed25519' ) -> "id_ed25519";
+src_filename(user, 'ssh-ed448' ) -> "id_ed448";
src_filename(user, 'ecdsa-sha2-nistp256') -> "id_ecdsa256";
src_filename(user, 'ecdsa-sha2-nistp384') -> "id_ecdsa384";
src_filename(user, 'ecdsa-sha2-nistp521') -> "id_ecdsa521";
@@ -680,6 +695,8 @@ src_filename(host, 'ssh-rsa' ) -> "ssh_host_rsa_key";
src_filename(host, 'rsa-sha2-256' ) -> "ssh_host_rsa_key";
src_filename(host, 'rsa-sha2-512' ) -> "ssh_host_rsa_key";
src_filename(host, 'ssh-dss' ) -> "ssh_host_dsa_key";
+src_filename(host, 'ssh-ed25519' ) -> "ssh_host_ed25519_key";
+src_filename(host, 'ssh-ed448' ) -> "ssh_host_ed448_key";
src_filename(host, 'ecdsa-sha2-nistp256') -> "ssh_host_ecdsa_key256";
src_filename(host, 'ecdsa-sha2-nistp384') -> "ssh_host_ecdsa_key384";
src_filename(host, 'ecdsa-sha2-nistp521') -> "ssh_host_ecdsa_key521".
@@ -688,6 +705,8 @@ dst_filename(user, 'ssh-rsa' ) -> "id_rsa";
dst_filename(user, 'rsa-sha2-256' ) -> "id_rsa";
dst_filename(user, 'rsa-sha2-512' ) -> "id_rsa";
dst_filename(user, 'ssh-dss' ) -> "id_dsa";
+dst_filename(user, 'ssh-ed25519' ) -> "id_ed25519";
+dst_filename(user, 'ssh-ed448' ) -> "id_ed448";
dst_filename(user, 'ecdsa-sha2-nistp256') -> "id_ecdsa";
dst_filename(user, 'ecdsa-sha2-nistp384') -> "id_ecdsa";
dst_filename(user, 'ecdsa-sha2-nistp521') -> "id_ecdsa";
@@ -695,6 +714,8 @@ dst_filename(host, 'ssh-rsa' ) -> "ssh_host_rsa_key";
dst_filename(host, 'rsa-sha2-256' ) -> "ssh_host_rsa_key";
dst_filename(host, 'rsa-sha2-512' ) -> "ssh_host_rsa_key";
dst_filename(host, 'ssh-dss' ) -> "ssh_host_dsa_key";
+dst_filename(host, 'ssh-ed25519' ) -> "ssh_host_ed25519_key";
+dst_filename(host, 'ssh-ed448' ) -> "ssh_host_ed448_key";
dst_filename(host, 'ecdsa-sha2-nistp256') -> "ssh_host_ecdsa_key";
dst_filename(host, 'ecdsa-sha2-nistp384') -> "ssh_host_ecdsa_key";
dst_filename(host, 'ecdsa-sha2-nistp521') -> "ssh_host_ecdsa_key".
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all b/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
index 0dcf8cb570..c2e77fcc79 100755
--- a/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
+++ b/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
@@ -18,6 +18,12 @@ SSH_SSL_VERSIONS=(\
openssh 7.6p1 openssl 1.0.2n \
\
openssh 7.6p1 libressl 2.6.4 \
+ \
+ openssh 7.7p1 openssl 1.0.2p \
+ openssh 7.8p1 openssl 1.0.2p \
+ openssh 7.9p1 openssl 1.0.2p \
+ \
+ openssh 7.9p1 libressl 2.6.4 \
)
if [ "x$1" == "x-b" ]
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key
new file mode 100644
index 0000000000..13a8fcf491
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQAAAJi+h4O7voeD
+uwAAAAtzc2gtZWQyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQ
+AAAEBaOcJfGPNemKc1wPHTCmM4Kwvh6dZ0CqY14UT361UnN0lI66JgZZo72XJ7wGBp+h3W
+TDw/pxXddvaomAIHrIl9AAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key.pub b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key.pub
new file mode 100644
index 0000000000..156ef4045c
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIElI66JgZZo72XJ7wGBp+h3WTDw/pxXddvaomAIHrIl9 uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key
new file mode 100644
index 0000000000..31a7e4e8c3
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA5X9dEm1m0Yf0s54fsYWrUah2hNCSFpw4fig6nXYDpZ3jt8SR2
+m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHl
+D2zR+hq+r+glYYAAAABybIKlYsuAjRDWMr6JyFE+v2ySnzTd+oyfY8mWDvbjSKNS
+jIo/zC8ETjmj/FuUSS+PAy51SaIAmPlbX9dEm1m0Yf0s54fsYWrUah2hNCSFpw4f
+ig6nXYDpZ3jt8SR2m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key.pub b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key.pub
new file mode 100644
index 0000000000..8c390dcb58
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHlD2zR+hq+r+glYYA=
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519 b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519
new file mode 100644
index 0000000000..401a3e4a9a
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnwAAAJg3+6xpN/us
+aQAAAAtzc2gtZWQyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnw
+AAAEBzC/Z2WGJhZ3l3tIBnUc6DCbp+lXY2yc2RRpWQTdf8sub0/z+ALQg4qbActK+SYS1L
+ZfHRFGpQOoLrBjpSANWfAAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519.pub b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519.pub
new file mode 100644
index 0000000000..a5c03b19c1
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOb0/z+ALQg4qbActK+SYS1LZfHRFGpQOoLrBjpSANWf uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448 b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448
new file mode 100644
index 0000000000..8ecfd710dc
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA53OqeePNaG/NJmoMbELhskKrAHNhLZ6AQm1WjbpMoseNl/OFh
+1xznExpUPqTLX36fHYsAaWRHABQAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtf
+fp8diwBpZEcAFAAAAAByzSPST3FCdOdENDI3uTKQ9RH2Ql+Y5kRZ/yA+iYUIP/32
+BQBVOrwOBc0CGEvbicTM1n4YeVEmfrMo3OqeePNaG/NJmoMbELhskKrAHNhLZ6AQ
+m1WjbpMoseNl/OFh1xznExpUPqTLX36fHYsAaWRHABQAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448.pub b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448.pub
new file mode 100644
index 0000000000..cec0765a5d
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtffp8diwBpZEcAFAA= uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_engine_SUITE.erl b/lib/ssh/test/ssh_engine_SUITE.erl
index c2e6ac1fee..3adb23acdb 100644
--- a/lib/ssh/test/ssh_engine_SUITE.erl
+++ b/lib/ssh/test/ssh_engine_SUITE.erl
@@ -126,10 +126,17 @@ simple_connect(Config) ->
load_engine() ->
case crypto:get_test_engine() of
{ok, Engine} ->
- try crypto:engine_load(<<"dynamic">>,
+ try
+ %% The test engine has it's own fake rsa sign/verify that
+ %% you don't want to use, so exclude it from methods to load:
+ Methods =
+ crypto:engine_get_all_methods() -- [engine_method_rsa],
+ crypto:engine_load(<<"dynamic">>,
[{<<"SO_PATH">>, Engine},
<<"LOAD">>],
- [])
+ [],
+ Methods
+ )
catch
error:notsup ->
{error, notsup}
diff --git a/lib/ssh/test/ssh_options_SUITE.erl b/lib/ssh/test/ssh_options_SUITE.erl
index daf62483cd..60d0da2a39 100644
--- a/lib/ssh/test/ssh_options_SUITE.erl
+++ b/lib/ssh/test/ssh_options_SUITE.erl
@@ -49,7 +49,7 @@
server_userpassword_option/1,
server_pwdfun_option/1,
server_pwdfun_4_option/1,
- server_pwdfun_4_option_repeat/1,
+ server_keyboard_interactive/1,
ssh_connect_arg4_timeout/1,
ssh_connect_negtimeout_parallel/1,
ssh_connect_negtimeout_sequential/1,
@@ -99,7 +99,7 @@ all() ->
server_userpassword_option,
server_pwdfun_option,
server_pwdfun_4_option,
- server_pwdfun_4_option_repeat,
+ server_keyboard_interactive,
{group, dir_options},
ssh_connect_timeout,
ssh_connect_arg4_timeout,
@@ -381,7 +381,7 @@ server_pwdfun_4_option(Config) ->
%%--------------------------------------------------------------------
-server_pwdfun_4_option_repeat(Config) ->
+server_keyboard_interactive(Config) ->
UserDir = proplists:get_value(user_dir, Config),
SysDir = proplists:get_value(data_dir, Config),
%% Test that the state works
@@ -396,19 +396,28 @@ server_pwdfun_4_option_repeat(Config) ->
{pwdfun,PWDFUN}]),
%% Try with passwords "incorrect", "Bad again" and finally "bar"
- KIFFUN = fun(_,_,_) ->
+ KIFFUN = fun(_Name, _Instr, _PromptInfos) ->
K={k,self()},
- case get(K) of
- undefined ->
- put(K,1),
- ["incorrect"];
- 2 ->
- put(K,3),
- ["bar"];
- S->
- put(K,S+1),
- ["Bad again"]
- end
+ Answer =
+ case get(K) of
+ undefined ->
+ put(K,1),
+ ["incorrect"];
+ 2 ->
+ put(K,3),
+ ["bar"];
+ S->
+ put(K,S+1),
+ ["Bad again"]
+ end,
+ ct:log("keyboard_interact_fun:~n"
+ " Name = ~p~n"
+ " Instruction = ~p~n"
+ " Prompts = ~p~n"
+ "~nAnswer:~n ~p~n",
+ [_Name, _Instr, _PromptInfos, Answer]),
+
+ Answer
end,
ConnectionRef2 =
diff --git a/lib/ssh/test/ssh_property_test_SUITE.erl b/lib/ssh/test/ssh_property_test_SUITE.erl
index 3318b86d39..9aaac898a0 100644
--- a/lib/ssh/test/ssh_property_test_SUITE.erl
+++ b/lib/ssh/test/ssh_property_test_SUITE.erl
@@ -46,8 +46,9 @@ groups() ->
[{messages, [], [decode,
decode_encode]},
{client_server, [], [client_server_sequential,
- client_server_parallel,
- client_server_parallel_multi]}
+ client_server_parallel
+ %% client_server_parallel_multi
+ ]}
].
@@ -62,7 +63,7 @@ end_per_suite(Config) ->
%%% if we run proper.
init_per_group(client_server, Config) ->
case proplists:get_value(property_test_tool,Config) of
- eqc -> Config;
+ proper -> Config;
X -> {skip, lists:concat([X," is not supported"])}
end;
init_per_group(_, Config) ->
diff --git a/lib/ssh/test/ssh_test_lib.erl b/lib/ssh/test/ssh_test_lib.erl
index 416cc301db..a1a7eebcde 100644
--- a/lib/ssh/test/ssh_test_lib.erl
+++ b/lib/ssh/test/ssh_test_lib.erl
@@ -408,6 +408,21 @@ ct:log("DataDir ~p:~n ~p~n~nSystDir ~p:~n ~p~n~nUserDir ~p:~n ~p",[DataDir, file
setup_ecdsa_known_host(Size, System, UserDir),
setup_ecdsa_auth_keys(Size, DataDir, UserDir).
+setup_eddsa(Alg, DataDir, UserDir) ->
+ {IdPriv, IdPub, HostPriv, HostPub} =
+ case Alg of
+ ed25519 -> {"id_ed25519", "id_ed25519.pub", "ssh_host_ed25519_key", "ssh_host_ed25519_key.pub"};
+ ed448 -> {"id_ed448", "id_ed448.pub", "ssh_host_ed448_key", "ssh_host_ed448_key.pub"}
+ end,
+ file:copy(filename:join(DataDir, IdPriv), filename:join(UserDir, IdPriv)),
+ System = filename:join(UserDir, "system"),
+ file:make_dir(System),
+ file:copy(filename:join(DataDir, HostPriv), filename:join(System, HostPriv)),
+ file:copy(filename:join(DataDir, HostPub), filename:join(System, HostPub)),
+ct:log("DataDir ~p:~n ~p~n~nSystDir ~p:~n ~p~n~nUserDir ~p:~n ~p",[DataDir, file:list_dir(DataDir), System, file:list_dir(System), UserDir, file:list_dir(UserDir)]),
+ setup_eddsa_known_host(HostPub, DataDir, UserDir),
+ setup_eddsa_auth_keys(IdPriv, DataDir, UserDir).
+
clean_dsa(UserDir) ->
del_dirs(filename:join(UserDir, "system")),
file:delete(filename:join(UserDir,"id_dsa")),
@@ -487,6 +502,11 @@ setup_ecdsa_known_host(_Size, SystemDir, UserDir) ->
[{Key, _}] = public_key:ssh_decode(SshBin, public_key),
setup_known_hosts(Key, UserDir).
+setup_eddsa_known_host(HostPub, SystemDir, UserDir) ->
+ {ok, SshBin} = file:read_file(filename:join(SystemDir, HostPub)),
+ [{Key, _}] = public_key:ssh_decode(SshBin, public_key),
+ setup_known_hosts(Key, UserDir).
+
setup_known_hosts(Key, UserDir) ->
{ok, Hostname} = inet:gethostname(),
{ok, {A, B, C, D}} = inet:getaddr(Hostname, inet),
@@ -529,6 +549,11 @@ setup_ecdsa_auth_keys(Size, Dir, UserDir) ->
PKey = #'ECPoint'{point = Q},
setup_auth_keys([{ {PKey,Param}, [{comment, "Test"}]}], UserDir).
+setup_eddsa_auth_keys(IdPriv, Dir, UserDir) ->
+ {ok, Pem} = file:read_file(filename:join(Dir, IdPriv)),
+ {ed_pri, Alg, Pub, _} = public_key:pem_entry_decode(hd(public_key:pem_decode(Pem))),
+ setup_auth_keys([{{ed_pub,Alg,Pub}, [{comment, "Test"}]}], UserDir).
+
setup_auth_keys(Keys, Dir) ->
AuthKeys = public_key:ssh_encode(Keys, auth_keys),
AuthKeysFile = filename:join(Dir, "authorized_keys"),
diff --git a/lib/ssl/doc/src/notes.xml b/lib/ssl/doc/src/notes.xml
index 673431ed0a..46fd8ab180 100644
--- a/lib/ssl/doc/src/notes.xml
+++ b/lib/ssl/doc/src/notes.xml
@@ -333,6 +333,38 @@
</section>
+<section><title>SSL 8.2.6.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Add engine support for RSA key exchange</p>
+ <p>
+ Own Id: OTP-15420</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>SSL 8.2.6.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Extend the check for undelivered data at closing; under
+ some circumstances the connection could fail to deliver all
+ data that was actually received.</p>
+ <p>
+ Own Id: OTP-15412</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>SSL 8.2.6.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -3163,5 +3195,3 @@
</section>
</section>
</chapter>
-
-
diff --git a/lib/ssl/doc/src/ssl_app.xml b/lib/ssl/doc/src/ssl_app.xml
index f6d9021d4a..893919aeb4 100644
--- a/lib/ssl/doc/src/ssl_app.xml
+++ b/lib/ssl/doc/src/ssl_app.xml
@@ -171,6 +171,20 @@
shutdown gracefully. Defaults to 5000 milliseconds.
</p>
</item>
+
+ <tag><c><![CDATA[internal_active_n = integer() <optional>]]></c></tag>
+ <item>
+ <p>
+ For TLS connections this value is used to handle the
+ internal socket. As the implementation was changed from an
+ active once to an active N behavior (N = 100) for
+ performance reasons, this option exists for tweaking or
+ restoring the old behavior (internal_active_n = 1) in
+ unforeseen scenarios (see the configuration sketch after
+ this hunk). The option does not affect Erlang distribution
+ over TLS, which always runs in active N mode.
+ Added in ssl-9.1 (OTP-21.2).
+ </p>
+ </item>
</taglist>
</section>
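A hedged configuration sketch (values are illustrative): the parameter is an ssl application environment variable, so it can be set e.g. in a sys.config, where 1 restores the old active-once behavior:

    [{ssl, [{internal_active_n, 1}]}].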
diff --git a/lib/ssl/src/dtls_connection.erl b/lib/ssl/src/dtls_connection.erl
index 2a0b2b317d..37719ad439 100644
--- a/lib/ssl/src/dtls_connection.erl
+++ b/lib/ssl/src/dtls_connection.erl
@@ -39,7 +39,7 @@
-export([start_fsm/8, start_link/7, init/1, pids/1]).
%% State transition handling
--export([next_record/1, next_event/3, next_event/4, handle_common_event/4]).
+-export([next_event/3, next_event/4, handle_common_event/4]).
%% Handshake handling
-export([renegotiate/2, send_handshake/2,
@@ -50,7 +50,7 @@
-export([encode_alert/3, send_alert/2, send_alert_in_connection/2, close/5, protocol_name/0]).
%% Data handling
--export([encode_data/3, passive_receive/2, next_record_if_active/1,
+-export([encode_data/3, next_record/1,
send/3, socket/5, setopts/3, getopts/3]).
%% gen_statem state functions
@@ -162,9 +162,9 @@ next_record(State) ->
next_event(StateName, Record, State) ->
next_event(StateName, Record, State, []).
-next_event(connection = StateName, no_record,
+next_event(StateName, no_record,
#state{connection_states = #{current_read := #{epoch := CurrentEpoch}}} = State0, Actions) ->
- case next_record_if_active(State0) of
+ case next_record(State0) of
{no_record, State} ->
ssl_connection:hibernate_after(StateName, State, Actions);
{#ssl_tls{epoch = CurrentEpoch,
@@ -178,21 +178,18 @@ next_event(connection = StateName, no_record,
{#ssl_tls{epoch = Epoch,
type = ?HANDSHAKE,
version = _Version}, State1} = _Record when Epoch == CurrentEpoch-1 ->
- {State2, MoreActions} = send_handshake_flight(State1, CurrentEpoch),
- {NextRecord, State} = next_record(State2),
- next_event(StateName, NextRecord, State, Actions ++ MoreActions);
+ {State, MoreActions} = send_handshake_flight(State1, CurrentEpoch),
+ next_event(StateName, no_record, State, Actions ++ MoreActions);
%% From FLIGHT perspective CHANGE_CIPHER_SPEC is treated as a handshake
{#ssl_tls{epoch = Epoch,
type = ?CHANGE_CIPHER_SPEC,
version = _Version}, State1} = _Record when Epoch == CurrentEpoch-1 ->
- {State2, MoreActions} = send_handshake_flight(State1, CurrentEpoch),
- {NextRecord, State} = next_record(State2),
- next_event(StateName, NextRecord, State, Actions ++ MoreActions);
+ {State, MoreActions} = send_handshake_flight(State1, CurrentEpoch),
+ next_event(StateName, no_record, State, Actions ++ MoreActions);
{#ssl_tls{epoch = _Epoch,
- version = _Version}, State1} ->
+ version = _Version}, State} ->
%% TODO maybe buffer later epoch
- {Record, State} = next_record(State1),
- next_event(StateName, Record, State, Actions);
+ next_event(StateName, no_record, State, Actions);
{#alert{} = Alert, State} ->
{next_state, StateName, State, [{next_event, internal, Alert} | Actions]}
end;
@@ -210,24 +207,20 @@ next_event(connection = StateName, Record,
#ssl_tls{epoch = Epoch,
type = ?HANDSHAKE,
version = _Version} when Epoch == CurrentEpoch-1 ->
- {State1, MoreActions} = send_handshake_flight(State0, CurrentEpoch),
- {NextRecord, State} = next_record(State1),
- next_event(StateName, NextRecord, State, Actions ++ MoreActions);
+ {State, MoreActions} = send_handshake_flight(State0, CurrentEpoch),
+ next_event(StateName, no_record, State, Actions ++ MoreActions);
%% From FLIGHT perspective CHANGE_CIPHER_SPEC is treated as a handshake
#ssl_tls{epoch = Epoch,
type = ?CHANGE_CIPHER_SPEC,
version = _Version} when Epoch == CurrentEpoch-1 ->
- {State1, MoreActions} = send_handshake_flight(State0, CurrentEpoch),
- {NextRecord, State} = next_record(State1),
- next_event(StateName, NextRecord, State, Actions ++ MoreActions);
+ {State, MoreActions} = send_handshake_flight(State0, CurrentEpoch),
+ next_event(StateName, no_record, State, Actions ++ MoreActions);
_ ->
next_event(StateName, no_record, State0, Actions)
end;
next_event(StateName, Record,
#state{connection_states = #{current_read := #{epoch := CurrentEpoch}}} = State0, Actions) ->
case Record of
- no_record ->
- {next_state, StateName, State0, Actions};
#ssl_tls{epoch = CurrentEpoch,
version = Version} = Record ->
State = dtls_version(StateName, Version, State0),
@@ -236,8 +229,7 @@ next_event(StateName, Record,
#ssl_tls{epoch = _Epoch,
version = _Version} = _Record ->
%% TODO maybe buffer later epoch
- {Record, State} = next_record(State0),
- next_event(StateName, Record, State, Actions);
+ next_event(StateName, no_record, State0, Actions);
#alert{} = Alert ->
{next_state, StateName, State0, [{next_event, internal, Alert} | Actions]}
end.
@@ -254,8 +246,7 @@ handle_common_event(internal, #ssl_tls{type = ?HANDSHAKE,
try
case dtls_handshake:get_dtls_handshake(Version, Data, Buffers0) of
{[], Buffers} ->
- {Record, State} = next_record(State0#state{protocol_buffers = Buffers}),
- next_event(StateName, Record, State);
+ next_event(StateName, no_record, State0#state{protocol_buffers = Buffers});
{Packets, Buffers} ->
State = State0#state{protocol_buffers = Buffers},
Events = dtls_handshake_events(Packets),
@@ -291,15 +282,12 @@ handle_common_event(internal, #ssl_tls{type = _Unknown}, StateName, State) ->
renegotiate(#state{role = client} = State, Actions) ->
%% Handle same way as if server requested
%% the renegotiation
- {next_state, connection, State,
- [{next_event, internal, #hello_request{}} | Actions]};
-
+ next_event(connection, no_record, State, [{next_event, internal, #hello_request{}} | Actions]);
renegotiate(#state{role = server} = State0, Actions) ->
HelloRequest = ssl_handshake:hello_request(),
State1 = prepare_flight(State0),
- {State2, MoreActions} = send_handshake(HelloRequest, State1),
- {Record, State} = next_record(State2),
- next_event(hello, Record, State, Actions ++ MoreActions).
+ {State, MoreActions} = send_handshake(HelloRequest, State1),
+ next_event(hello, no_record, State, Actions ++ MoreActions).
send_handshake(Handshake, #state{connection_states = ConnectionStates} = State) ->
#{epoch := Epoch} = ssl_record:current_connection_state(ConnectionStates, write),
@@ -393,23 +381,6 @@ protocol_name() ->
encode_data(Data, Version, ConnectionStates0)->
dtls_record:encode_data(Data, Version, ConnectionStates0).
-passive_receive(State0 = #state{user_data_buffer = Buffer}, StateName) ->
- case Buffer of
- <<>> ->
- {Record, State} = next_record(State0),
- next_event(StateName, Record, State);
- _ ->
- {Record, State} = ssl_connection:read_application_data(<<>>, State0),
- next_event(StateName, Record, State)
- end.
-next_record_if_active(State =
- #state{socket_options =
- #socket_options{active = false}}) ->
- {no_record ,State};
-
-next_record_if_active(State) ->
- next_record(State).
-
send(Transport, {_, {{_,_}, _} = Socket}, Data) ->
send(Transport, Socket, Data);
send(Transport, Socket, Data) ->
@@ -451,15 +422,14 @@ init({call, From}, {start, Timeout},
HelloVersion = dtls_record:hello_version(Version, SslOpts#ssl_options.versions),
State1 = prepare_flight(State0#state{negotiated_version = Version}),
{State2, Actions} = send_handshake(Hello, State1#state{negotiated_version = HelloVersion}),
- State3 = State2#state{negotiated_version = Version, %% Requested version
- session =
- Session0#session{session_id = Hello#client_hello.session_id},
- start_or_recv_from = From,
- timer = Timer,
- flight_state = {retransmit, ?INITIAL_RETRANSMIT_TIMEOUT}
- },
- {Record, State} = next_record(State3),
- next_event(hello, Record, State, Actions);
+ State = State2#state{negotiated_version = Version, %% Requested version
+ session =
+ Session0#session{session_id = Hello#client_hello.session_id},
+ start_or_recv_from = From,
+ timer = Timer,
+ flight_state = {retransmit, ?INITIAL_RETRANSMIT_TIMEOUT}
+ },
+ next_event(hello, no_record, State, Actions);
init({call, _} = Type, Event, #state{role = server, data_tag = udp} = State) ->
Result = gen_handshake(?FUNCTION_NAME, Type, Event,
State#state{flight_state = {retransmit, ?INITIAL_RETRANSMIT_TIMEOUT},
@@ -469,7 +439,6 @@ init({call, _} = Type, Event, #state{role = server, data_tag = udp} = State) ->
max_ignored_alerts => 10}}),
erlang:send_after(dtls_v1:cookie_timeout(), self(), new_cookie_secret),
Result;
-
init({call, _} = Type, Event, #state{role = server} = State) ->
%% I.E. DTLS over sctp
gen_handshake(?FUNCTION_NAME, Type, Event, State#state{flight_state = reliable});
@@ -519,9 +488,9 @@ hello(internal, #client_hello{cookie = <<>>,
%% negotiated.
VerifyRequest = dtls_handshake:hello_verify_request(Cookie, ?HELLO_VERIFY_REQUEST_VERSION),
State1 = prepare_flight(State0#state{negotiated_version = Version}),
- {State2, Actions} = send_handshake(VerifyRequest, State1),
- {Record, State} = next_record(State2),
- next_event(?FUNCTION_NAME, Record, State#state{tls_handshake_history = ssl_handshake:init_handshake_history()}, Actions);
+ {State, Actions} = send_handshake(VerifyRequest, State1),
+ next_event(?FUNCTION_NAME, no_record,
+ State#state{tls_handshake_history = ssl_handshake:init_handshake_history()}, Actions);
hello(internal, #hello_verify_request{cookie = Cookie}, #state{role = client,
host = Host, port = Port,
ssl_options = SslOpts,
@@ -540,27 +509,29 @@ hello(internal, #hello_verify_request{cookie = Cookie}, #state{role = client,
State1 = prepare_flight(State0#state{tls_handshake_history = ssl_handshake:init_handshake_history()}),
{State2, Actions} = send_handshake(Hello, State1),
- State3 = State2#state{negotiated_version = Version, %% Requested version
- session =
- Session0#session{session_id =
- Hello#client_hello.session_id}},
- {Record, State} = next_record(State3),
- next_event(?FUNCTION_NAME, Record, State, Actions);
-hello(internal, #client_hello{extensions = Extensions} = Hello, #state{ssl_options = #ssl_options{handshake = hello},
- start_or_recv_from = From} = State) ->
+ State = State2#state{negotiated_version = Version, %% Requested version
+ session =
+ Session0#session{session_id =
+ Hello#client_hello.session_id}},
+ next_event(?FUNCTION_NAME, no_record, State, Actions);
+hello(internal, #client_hello{extensions = Extensions} = Hello,
+ #state{ssl_options = #ssl_options{handshake = hello},
+ start_or_recv_from = From} = State) ->
{next_state, user_hello, State#state{start_or_recv_from = undefined,
hello = Hello},
[{reply, From, {ok, ssl_connection:map_extensions(Extensions)}}]};
-hello(internal, #server_hello{extensions = Extensions} = Hello, #state{ssl_options = #ssl_options{handshake = hello},
- start_or_recv_from = From} = State) ->
+hello(internal, #server_hello{extensions = Extensions} = Hello,
+ #state{ssl_options = #ssl_options{handshake = hello},
+ start_or_recv_from = From} = State) ->
{next_state, user_hello, State#state{start_or_recv_from = undefined,
hello = Hello},
[{reply, From, {ok, ssl_connection:map_extensions(Extensions)}}]};
-hello(internal, #client_hello{cookie = Cookie} = Hello, #state{role = server,
- transport_cb = Transport,
- socket = Socket,
- protocol_specific = #{current_cookie_secret := Secret,
- previous_cookie_secret := PSecret}} = State0) ->
+hello(internal, #client_hello{cookie = Cookie} = Hello,
+ #state{role = server,
+ transport_cb = Transport,
+ socket = Socket,
+ protocol_specific = #{current_cookie_secret := Secret,
+ previous_cookie_secret := PSecret}} = State0) ->
{ok, {IP, Port}} = dtls_socket:peername(Transport, Socket),
case dtls_handshake:cookie(Secret, IP, Port, Hello) of
Cookie ->
@@ -595,8 +566,7 @@ hello(internal, {handshake, {#hello_verify_request{} = Handshake, _}}, State) ->
{next_state, ?FUNCTION_NAME, State, [{next_event, internal, Handshake}]};
hello(internal, #change_cipher_spec{type = <<1>>}, State0) ->
{State1, Actions0} = send_handshake_flight(State0, retransmit_epoch(?FUNCTION_NAME, State0)),
- {Record, State2} = next_record(State1),
- {next_state, ?FUNCTION_NAME, State, Actions} = next_event(?FUNCTION_NAME, Record, State2, Actions0),
+ {next_state, ?FUNCTION_NAME, State, Actions} = next_event(?FUNCTION_NAME, no_record, State1, Actions0),
%% This will reset the retransmission timer by repeating the enter state event
{repeat_state, State, Actions};
hello(info, Event, State) ->
@@ -647,8 +617,7 @@ certify(internal = Type, #server_hello_done{} = Event, State) ->
ssl_connection:certify(Type, Event, prepare_flight(State), ?MODULE);
certify(internal, #change_cipher_spec{type = <<1>>}, State0) ->
{State1, Actions0} = send_handshake_flight(State0, retransmit_epoch(?FUNCTION_NAME, State0)),
- {Record, State2} = next_record(State1),
- {next_state, ?FUNCTION_NAME, State, Actions} = next_event(?FUNCTION_NAME, Record, State2, Actions0),
+ {next_state, ?FUNCTION_NAME, State, Actions} = next_event(?FUNCTION_NAME, no_record, State1, Actions0),
%% This will reset the retransmission timer by repeating the enter state event
{repeat_state, State, Actions};
certify(state_timeout, Event, State) ->
@@ -701,13 +670,11 @@ connection(internal, #hello_request{}, #state{host = Host, port = Port,
Version = Hello#client_hello.client_version,
HelloVersion = dtls_record:hello_version(Version, SslOpts#ssl_options.versions),
State1 = prepare_flight(State0),
- {State2, Actions} = send_handshake(Hello, State1#state{negotiated_version = HelloVersion}),
- {Record, State} =
- next_record(
- State2#state{flight_state = {retransmit, ?INITIAL_RETRANSMIT_TIMEOUT},
- session = Session0#session{session_id
- = Hello#client_hello.session_id}}),
- next_event(hello, Record, State, Actions);
+ {State, Actions} = send_handshake(Hello, State1#state{negotiated_version = HelloVersion}),
+ next_event(hello, no_record, State#state{flight_state = {retransmit, ?INITIAL_RETRANSMIT_TIMEOUT},
+ session = Session0#session{session_id
+ = Hello#client_hello.session_id}},
+ Actions);
connection(internal, #client_hello{} = Hello, #state{role = server, allow_renegotiate = true} = State) ->
%% Mitigate Computational DoS attack
%% http://www.educatedguesswork.org/2011/10/ssltls_and_computational_dos.html
@@ -927,8 +894,7 @@ handle_state_timeout(flight_retransmission_timeout, StateName,
#state{flight_state = {retransmit, NextTimeout}} = State0) ->
{State1, Actions0} = send_handshake_flight(State0#state{flight_state = {retransmit, NextTimeout}},
retransmit_epoch(StateName, State0)),
- {Record, State2} = next_record(State1),
- {next_state, StateName, State, Actions} = next_event(StateName, Record, State2, Actions0),
+ {next_state, StateName, State, Actions} = next_event(StateName, no_record, State1, Actions0),
%% This will reset the retransmission timer by repeating the enter state event
{repeat_state, State, Actions}.
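%% Illustrative sketch (not part of the patch above) of the control-flow
%% change in dtls_connection.erl: callers no longer pull the next record
%% themselves with next_record/1; they pass the atom no_record and let a
%% single next_event/4 clause fetch it. The state map and next_record/1
%% below are hypothetical stand-ins for the real buffer/socket handling.
-module(no_record_dispatch_sketch).
-export([next_event/4]).

next_event(StateName, no_record, State0, Actions) ->
    %% Fetch the next record lazily, in exactly one place.
    case next_record(State0) of
        {no_record, State} ->
            {next_state, StateName, State, Actions};
        {Record, State} ->
            {next_state, StateName, State,
             [{next_event, internal, {protocol_record, Record}} | Actions]}
    end;
next_event(StateName, Record, State, Actions) ->
    {next_state, StateName, State,
     [{next_event, internal, {protocol_record, Record}} | Actions]}.

%% Hypothetical stand-in: pop a queued record if there is one.
next_record(#{queue := [Record | Rest]} = State) ->
    {Record, State#{queue := Rest}};
next_record(State) ->
    {no_record, State}.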
diff --git a/lib/ssl/src/dtls_record.erl b/lib/ssl/src/dtls_record.erl
index 9eb0d8e2d7..b7346d3ec8 100644
--- a/lib/ssl/src/dtls_record.erl
+++ b/lib/ssl/src/dtls_record.erl
@@ -499,23 +499,22 @@ encode_dtls_cipher_text(Type, {MajVer, MinVer}, Fragment,
WriteState#{sequence_number => Seq + 1}}.
encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
+ cipher_state := CipherS0,
epoch := Epoch,
sequence_number := Seq,
- cipher_state := CipherS0,
security_parameters :=
#security_parameters{
cipher_type = ?AEAD,
- bulk_cipher_algorithm =
- BulkCipherAlgo,
+ bulk_cipher_algorithm = BCAlg,
compression_algorithm = CompAlg}
} = WriteState0) ->
{Comp, CompS1} = ssl_record:compress(CompAlg, Data, CompS0),
- AAD = calc_aad(Type, Version, Epoch, Seq),
+ AAD = start_additional_data(Type, Version, Epoch, Seq),
+ CipherS = ssl_record:nonce_seed(BCAlg, <<?UINT16(Epoch), ?UINT48(Seq)>>, CipherS0),
+ WriteState = WriteState0#{compression_state => CompS1,
+ cipher_state => CipherS},
TLSVersion = dtls_v1:corresponding_tls_version(Version),
- {CipherFragment, CipherS1} =
- ssl_cipher:cipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, Comp, TLSVersion),
- {CipherFragment, WriteState0#{compression_state => CompS1,
- cipher_state => CipherS1}};
+ ssl_record:cipher_aead(TLSVersion, Comp, WriteState, AAD);
encode_plain_text(Type, Version, Fragment, #{compression_state := CompS0,
epoch := Epoch,
sequence_number := Seq,
@@ -547,9 +546,10 @@ decode_cipher_text(#ssl_tls{type = Type, version = Version,
BulkCipherAlgo,
compression_algorithm = CompAlg}} = ReadState0,
ConnnectionStates0) ->
- AAD = calc_aad(Type, Version, Epoch, Seq),
+ AAD = start_additional_data(Type, Version, Epoch, Seq),
+ CipherS1 = ssl_record:nonce_seed(BulkCipherAlgo, <<?UINT16(Epoch), ?UINT48(Seq)>>, CipherS0),
TLSVersion = dtls_v1:corresponding_tls_version(Version),
- case ssl_cipher:decipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, CipherFragment, TLSVersion) of
+ case ssl_record:decipher_aead(BulkCipherAlgo, CipherS1, AAD, CipherFragment, TLSVersion) of
{PlainFragment, CipherState} ->
{Plain, CompressionS1} = ssl_record:uncompress(CompAlg,
PlainFragment, CompressionS0),
@@ -600,7 +600,7 @@ mac_hash({Major, Minor}, MacAlg, MacSecret, Epoch, SeqNo, Type, Length, Fragment
Fragment],
dtls_v1:hmac_hash(MacAlg, MacSecret, Value).
-calc_aad(Type, {MajVer, MinVer}, Epoch, SeqNo) ->
+start_additional_data(Type, {MajVer, MinVer}, Epoch, SeqNo) ->
<<?UINT16(Epoch), ?UINT48(SeqNo), ?BYTE(Type), ?BYTE(MajVer), ?BYTE(MinVer)>>.
%%--------------------------------------------------------------------
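%% Illustrative sketch (not part of the patch above) of the DTLS AEAD
%% additional-data layout behind start_additional_data/4: epoch and
%% sequence number, followed by record type and protocol version, with the
%% fragment length appended just before encryption (end_additional_data/2
%% in ssl_record.erl). Plain bit syntax, no OTP macros needed.
-module(dtls_aad_sketch).
-export([start_additional_data/4, end_additional_data/2]).

start_additional_data(Type, {MajVer, MinVer}, Epoch, SeqNo) ->
    <<Epoch:16, SeqNo:48, Type:8, MajVer:8, MinVer:8>>.

end_additional_data(AAD, FragmentLen) when is_binary(AAD) ->
    <<AAD/binary, FragmentLen:16>>.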
diff --git a/lib/ssl/src/inet_tls_dist.erl b/lib/ssl/src/inet_tls_dist.erl
index ca059603ae..a4f8bb7562 100644
--- a/lib/ssl/src/inet_tls_dist.erl
+++ b/lib/ssl/src/inet_tls_dist.erl
@@ -567,7 +567,7 @@ gen_close(Driver, Socket) ->
get_address_resolver(EpmdModule, Driver) ->
case erlang:function_exported(EpmdModule, address_please, 3) of
true -> {EpmdModule, address_please};
- _ -> {Driver, getaddr}
+ _ -> {erl_epmd, address_please}
end.
%% ------------------------------------------------------------
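%% Illustrative sketch (not part of the patch above) of the optional-callback
%% dispatch used in get_address_resolver/2: ask the configured EPMD module
%% for address_please/3 only if it exports it, otherwise fall back to the
%% default erl_epmd resolver. The wrapper module name is hypothetical.
-module(epmd_resolver_sketch).
-export([resolve/4]).

resolve(EpmdModule, Name, Host, AddressFamily) ->
    {M, F} = case erlang:function_exported(EpmdModule, address_please, 3) of
                 true  -> {EpmdModule, address_please};
                 false -> {erl_epmd, address_please}
             end,
    M:F(Name, Host, AddressFamily).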
diff --git a/lib/ssl/src/ssl_cipher.erl b/lib/ssl/src/ssl_cipher.erl
index b23129dcdd..54c04c13e5 100644
--- a/lib/ssl/src/ssl_cipher.erl
+++ b/lib/ssl/src/ssl_cipher.erl
@@ -34,7 +34,7 @@
-include_lib("public_key/include/public_key.hrl").
-export([security_parameters/2, security_parameters/3,
- cipher_init/3, decipher/6, cipher/5, decipher_aead/6, cipher_aead/6,
+ cipher_init/3, nonce_seed/2, decipher/6, cipher/5, aead_encrypt/5, aead_decrypt/6,
suites/1, all_suites/1, crypto_support_filters/0,
chacha_suites/1, anonymous_suites/1, psk_suites/1, psk_suites_anon/1,
srp_suites/0, srp_suites_anon/0,
@@ -48,6 +48,8 @@
-type cipher_enum() :: integer().
+-export_type([cipher_enum/0]).
+
%%--------------------------------------------------------------------
-spec security_parameters(ssl_cipher_format:cipher_suite(), #security_parameters{}) ->
#security_parameters{}.
@@ -91,10 +93,15 @@ cipher_init(?RC4, IV, Key) ->
#cipher_state{iv = IV, key = Key, state = State};
cipher_init(?AES_GCM, IV, Key) ->
<<Nonce:64>> = random_bytes(8),
- #cipher_state{iv = IV, key = Key, nonce = Nonce};
+ #cipher_state{iv = IV, key = Key, nonce = Nonce, tag_len = 16};
+cipher_init(?CHACHA20_POLY1305, IV, Key) ->
+ #cipher_state{iv = IV, key = Key, tag_len = 16};
cipher_init(_BCA, IV, Key) ->
#cipher_state{iv = IV, key = Key}.
+nonce_seed(Seed, CipherState) ->
+ CipherState#cipher_state{nonce = Seed}.
+
%%--------------------------------------------------------------------
-spec cipher(cipher_enum(), #cipher_state{}, binary(), iodata(), ssl_record:ssl_version()) ->
{binary(), #cipher_state{}}.
@@ -126,32 +133,16 @@ cipher(?AES_CBC, CipherState, Mac, Fragment, Version) ->
crypto:block_encrypt(aes_cbc256, Key, IV, T)
end, block_size(aes_128_cbc), CipherState, Mac, Fragment, Version).
-%%--------------------------------------------------------------------
--spec cipher_aead(cipher_enum(), #cipher_state{}, integer(), binary(), iodata(), ssl_record:ssl_version()) ->
- {binary(), #cipher_state{}}.
-%%
-%% Description: Encrypts the data and protects associated data (AAD) using chipher
-%% described by cipher_enum() and updating the cipher state
-%% Use for suites that use authenticated encryption with associated data (AEAD)
-%%-------------------------------------------------------------------
-cipher_aead(?AES_GCM, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_cipher(aes_gcm, CipherState, SeqNo, AAD, Fragment, Version);
-cipher_aead(?CHACHA20_POLY1305, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_cipher(chacha20_poly1305, CipherState, SeqNo, AAD, Fragment, Version).
-
-aead_cipher(chacha20_poly1305, #cipher_state{key=Key} = CipherState, SeqNo, AAD0, Fragment, _Version) ->
- CipherLen = erlang:iolist_size(Fragment),
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- Nonce = ?uint64(SeqNo),
- {Content, CipherTag} = crypto:block_encrypt(chacha20_poly1305, Key, Nonce, {AAD, Fragment}),
- {<<Content/binary, CipherTag/binary>>, CipherState};
-aead_cipher(Type, #cipher_state{key=Key, iv = IV0, nonce = Nonce} = CipherState, _SeqNo, AAD0, Fragment, _Version) ->
- CipherLen = erlang:iolist_size(Fragment),
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- <<Salt:4/bytes, _/binary>> = IV0,
- IV = <<Salt/binary, Nonce:64/integer>>,
- {Content, CipherTag} = crypto:block_encrypt(Type, Key, IV, {AAD, Fragment}),
- {<<Nonce:64/integer, Content/binary, CipherTag/binary>>, CipherState#cipher_state{nonce = Nonce + 1}}.
+aead_encrypt(Type, Key, Nonce, Fragment, AdditionalData) ->
+ crypto:block_encrypt(aead_type(Type), Key, Nonce, {AdditionalData, Fragment}).
+
+aead_decrypt(Type, Key, Nonce, CipherText, CipherTag, AdditionalData) ->
+ crypto:block_decrypt(aead_type(Type), Key, Nonce, {AdditionalData, CipherText, CipherTag}).
+
+aead_type(?AES_GCM) ->
+ aes_gcm;
+aead_type(?CHACHA20_POLY1305) ->
+ chacha20_poly1305.
build_cipher_block(BlockSz, Mac, Fragment) ->
TotSz = byte_size(Mac) + erlang:iolist_size(Fragment) + 1,
@@ -218,19 +209,6 @@ decipher(?AES_CBC, HashSz, CipherState, Fragment, Version, PaddingCheck) ->
crypto:block_decrypt(aes_cbc256, Key, IV, T)
end, CipherState, HashSz, Fragment, Version, PaddingCheck).
-%%--------------------------------------------------------------------
--spec decipher_aead(cipher_enum(), #cipher_state{}, integer(), binary(), binary(), ssl_record:ssl_version()) ->
- {binary(), #cipher_state{}} | #alert{}.
-%%
-%% Description: Decrypts the data and checks the associated data (AAD) MAC using
-%% cipher described by cipher_enum() and updating the cipher state.
-%% Use for suites that use authenticated encryption with associated data (AEAD)
-%%-------------------------------------------------------------------
-decipher_aead(?AES_GCM, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_decipher(aes_gcm, CipherState, SeqNo, AAD, Fragment, Version);
-decipher_aead(?CHACHA20_POLY1305, CipherState, SeqNo, AAD, Fragment, Version) ->
- aead_decipher(chacha20_poly1305, CipherState, SeqNo, AAD, Fragment, Version).
-
block_decipher(Fun, #cipher_state{key=Key, iv=IV} = CipherState0,
HashSz, Fragment, Version, PaddingCheck) ->
try
@@ -261,34 +239,6 @@ block_decipher(Fun, #cipher_state{key=Key, iv=IV} = CipherState0,
?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
end.
-aead_ciphertext_to_state(chacha20_poly1305, SeqNo, _IV, AAD0, Fragment, _Version) ->
- CipherLen = size(Fragment) - 16,
- <<CipherText:CipherLen/bytes, CipherTag:16/bytes>> = Fragment,
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- Nonce = ?uint64(SeqNo),
- {Nonce, AAD, CipherText, CipherTag};
-aead_ciphertext_to_state(_, _SeqNo, <<Salt:4/bytes, _/binary>>, AAD0, Fragment, _Version) ->
- CipherLen = size(Fragment) - 24,
- <<ExplicitNonce:8/bytes, CipherText:CipherLen/bytes, CipherTag:16/bytes>> = Fragment,
- AAD = <<AAD0/binary, ?UINT16(CipherLen)>>,
- Nonce = <<Salt/binary, ExplicitNonce/binary>>,
- {Nonce, AAD, CipherText, CipherTag}.
-
-aead_decipher(Type, #cipher_state{key = Key, iv = IV} = CipherState,
- SeqNo, AAD0, Fragment, Version) ->
- try
- {Nonce, AAD, CipherText, CipherTag} = aead_ciphertext_to_state(Type, SeqNo, IV, AAD0, Fragment, Version),
- case crypto:block_decrypt(Type, Key, Nonce, {AAD, CipherText, CipherTag}) of
- Content when is_binary(Content) ->
- {Content, CipherState};
- _ ->
- ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
- end
- catch
- _:_ ->
- ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
- end.
-
%%--------------------------------------------------------------------
-spec suites(ssl_record:ssl_version()) -> [ssl_cipher_format:cipher_suite()].
%%
@@ -982,7 +932,7 @@ filter_suites_pubkey(ec, Ciphers, _, OtpCert) ->
ec_ecdhe_suites(Ciphers)),
filter_keyuse_suites(keyAgreement, Uses, CiphersSuites, ec_ecdh_suites(Ciphers)).
-filter_suites_signature(rsa, Ciphers, {3, N}) when N >= 3 ->
+filter_suites_signature(_, Ciphers, {3, N}) when N >= 3 ->
Ciphers;
filter_suites_signature(rsa, Ciphers, Version) ->
(Ciphers -- ecdsa_signed_suites(Ciphers, Version)) -- dsa_signed_suites(Ciphers, Version);
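%% Illustrative sketch (not part of the patch above) of the AEAD round trip
%% behind the new aead_encrypt/5 and aead_decrypt/6 wrappers: the same
%% crypto block API the patch calls, with an {AAD, PlainText} tuple on
%% encrypt and an {AAD, CipherText, CipherTag} tuple on decrypt. Key, nonce
%% and AAD below are arbitrary demo values, not taken from the patch.
-module(aead_roundtrip_sketch).
-export([demo/0]).

demo() ->
    Key   = crypto:strong_rand_bytes(16),   %% AES-128-GCM key
    Nonce = crypto:strong_rand_bytes(12),   %% 4-byte salt ++ 8-byte explicit part
    AAD   = <<1,2,3,4>>,
    Plain = <<"hello aead">>,
    {Cipher, Tag} = crypto:block_encrypt(aes_gcm, Key, Nonce, {AAD, Plain}),
    Plain = crypto:block_decrypt(aes_gcm, Key, Nonce, {AAD, Cipher, Tag}),
    ok.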
diff --git a/lib/ssl/src/ssl_cipher.hrl b/lib/ssl/src/ssl_cipher.hrl
index ba6a98b92a..2371e8bd32 100644
--- a/lib/ssl/src/ssl_cipher.hrl
+++ b/lib/ssl/src/ssl_cipher.hrl
@@ -48,7 +48,8 @@
iv,
key,
state,
- nonce
+ nonce,
+ tag_len
}).
%%% TLS_NULL_WITH_NULL_NULL is specified and is the initial state of a
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index 9f876add6c..4b406b4c1e 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -51,8 +51,8 @@
%% Alert and close handling
-export([handle_own_alert/4, handle_alert/3,
- handle_normal_shutdown/3, stop/2, stop_and_reply/3
- ]).
+ handle_normal_shutdown/3, stop/2, stop_and_reply/3,
+ handle_trusted_certs_db/1]).
%% Data handling
-export([read_application_data/2, internal_renegotiation/2]).
@@ -403,9 +403,8 @@ handle_alert(#alert{level = ?WARNING, description = ?NO_RENEGOTIATION} = Alert,
log_alert(SslOpts#ssl_options.log_alert, Role,
Connection:protocol_name(), StateName, Alert#alert{role = opposite_role(Role)}),
gen_statem:reply(From, {error, renegotiation_rejected}),
- State1 = Connection:reinit_handshake_data(State0),
- {Record, State} = Connection:next_record(State1#state{renegotiation = undefined}),
- Connection:next_event(connection, Record, State);
+ State = Connection:reinit_handshake_data(State0),
+ Connection:next_event(connection, no_record, State#state{renegotiation = undefined});
handle_alert(#alert{level = ?WARNING, description = ?NO_RENEGOTIATION} = Alert, StateName,
#state{role = Role,
@@ -414,22 +413,35 @@ handle_alert(#alert{level = ?WARNING, description = ?NO_RENEGOTIATION} = Alert,
log_alert(SslOpts#ssl_options.log_alert, Role,
Connection:protocol_name(), StateName, Alert#alert{role = opposite_role(Role)}),
gen_statem:reply(From, {error, renegotiation_rejected}),
- {Record, State1} = Connection:next_record(State0),
%% Go back to connection!
- State = Connection:reinit(State1#state{renegotiation = undefined}),
- Connection:next_event(connection, Record, State);
+ State = Connection:reinit(State0#state{renegotiation = undefined}),
+ Connection:next_event(connection, no_record, State);
%% Gracefully log and ignore all other warning alerts
handle_alert(#alert{level = ?WARNING} = Alert, StateName,
- #state{ssl_options = SslOpts, protocol_cb = Connection, role = Role} = State0) ->
+ #state{ssl_options = SslOpts, protocol_cb = Connection, role = Role} = State) ->
log_alert(SslOpts#ssl_options.log_alert, Role,
Connection:protocol_name(), StateName, Alert#alert{role = opposite_role(Role)}),
- {Record, State} = Connection:next_record(State0),
- Connection:next_event(StateName, Record, State).
+ Connection:next_event(StateName, no_record, State).
%%====================================================================
%% Data handling
%%====================================================================
+
+passive_receive(State0 = #state{user_data_buffer = Buffer}, StateName, Connection) ->
+ case Buffer of
+ <<>> ->
+ {Record, State} = Connection:next_record(State0),
+ Connection:next_event(StateName, Record, State);
+ _ ->
+ case read_application_data(<<>>, State0) of
+ {stop, _, _} = ShutdownError ->
+ ShutdownError;
+ {Record, State} ->
+ Connection:next_event(StateName, Record, State)
+ end
+ end.
+
read_application_data(Data, #state{user_application = {_Mon, Pid},
socket = Socket,
protocol_cb = Connection,
@@ -472,28 +484,26 @@ read_application_data(Data, #state{user_application = {_Mon, Pid},
Buffer =:= <<>> ->
%% Passive mode, wait for active once or recv
%% Active and empty, get more data
- Connection:next_record_if_active(State);
+ {no_record, State};
true -> %% We have more data
read_application_data(<<>>, State)
end
end;
{more, Buffer} -> % no reply, we need more data
- Connection:next_record(State0#state{user_data_buffer = Buffer});
+ {no_record, State0#state{user_data_buffer = Buffer}};
{passive, Buffer} ->
- Connection:next_record_if_active(State0#state{user_data_buffer = Buffer});
+ {no_record, State0#state{user_data_buffer = Buffer}};
{error,_Reason} -> %% Invalid packet in packet mode
deliver_packet_error(Connection:pids(State0),
Transport, Socket, SOpts, Buffer1, Pid, RecvFrom, Tracker, Connection),
stop(normal, State0)
end.
-dist_app_data(ClientData, #state{protocol_cb = Connection,
- erl_dist_data = #{dist_handle := undefined,
+dist_app_data(ClientData, #state{erl_dist_data = #{dist_handle := undefined,
dist_buffer := DistBuff} = DistData} = State) ->
- Connection:next_record_if_active(State#state{erl_dist_data = DistData#{dist_buffer => [ClientData, DistBuff]}});
+ {no_record, State#state{erl_dist_data = DistData#{dist_buffer => [ClientData, DistBuff]}}};
dist_app_data(ClientData, #state{erl_dist_data = #{dist_handle := DHandle,
dist_buffer := DistBuff} = ErlDistData,
- protocol_cb = Connection,
user_data_buffer = Buffer,
socket_options = SOpts} = State) ->
Data = merge_dist_data(DistBuff, ClientData),
@@ -502,7 +512,7 @@ dist_app_data(ClientData, #state{erl_dist_data = #{dist_handle := DHandle,
Buffer =:= <<>> ->
%% Passive mode, wait for active once or recv
%% Active and empty, get more data
- Connection:next_record_if_active(State#state{erl_dist_data = ErlDistData#{dist_buffer => <<>>}});
+ {no_record, State#state{erl_dist_data = ErlDistData#{dist_buffer => <<>>}}};
_ -> %% We have more data
read_application_data(<<>>, State)
catch error:_ ->
@@ -606,9 +616,7 @@ ssl_config(Opts, Role, State0, Type) ->
init({call, From}, {start, Timeout}, State0, Connection) ->
Timer = start_or_recv_cancel_timer(Timeout, From),
- {Record, State} = Connection:next_record(State0#state{start_or_recv_from = From,
- timer = Timer}),
- Connection:next_event(hello, Record, State);
+ Connection:next_event(hello, no_record, State0#state{start_or_recv_from = From, timer = Timer});
init({call, From}, {start, {Opts, EmOpts}, Timeout},
#state{role = Role, ssl_options = OrigSSLOptions,
socket_options = SockOpts} = State0, Connection) ->
@@ -621,8 +629,10 @@ init({call, From}, {start, {Opts, EmOpts}, Timeout},
catch throw:Error ->
stop_and_reply(normal, {reply, From, {error, Error}}, State0)
end;
-init({call, From}, Msg, State, Connection) ->
+init({call, From}, {new_user, _} = Msg, State, Connection) ->
handle_call(Msg, From, ?FUNCTION_NAME, State, Connection);
+init({call, From}, _Msg, _State, _Connection) ->
+ {keep_state_and_data, [{reply, From, {error, notsup_on_transport_accept_socket}}]};
init(_Type, _Event, _State, _Connection) ->
{keep_state_and_data, [postpone]}.
@@ -719,20 +729,19 @@ abbreviated(internal, #finished{verify_data = Data} = Finished,
%% only allowed to send next_protocol message after change cipher spec
%% & before finished message and it is not allowed during renegotiation
abbreviated(internal, #next_protocol{selected_protocol = SelectedProtocol},
- #state{role = server, expecting_next_protocol_negotiation = true} = State0,
+ #state{role = server, expecting_next_protocol_negotiation = true} = State,
Connection) ->
- {Record, State} =
- Connection:next_record(State0#state{negotiated_protocol = SelectedProtocol}),
- Connection:next_event(?FUNCTION_NAME, Record,
- State#state{expecting_next_protocol_negotiation = false});
+ Connection:next_event(?FUNCTION_NAME, no_record,
+ State#state{negotiated_protocol = SelectedProtocol,
+ expecting_next_protocol_negotiation = false});
abbreviated(internal,
#change_cipher_spec{type = <<1>>},
- #state{connection_states = ConnectionStates0} = State0, Connection) ->
+ #state{connection_states = ConnectionStates0} = State, Connection) ->
ConnectionStates1 =
ssl_record:activate_pending_connection_state(ConnectionStates0, read, Connection),
- {Record, State} = Connection:next_record(State0#state{connection_states =
- ConnectionStates1}),
- Connection:next_event(?FUNCTION_NAME, Record, State#state{expecting_finished = true});
+ Connection:next_event(?FUNCTION_NAME, no_record, State#state{connection_states =
+ ConnectionStates1,
+ expecting_finished = true});
abbreviated(info, Msg, State, _) ->
handle_info(Msg, ?FUNCTION_NAME, State);
abbreviated(Type, Msg, State, Connection) ->
@@ -761,9 +770,7 @@ certify(internal, #certificate{asn1_certificates = []},
ssl_options = #ssl_options{verify = verify_peer,
fail_if_no_peer_cert = false}} =
State0, Connection) ->
- {Record, State} =
- Connection:next_record(State0#state{client_certificate_requested = false}),
- Connection:next_event(?FUNCTION_NAME, Record, State);
+ Connection:next_event(?FUNCTION_NAME, no_record, State0#state{client_certificate_requested = false});
certify(internal, #certificate{},
#state{role = server,
negotiated_version = Version,
@@ -831,24 +838,23 @@ certify(internal, #certificate_request{},
Version, ?FUNCTION_NAME, State);
certify(internal, #certificate_request{},
#state{session = #session{own_certificate = undefined},
- role = client} = State0, Connection) ->
+ role = client} = State, Connection) ->
%% The client does not have a certificate and will send an empty reply, the server may fail
%% or accept the connection by its own preference. No signature algorithms needed as there is
%% no certificate to verify.
- {Record, State} = Connection:next_record(State0),
- Connection:next_event(?FUNCTION_NAME, Record, State#state{client_certificate_requested = true});
+ Connection:next_event(?FUNCTION_NAME, no_record, State#state{client_certificate_requested = true});
certify(internal, #certificate_request{} = CertRequest,
#state{session = #session{own_certificate = Cert},
role = client,
ssl_options = #ssl_options{signature_algs = SupportedHashSigns},
- negotiated_version = Version} = State0, Connection) ->
+ negotiated_version = Version} = State, Connection) ->
case ssl_handshake:select_hashsign(CertRequest, Cert, SupportedHashSigns, ssl:tls_version(Version)) of
#alert {} = Alert ->
- handle_own_alert(Alert, Version, ?FUNCTION_NAME, State0);
- NegotiatedHashSign ->
- {Record, State} = Connection:next_record(State0#state{client_certificate_requested = true}),
- Connection:next_event(?FUNCTION_NAME, Record,
- State#state{cert_hashsign_algorithm = NegotiatedHashSign})
+ handle_own_alert(Alert, Version, ?FUNCTION_NAME, State);
+ NegotiatedHashSign ->
+ Connection:next_event(?FUNCTION_NAME, no_record,
+ State#state{client_certificate_requested = true,
+ cert_hashsign_algorithm = NegotiatedHashSign})
end;
%% PSK and RSA_PSK might bypass the Server-Key-Exchange
certify(internal, #server_hello_done{},
@@ -957,7 +963,7 @@ cipher(internal, #certificate_verify{signature = Signature,
negotiated_version = Version,
session = #session{master_secret = MasterSecret},
tls_handshake_history = Handshake
- } = State0, Connection) ->
+ } = State, Connection) ->
TLSVersion = ssl:tls_version(Version),
%% Use negotiated value if TLS-1.2 otherwise return default
@@ -965,11 +971,10 @@ cipher(internal, #certificate_verify{signature = Signature,
case ssl_handshake:certificate_verify(Signature, PublicKeyInfo,
TLSVersion, HashSign, MasterSecret, Handshake) of
valid ->
- {Record, State} = Connection:next_record(State0),
- Connection:next_event(?FUNCTION_NAME, Record,
+ Connection:next_event(?FUNCTION_NAME, no_record,
State#state{cert_hashsign_algorithm = HashSign});
#alert{} = Alert ->
- handle_own_alert(Alert, Version, ?FUNCTION_NAME, State0)
+ handle_own_alert(Alert, Version, ?FUNCTION_NAME, State)
end;
%% client must send a next protocol message if we are expecting it
cipher(internal, #finished{},
@@ -1003,18 +1008,18 @@ cipher(internal, #finished{verify_data = Data} = Finished,
%% & before finished message and it is not allowed during renegotiation
cipher(internal, #next_protocol{selected_protocol = SelectedProtocol},
#state{role = server, expecting_next_protocol_negotiation = true,
- expecting_finished = true} = State0, Connection) ->
- {Record, State} =
- Connection:next_record(State0#state{negotiated_protocol = SelectedProtocol}),
- Connection:next_event(?FUNCTION_NAME, Record,
- State#state{expecting_next_protocol_negotiation = false});
+ expecting_finished = true} = State, Connection) ->
+ Connection:next_event(?FUNCTION_NAME, no_record,
+ State#state{expecting_next_protocol_negotiation = false,
+ negotiated_protocol = SelectedProtocol
+ });
cipher(internal, #change_cipher_spec{type = <<1>>}, #state{connection_states = ConnectionStates0} =
- State0, Connection) ->
- ConnectionStates1 =
+ State, Connection) ->
+ ConnectionStates =
ssl_record:activate_pending_connection_state(ConnectionStates0, read, Connection),
- {Record, State} = Connection:next_record(State0#state{connection_states =
- ConnectionStates1}),
- Connection:next_event(?FUNCTION_NAME, Record, State#state{expecting_finished = true});
+ Connection:next_event(?FUNCTION_NAME, no_record, State#state{connection_states =
+ ConnectionStates,
+ expecting_finished = true});
cipher(Type, Msg, State, Connection) ->
handle_common_event(Type, Msg, ?FUNCTION_NAME, State, Connection).
@@ -1027,9 +1032,9 @@ connection({call, RecvFrom}, {recv, N, Timeout},
#state{protocol_cb = Connection, socket_options =
#socket_options{active = false}} = State0, Connection) ->
Timer = start_or_recv_cancel_timer(Timeout, RecvFrom),
- Connection:passive_receive(State0#state{bytes_to_read = N,
- start_or_recv_from = RecvFrom,
- timer = Timer}, ?FUNCTION_NAME);
+ passive_receive(State0#state{bytes_to_read = N,
+ start_or_recv_from = RecvFrom,
+ timer = Timer}, ?FUNCTION_NAME, Connection);
connection({call, From}, renegotiate, #state{protocol_cb = Connection} = State,
Connection) ->
Connection:renegotiate(State#state{renegotiation = {true, From}}, []);
@@ -1071,7 +1076,7 @@ connection(cast, {dist_handshake_complete, DHandle},
connection(info, Msg, State, _) ->
handle_info(Msg, ?FUNCTION_NAME, State);
connection(internal, {recv, _}, State, Connection) ->
- Connection:passive_receive(State, ?FUNCTION_NAME);
+ passive_receive(State, ?FUNCTION_NAME, Connection);
connection(Type, Msg, State, Connection) ->
handle_common_event(Type, Msg, ?FUNCTION_NAME, State, Connection).
@@ -1090,6 +1095,12 @@ downgrade(internal, #alert{description = ?CLOSE_NOTIFY},
downgrade(timeout, downgrade, #state{downgrade = {_, From}} = State, _) ->
gen_statem:reply(From, {error, timeout}),
stop(normal, State);
+downgrade(
+ info, {CloseTag, Socket},
+ #state{socket = Socket, close_tag = CloseTag, downgrade = {_, From}} =
+ State, _) ->
+ gen_statem:reply(From, {error, CloseTag}),
+ stop(normal, State);
downgrade(Type, Event, State, Connection) ->
handle_common_event(Type, Event, ?FUNCTION_NAME, State, Connection).
@@ -1124,15 +1135,15 @@ handle_common_event(internal, {application_data, Data}, StateName, State0, Conne
case read_application_data(Data, State0) of
{stop, _, _} = Stop->
Stop;
- {Record, State} ->
- case Connection:next_event(StateName, Record, State) of
- {next_state, StateName, State} ->
- hibernate_after(StateName, State, []);
- {next_state, StateName, State, Actions} ->
- hibernate_after(StateName, State, Actions);
- {stop, _, _} = Stop ->
- Stop
- end
+ {Record, State1} ->
+ case Connection:next_event(StateName, Record, State1) of
+ {next_state, StateName, State} ->
+ hibernate_after(StateName, State, []);
+ {next_state, StateName, State, Actions} ->
+ hibernate_after(StateName, State, Actions);
+ {stop, _, _} = Stop ->
+ Stop
+ end
end;
handle_common_event(internal, #change_cipher_spec{type = <<1>>}, StateName,
#state{negotiated_version = Version} = State, _) ->
@@ -1162,23 +1173,31 @@ handle_call({close, _} = Close, From, StateName, State, _Connection) ->
stop_and_reply(
{shutdown, normal},
{reply, From, Result}, State#state{terminated = true});
-handle_call({shutdown, How0}, From, StateName,
+handle_call({shutdown, read_write = How}, From, StateName,
#state{transport_cb = Transport,
socket = Socket} = State, _) ->
- case How0 of
- How when How == write; How == both ->
- send_alert(?ALERT_REC(?WARNING, ?CLOSE_NOTIFY),
- StateName, State);
- _ ->
- ok
- end,
+ try send_alert(?ALERT_REC(?WARNING, ?CLOSE_NOTIFY),
+ StateName, State) of
+ _ ->
+ case Transport:shutdown(Socket, How) of
+ ok ->
+ {next_state, StateName, State#state{terminated = true}, [{reply, From, ok}]};
+ Error ->
+ {stop, StateName, State#state{terminated = true}, [{reply, From, Error}]}
+ end
+ catch
+ throw:Return ->
+ Return
+ end;
+handle_call({shutdown, How0}, From, StateName,
+ #state{transport_cb = Transport,
+ socket = Socket} = State, _) ->
case Transport:shutdown(Socket, How0) of
ok ->
- {keep_state_and_data, [{reply, From, ok}]};
+ {next_state, StateName, State, [{reply, From, ok}]};
Error ->
- gen_statem:reply(From, {error, Error}),
- stop(normal, State)
+ {stop, StateName, State, [{reply, From, Error}]}
end;
handle_call({recv, _N, _Timeout}, From, _,
#state{socket_options =
@@ -1340,15 +1359,15 @@ terminate(downgrade = Reason, connection, #state{protocol_cb = Connection,
handle_trusted_certs_db(State),
Connection:close(Reason, Socket, Transport, undefined, undefined);
terminate(Reason, connection, #state{protocol_cb = Connection,
- connection_states = ConnectionStates,
- ssl_options = #ssl_options{padding_check = Check},
- transport_cb = Transport, socket = Socket
- } = State) ->
+ connection_states = ConnectionStates,
+ ssl_options = #ssl_options{padding_check = Check},
+ transport_cb = Transport, socket = Socket
+ } = State) ->
handle_trusted_certs_db(State),
Alert = terminate_alert(Reason),
%% Send the termination ALERT if possible
- catch (ok = Connection:send_alert_in_connection(Alert, State)),
- Connection:close(Reason, Socket, Transport, ConnectionStates, Check);
+ catch (Connection:send_alert_in_connection(Alert, State)),
+ Connection:close({timeout, ?DEFAULT_TIMEOUT}, Socket, Transport, ConnectionStates, Check);
terminate(Reason, _StateName, #state{transport_cb = Transport, protocol_cb = Connection,
socket = Socket
} = State) ->
@@ -1445,13 +1464,12 @@ new_server_hello(#server_hello{cipher_suite = CipherSuite,
negotiated_version = Version} = State0, Connection) ->
try server_certify_and_key_exchange(State0, Connection) of
#state{} = State1 ->
- {State2, Actions} = server_hello_done(State1, Connection),
+ {State, Actions} = server_hello_done(State1, Connection),
Session =
Session0#session{session_id = SessionId,
cipher_suite = CipherSuite,
compression_method = Compression},
- {Record, State} = Connection:next_record(State2#state{session = Session}),
- Connection:next_event(certify, Record, State, Actions)
+ Connection:next_event(certify, no_record, State#state{session = Session}, Actions)
catch
#alert{} = Alert ->
handle_own_alert(Alert, Version, hello, State0)
@@ -1466,10 +1484,9 @@ resumed_server_hello(#state{session = Session,
{_, ConnectionStates1} ->
State1 = State0#state{connection_states = ConnectionStates1,
session = Session},
- {State2, Actions} =
+ {State, Actions} =
finalize_handshake(State1, abbreviated, Connection),
- {Record, State} = Connection:next_record(State2),
- Connection:next_event(abbreviated, Record, State, Actions);
+ Connection:next_event(abbreviated, no_record, State, Actions);
#alert{} = Alert ->
handle_own_alert(Alert, Version, hello, State0)
end.
@@ -1491,10 +1508,8 @@ handle_peer_cert(Role, PeerCert, PublicKeyInfo,
Session#session{peer_certificate = PeerCert},
public_key_info = PublicKeyInfo},
#{key_exchange := KeyAlgorithm} = ssl_cipher_format:suite_definition(CipherSuite),
- State2 = handle_peer_cert_key(Role, PeerCert, PublicKeyInfo, KeyAlgorithm, State1),
-
- {Record, State} = Connection:next_record(State2),
- Connection:next_event(certify, Record, State).
+ State = handle_peer_cert_key(Role, PeerCert, PublicKeyInfo, KeyAlgorithm, State1),
+ Connection:next_event(certify, no_record, State).
handle_peer_cert_key(client, _,
{?'id-ecPublicKey', #'ECPoint'{point = _ECPoint} = PublicKey,
@@ -1552,11 +1567,10 @@ client_certify_and_key_exchange(#state{negotiated_version = Version} =
try do_client_certify_and_key_exchange(State0, Connection) of
State1 = #state{} ->
{State2, Actions} = finalize_handshake(State1, certify, Connection),
- State3 = State2#state{
- %% Reinitialize
- client_certificate_requested = false},
- {Record, State} = Connection:next_record(State3),
- Connection:next_event(cipher, Record, State, Actions)
+ State = State2#state{
+ %% Reinitialize
+ client_certificate_requested = false},
+ Connection:next_event(cipher, no_record, State, Actions)
catch
throw:#alert{} = Alert ->
handle_own_alert(Alert, Version, certify, State0)
@@ -1965,10 +1979,9 @@ calculate_master_secret(PremasterSecret,
ConnectionStates0, server) of
{MasterSecret, ConnectionStates} ->
Session = Session0#session{master_secret = MasterSecret},
- State1 = State0#state{connection_states = ConnectionStates,
+ State = State0#state{connection_states = ConnectionStates,
session = Session},
- {Record, State} = Connection:next_record(State1),
- Connection:next_event(Next, Record, State);
+ Connection:next_event(Next, no_record, State);
#alert{} = Alert ->
handle_own_alert(Alert, Version, certify, State0)
end.
@@ -2041,10 +2054,9 @@ calculate_secret(#server_ecdh_params{curve = ECCurve, public = ECServerPubKey},
calculate_secret(#server_psk_params{
hint = IdentityHint},
- State0, Connection) ->
+ State, Connection) ->
%% store for later use
- {Record, State} = Connection:next_record(State0#state{psk_identity = IdentityHint}),
- Connection:next_event(certify, Record, State);
+ Connection:next_event(certify, no_record, State#state{psk_identity = IdentityHint});
calculate_secret(#server_dhe_psk_params{
dh_params = #server_dh_params{dh_p = Prime, dh_g = Base}} = ServerKey,
@@ -2337,9 +2349,8 @@ prepare_connection(#state{renegotiation = Renegotiate,
start_or_recv_from = RecvFrom} = State0, Connection)
when Renegotiate =/= {false, first},
RecvFrom =/= undefined ->
- State1 = Connection:reinit(State0),
- {Record, State} = Connection:next_record(State1),
- {Record, ack_connection(State)};
+ State = Connection:reinit(State0),
+ {no_record, ack_connection(State)};
prepare_connection(State0, Connection) ->
State = Connection:reinit(State0),
{no_record, ack_connection(State)}.
@@ -2393,26 +2404,23 @@ handle_new_session(NewId, CipherSuite, Compression,
Session = Session0#session{session_id = NewId,
cipher_suite = CipherSuite,
compression_method = Compression},
- {Record, State} = Connection:next_record(State0#state{session = Session}),
- Connection:next_event(certify, Record, State).
+ Connection:next_event(certify, no_record, State0#state{session = Session}).
handle_resumed_session(SessId, #state{connection_states = ConnectionStates0,
negotiated_version = Version,
host = Host, port = Port,
protocol_cb = Connection,
session_cache = Cache,
- session_cache_cb = CacheCb} = State0) ->
+ session_cache_cb = CacheCb} = State) ->
Session = CacheCb:lookup(Cache, {{Host, Port}, SessId}),
case ssl_handshake:master_secret(ssl:tls_version(Version), Session,
ConnectionStates0, client) of
{_, ConnectionStates} ->
- {Record, State} =
- Connection:next_record(State0#state{
- connection_states = ConnectionStates,
- session = Session}),
- Connection:next_event(abbreviated, Record, State);
+ Connection:next_event(abbreviated, no_record, State#state{
+ connection_states = ConnectionStates,
+ session = Session});
#alert{} = Alert ->
- handle_own_alert(Alert, Version, hello, State0)
+ handle_own_alert(Alert, Version, hello, State)
end.
make_premaster_secret({MajVer, MinVer}, rsa) ->
@@ -2462,10 +2470,7 @@ handle_active_option(false, connection = StateName, To, Reply, State) ->
handle_active_option(_, connection = StateName0, To, Reply, #state{protocol_cb = Connection,
user_data_buffer = <<>>} = State0) ->
- %% Need data, set active once
- {Record, State1} = Connection:next_record_if_active(State0),
- %% Note: Renogotiation may cause StateName0 =/= StateName
- case Connection:next_event(StateName0, Record, State1) of
+ case Connection:next_event(StateName0, no_record, State0) of
{next_state, StateName, State} ->
hibernate_after(StateName, State, [{reply, To, Reply}]);
{next_state, StateName, State, Actions} ->
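%% Illustrative sketch (not part of the patch above) of the new
%% passive_receive/3 contract in ssl_connection.erl: read_application_data/2
%% may now return a stop tuple, so the caller passes it through instead of
%% assuming {Record, State}. State is a map here only to keep the sketch
%% self-contained; the helper bodies are hypothetical stand-ins.
-module(passive_receive_sketch).
-export([passive_receive/2]).

passive_receive(#{user_data_buffer := <<>>} = State0, StateName) ->
    %% Nothing buffered: ask the record layer for more data.
    {Record, State} = next_record(State0),
    next_event(StateName, Record, State);
passive_receive(State0, StateName) ->
    case read_application_data(<<>>, State0) of
        {stop, _, _} = Stop -> Stop;
        {Record, State}     -> next_event(StateName, Record, State)
    end.

next_record(#{queue := [Record | Rest]} = State) ->
    {Record, State#{queue := Rest}};
next_record(State) ->
    {no_record, State}.

read_application_data(_Data, #{stopped := true} = State) ->
    {stop, {shutdown, normal}, State};
read_application_data(_Data, State) ->
    {no_record, State}.

next_event(StateName, _Record, State) ->
    {next_state, StateName, State}.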
diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl
index dc89fb0029..14df1d2e02 100644
--- a/lib/ssl/src/ssl_handshake.erl
+++ b/lib/ssl/src/ssl_handshake.erl
@@ -925,6 +925,13 @@ premaster_secret(EncSecret, #'RSAPrivateKey'{} = RSAPrivateKey) ->
catch
_:_ ->
throw(?ALERT_REC(?FATAL, ?DECRYPT_ERROR))
+ end;
+premaster_secret(EncSecret, #{algorithm := rsa} = Engine) ->
+ try crypto:private_decrypt(rsa, EncSecret, maps:remove(algorithm, Engine),
+ [{rsa_pad, rsa_pkcs1_padding}])
+ catch
+ _:_ ->
+ throw(?ALERT_REC(?FATAL, ?DECRYPT_ERROR))
end.
%%====================================================================
%% Extensions handling
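%% Illustrative sketch (not part of the patch above): the new
%% premaster_secret/2 clause hands an engine-backed key map to
%% crypto:private_decrypt/4. The round trip below shows the same PKCS#1
%% v1.5 operation with an in-memory RSA key via public_key, just to make
%% the decrypt step concrete; key size and plaintext are demo values.
-module(rsa_premaster_sketch).
-include_lib("public_key/include/public_key.hrl").
-export([demo/0]).

demo() ->
    #'RSAPrivateKey'{modulus = N, publicExponent = E} = PrivKey =
        public_key:generate_key({rsa, 2048, 65537}),
    PubKey = #'RSAPublicKey'{modulus = N, publicExponent = E},
    Secret = crypto:strong_rand_bytes(48),          %% premaster secret size
    Cipher = public_key:encrypt_public(Secret, PubKey),
    Secret = public_key:decrypt_private(Cipher, PrivKey),
    ok.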
diff --git a/lib/ssl/src/ssl_internal.hrl b/lib/ssl/src/ssl_internal.hrl
index fd246e2550..63e751440a 100644
--- a/lib/ssl/src/ssl_internal.hrl
+++ b/lib/ssl/src/ssl_internal.hrl
@@ -60,6 +60,7 @@
-define(CDR_MAGIC, "GIOP").
-define(CDR_HDR_SIZE, 12).
+-define(INTERNAL_ACTIVE_N, 100).
-define(DEFAULT_TIMEOUT, 5000).
-define(NO_DIST_POINT, "http://dummy/no_distribution_point").
diff --git a/lib/ssl/src/ssl_pem_cache.erl b/lib/ssl/src/ssl_pem_cache.erl
index b7d23ef01e..41bca2f7b5 100644
--- a/lib/ssl/src/ssl_pem_cache.erl
+++ b/lib/ssl/src/ssl_pem_cache.erl
@@ -45,7 +45,7 @@
-record(state, {
pem_cache,
- last_pem_check :: erlang:timestamp(),
+ last_pem_check :: integer(),
clear :: integer()
}).
@@ -134,8 +134,9 @@ init([Name]) ->
PemCache = ssl_pkix_db:create_pem_cache(Name),
Interval = pem_check_interval(),
erlang:send_after(Interval, self(), clear_pem_cache),
+ erlang:system_time(second),
{ok, #state{pem_cache = PemCache,
- last_pem_check = os:timestamp(),
+ last_pem_check = erlang:convert_time_unit(os:system_time(), native, second),
clear = Interval
}}.
@@ -183,7 +184,7 @@ handle_cast({invalidate_pem, File}, #state{pem_cache = Db} = State) ->
handle_info(clear_pem_cache, #state{pem_cache = PemCache,
clear = Interval,
last_pem_check = CheckPoint} = State) ->
- NewCheckPoint = os:timestamp(),
+ NewCheckPoint = erlang:convert_time_unit(os:system_time(), native, second),
start_pem_cache_validator(PemCache, CheckPoint),
erlang:send_after(Interval, self(), clear_pem_cache),
{noreply, State#state{last_pem_check = NewCheckPoint}};
@@ -229,24 +230,14 @@ init_pem_cache_validator([CacheName, PemCache, CheckPoint]) ->
CheckPoint, PemCache).
pem_cache_validate({File, _}, CheckPoint) ->
- case file:read_file_info(File, []) of
- {ok, #file_info{mtime = Time}} ->
- case is_before_checkpoint(Time, CheckPoint) of
- true ->
- ok;
- false ->
- invalidate_pem(File)
- end;
+ case file:read_file_info(File, [{time, posix}]) of
+ {ok, #file_info{mtime = Time}} when Time < CheckPoint ->
+ ok;
_ ->
invalidate_pem(File)
end,
CheckPoint.
-is_before_checkpoint(Time, CheckPoint) ->
- calendar:datetime_to_gregorian_seconds(
- calendar:now_to_datetime(CheckPoint)) -
- calendar:datetime_to_gregorian_seconds(Time) > 0.
-
pem_check_interval() ->
case application:get_env(ssl, ssl_pem_cache_clean) of
{ok, Interval} when is_integer(Interval) ->
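%% Illustrative sketch (not part of the patch above) of the checkpoint
%% comparison after the change: both the checkpoint and the file mtime are
%% plain POSIX seconds, so no calendar arithmetic is needed. File and
%% module names are hypothetical.
-module(pem_checkpoint_sketch).
-include_lib("kernel/include/file.hrl").
-export([checkpoint/0, stale/2]).

checkpoint() ->
    erlang:convert_time_unit(os:system_time(), native, second).

stale(File, CheckPoint) when is_integer(CheckPoint) ->
    case file:read_file_info(File, [{time, posix}]) of
        {ok, #file_info{mtime = MTime}} when MTime < CheckPoint ->
            false;                 %% unchanged since the last sweep
        _ ->
            true                   %% modified after the checkpoint, or unreadable
    end.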
diff --git a/lib/ssl/src/ssl_record.erl b/lib/ssl/src/ssl_record.erl
index 659e1485ac..b9d1320ef3 100644
--- a/lib/ssl/src/ssl_record.erl
+++ b/lib/ssl/src/ssl_record.erl
@@ -45,7 +45,7 @@
-export([compress/3, uncompress/3, compressions/0]).
%% Payload encryption/decryption
--export([cipher/4, decipher/4, cipher_aead/4, is_correct_mac/2]).
+-export([cipher/4, decipher/4, cipher_aead/4, decipher_aead/5, is_correct_mac/2, nonce_seed/3]).
-export_type([ssl_version/0, ssl_atom_version/0, connection_states/0, connection_state/0]).
@@ -306,22 +306,20 @@ cipher(Version, Fragment,
{CipherFragment, CipherS1} =
ssl_cipher:cipher(BulkCipherAlgo, CipherS0, MacHash, Fragment, Version),
{CipherFragment, WriteState0#{cipher_state => CipherS1}}.
-%% %%--------------------------------------------------------------------
-%% -spec cipher_aead(ssl_version(), iodata(), connection_state(), MacHash::binary()) ->
-%% {CipherFragment::binary(), connection_state()}.
-%% %%
-%% %% Description: Payload encryption
+%%--------------------------------------------------------------------
+-spec cipher_aead(ssl_version(), iodata(), connection_state(), AAD::binary()) ->
+ {CipherFragment::binary(), connection_state()}.
+
+%% Description: Payload encryption
%% %%--------------------------------------------------------------------
cipher_aead(Version, Fragment,
#{cipher_state := CipherS0,
- sequence_number := SeqNo,
security_parameters :=
#security_parameters{bulk_cipher_algorithm =
BulkCipherAlgo}
} = WriteState0, AAD) ->
-
{CipherFragment, CipherS1} =
- ssl_cipher:cipher_aead(BulkCipherAlgo, CipherS0, SeqNo, AAD, Fragment, Version),
+ cipher_aead(BulkCipherAlgo, CipherS0, AAD, Fragment, Version),
{CipherFragment, WriteState0#{cipher_state => CipherS1}}.
%%--------------------------------------------------------------------
@@ -344,10 +342,39 @@ decipher(Version, CipherFragment,
#alert{} = Alert ->
Alert
end.
+%%--------------------------------------------------------------------
+-spec decipher_aead(ssl_cipher:cipher_enum(), #cipher_state{},
+ binary(), binary(), ssl_record:ssl_version()) ->
+ {binary(), #cipher_state{}} | #alert{}.
+%%
+%% Description: Decrypts the data and checks the associated data (AAD) MAC using
+%% cipher described by cipher_enum() and updating the cipher state.
+%% Use for suites that use authenticated encryption with associated data (AEAD)
+%%-------------------------------------------------------------------
+decipher_aead(Type, #cipher_state{key = Key} = CipherState, AAD0, CipherFragment, _) ->
+ try
+ Nonce = decrypt_nonce(Type, CipherState, CipherFragment),
+ {AAD, CipherText, CipherTag} = aead_ciphertext_split(Type, CipherState, CipherFragment, AAD0),
+ case ssl_cipher:aead_decrypt(Type, Key, Nonce, CipherText, CipherTag, AAD) of
+ Content when is_binary(Content) ->
+ {Content, CipherState};
+ _ ->
+ ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
+ end
+ catch
+ _:_ ->
+ ?ALERT_REC(?FATAL, ?BAD_RECORD_MAC, decryption_failed)
+ end.
+
+nonce_seed(?CHACHA20_POLY1305, Seed, CipherState) ->
+ ssl_cipher:nonce_seed(Seed, CipherState);
+nonce_seed(_,_, CipherState) ->
+ CipherState.
%%--------------------------------------------------------------------
%%% Internal functions
%%--------------------------------------------------------------------
+
empty_connection_state(ConnectionEnd, BeastMitigation) ->
SecParams = empty_security_params(ConnectionEnd),
#{security_parameters => SecParams,
@@ -400,3 +427,37 @@ initial_security_params(ConnectionEnd) ->
compression_algorithm = ?NULL},
ssl_cipher:security_parameters(?TLS_NULL_WITH_NULL_NULL, SecParams).
+cipher_aead(?CHACHA20_POLY1305 = Type, #cipher_state{key=Key} = CipherState, AAD0, Fragment, _Version) ->
+ AAD = end_additional_data(AAD0, erlang:iolist_size(Fragment)),
+ Nonce = encrypt_nonce(Type, CipherState),
+ {Content, CipherTag} = ssl_cipher:aead_encrypt(Type, Key, Nonce, Fragment, AAD),
+ {<<Content/binary, CipherTag/binary>>, CipherState};
+cipher_aead(Type, #cipher_state{key=Key, nonce = ExplicitNonce} = CipherState, AAD0, Fragment, _Version) ->
+ AAD = end_additional_data(AAD0, erlang:iolist_size(Fragment)),
+ Nonce = encrypt_nonce(Type, CipherState),
+ {Content, CipherTag} = ssl_cipher:aead_encrypt(Type, Key, Nonce, Fragment, AAD),
+ {<<ExplicitNonce:64/integer, Content/binary, CipherTag/binary>>, CipherState#cipher_state{nonce = ExplicitNonce + 1}}.
+
+encrypt_nonce(?CHACHA20_POLY1305, #cipher_state{nonce = Nonce, iv = IV}) ->
+ crypto:exor(<<?UINT32(0), Nonce/binary>>, IV);
+encrypt_nonce(?AES_GCM, #cipher_state{iv = IV, nonce = ExplicitNonce}) ->
+ <<Salt:4/bytes, _/binary>> = IV,
+ <<Salt/binary, ExplicitNonce:64/integer>>.
+
+decrypt_nonce(?CHACHA20_POLY1305, #cipher_state{nonce = Nonce, iv = IV}, _) ->
+ crypto:exor(<<Nonce:96/unsigned-big-integer>>, IV);
+decrypt_nonce(?AES_GCM, #cipher_state{iv = <<Salt:4/bytes, _/binary>>}, <<ExplicitNonce:8/bytes, _/binary>>) ->
+ <<Salt/binary, ExplicitNonce/binary>>.
+
+aead_ciphertext_split(?CHACHA20_POLY1305, #cipher_state{tag_len = Len}, CipherTextFragment, AAD) ->
+ CipherLen = size(CipherTextFragment) - Len,
+ <<CipherText:CipherLen/bytes, CipherTag:Len/bytes>> = CipherTextFragment,
+ {end_additional_data(AAD, CipherLen), CipherText, CipherTag};
+aead_ciphertext_split(?AES_GCM, #cipher_state{tag_len = Len}, CipherTextFragment, AAD) ->
+ CipherLen = size(CipherTextFragment) - (Len + 8), %% 8 is length of explicit Nonce
+ << _:8/bytes, CipherText:CipherLen/bytes, CipherTag:Len/bytes>> = CipherTextFragment,
+ {end_additional_data(AAD, CipherLen), CipherText, CipherTag}.
+
+end_additional_data(AAD, Len) ->
+ <<AAD/binary, ?UINT16(Len)>>.
+
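%% Illustrative sketch (not part of the patch above) of the two nonce
%% constructions used in encrypt_nonce/2 and decrypt_nonce/3:
%% ChaCha20-Poly1305 (RFC 7905) XORs the 64-bit record sequence (epoch plus
%% sequence number for DTLS), left-padded with 32 zero bits, into the
%% 12-byte static IV, while AES-GCM (RFC 5288) concatenates the 4-byte
%% implicit salt with the 8-byte explicit nonce.
-module(aead_nonce_sketch).
-export([chacha_nonce/2, gcm_nonce/2]).

chacha_nonce(Seq, IV) when is_integer(Seq), byte_size(IV) =:= 12 ->
    crypto:exor(<<0:32, Seq:64>>, IV).

gcm_nonce(ExplicitNonce, <<Salt:4/bytes, _/binary>>) when is_integer(ExplicitNonce) ->
    <<Salt/binary, ExplicitNonce:64>>.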
diff --git a/lib/ssl/src/tls_connection.erl b/lib/ssl/src/tls_connection.erl
index adb4f6d9ea..0f986b5e21 100644
--- a/lib/ssl/src/tls_connection.erl
+++ b/lib/ssl/src/tls_connection.erl
@@ -46,7 +46,7 @@
-export([start_fsm/8, start_link/8, init/1, pids/1]).
%% State transition handling
--export([next_record/1, next_event/3, next_event/4,
+-export([next_event/3, next_event/4,
handle_common_event/4]).
%% Handshake handling
@@ -61,7 +61,7 @@
encode_alert/3, close/5, protocol_name/0]).
%% Data handling
--export([encode_data/3, passive_receive/2, next_record_if_active/1,
+-export([encode_data/3, next_record/1,
send/3, socket/5, setopts/3, getopts/3]).
%% gen_statem state functions
@@ -161,30 +161,30 @@ next_record(#state{protocol_buffers =
{Alert, State}
end;
next_record(#state{protocol_buffers = #protocol_buffers{tls_packets = [], tls_cipher_texts = []},
- socket = Socket,
+ protocol_specific = #{active_n_toggle := true, active_n := N} = ProtocolSpec,
+ socket = Socket,
close_tag = CloseTag,
transport_cb = Transport} = State) ->
- case tls_socket:setopts(Transport, Socket, [{active,once}]) of
- ok ->
- {no_record, State};
- _ ->
- self() ! {CloseTag, Socket},
- {no_record, State}
- end;
+ case tls_socket:setopts(Transport, Socket, [{active, N}]) of
+ ok ->
+ {no_record, State#state{protocol_specific = ProtocolSpec#{active_n_toggle => false}}};
+ _ ->
+ self() ! {CloseTag, Socket},
+ {no_record, State}
+ end;
next_record(State) ->
{no_record, State}.
next_event(StateName, Record, State) ->
next_event(StateName, Record, State, []).
-
-next_event(connection = StateName, no_record, State0, Actions) ->
- case next_record_if_active(State0) of
- {no_record, State} ->
- ssl_connection:hibernate_after(StateName, State, Actions);
- {#ssl_tls{} = Record, State} ->
- {next_state, StateName, State, [{next_event, internal, {protocol_record, Record}} | Actions]};
- {#alert{} = Alert, State} ->
- {next_state, StateName, State, [{next_event, internal, Alert} | Actions]}
+next_event(StateName, no_record, State0, Actions) ->
+ case next_record(State0) of
+ {no_record, State} ->
+ {next_state, StateName, State, Actions};
+ {#ssl_tls{} = Record, State} ->
+ {next_state, StateName, State, [{next_event, internal, {protocol_record, Record}} | Actions]};
+ {#alert{} = Alert, State} ->
+ {next_state, StateName, State, [{next_event, internal, Alert} | Actions]}
end;
next_event(StateName, Record, State, Actions) ->
case Record of
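%% Illustrative sketch (not part of the patch above) of the {active, N}
%% flow control introduced in next_record/1: the socket is armed for N
%% packets at a time and re-armed only when the emulated counter reaches
%% zero, which inet signals with a {tcp_passive, Socket} message. This is a
%% stand-alone gen_tcp example, not the OTP implementation.
-module(active_n_sketch).
-export([loop/2]).

%% Callers are expected to have set {active, N} once before entering loop/2.
loop(Socket, N) ->
    receive
        {tcp, Socket, Data} ->
            io:format("data: ~p~n", [Data]),
            loop(Socket, N);
        {tcp_passive, Socket} ->
            %% The N-counter hit zero: re-arm and continue.
            ok = inet:setopts(Socket, [{active, N}]),
            loop(Socket, N);
        {tcp_closed, Socket} ->
            ok
    end.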
@@ -207,22 +207,21 @@ handle_common_event(internal, #ssl_tls{type = ?HANDSHAKE, fragment = Data},
ssl_options = Options} = State0) ->
try
{Packets, Buf} = tls_handshake:get_tls_handshake(Version,Data,Buf0, Options),
- State1 =
+ State =
State0#state{protocol_buffers =
Buffers#protocol_buffers{tls_handshake_buffer = Buf}},
case Packets of
[] ->
assert_buffer_sanity(Buf, Options),
- {Record, State} = next_record(State1),
- next_event(StateName, Record, State);
+ next_event(StateName, no_record, State);
_ ->
Events = tls_handshake_events(Packets),
case StateName of
connection ->
- ssl_connection:hibernate_after(StateName, State1, Events);
+ ssl_connection:hibernate_after(StateName, State, Events);
_ ->
{next_state, StateName,
- State1#state{unprocessed_handshake_events = unprocessed_events(Events)}, Events}
+ State#state{unprocessed_handshake_events = unprocessed_events(Events)}, Events}
end
end
catch throw:#alert{} = Alert ->
@@ -277,11 +276,10 @@ renegotiate(#state{role = server,
{BinMsg, ConnectionStates} =
tls_record:encode_handshake(Frag, Version, ConnectionStates0),
send(Transport, Socket, BinMsg),
- State1 = State0#state{connection_states =
+ State = State0#state{connection_states =
ConnectionStates,
tls_handshake_history = Hs0},
- {Record, State} = next_record(State1),
- next_event(hello, Record, State, Actions).
+ next_event(hello, no_record, State, Actions).
send_handshake(Handshake, State) ->
send_handshake_flight(queue_handshake(Handshake, State)).
@@ -367,13 +365,11 @@ send_alert_in_connection(#alert{description = ?CLOSE_NOTIFY} = Alert, State) ->
send_alert_in_connection(Alert,
#state{protocol_specific = #{sender := Sender}}) ->
tls_sender:send_alert(Sender, Alert).
-send_sync_alert(Alert, #state{protocol_specific = #{sender := Sender}}= State) ->
- tls_sender:send_and_ack_alert(Sender, Alert),
- receive
- {Sender, ack_alert} ->
- ok
- after ?DEFAULT_TIMEOUT ->
- %% Sender is blocked terminate anyway
+send_sync_alert(
+ Alert, #state{protocol_specific = #{sender := Sender}} = State) ->
+ try tls_sender:send_and_ack_alert(Sender, Alert)
+ catch
+ _:_ ->
throw({stop, {shutdown, own_alert}, State})
end.
@@ -411,23 +407,6 @@ protocol_name() ->
encode_data(Data, Version, ConnectionStates0)->
tls_record:encode_data(Data, Version, ConnectionStates0).
-passive_receive(State0 = #state{user_data_buffer = Buffer}, StateName) ->
- case Buffer of
- <<>> ->
- {Record, State} = next_record(State0),
- next_event(StateName, Record, State);
- _ ->
- {Record, State} = ssl_connection:read_application_data(<<>>, State0),
- next_event(StateName, Record, State)
- end.
-
-next_record_if_active(State =
- #state{socket_options =
- #socket_options{active = false}}) ->
- {no_record ,State};
-next_record_if_active(State) ->
- next_record(State).
-
send(Transport, Socket, Data) ->
tls_socket:send(Transport, Socket, Data).
@@ -469,15 +448,14 @@ init({call, From}, {start, Timeout},
{BinMsg, ConnectionStates, Handshake} =
encode_handshake(Hello, HelloVersion, ConnectionStates0, Handshake0),
send(Transport, Socket, BinMsg),
- State1 = State0#state{connection_states = ConnectionStates,
- negotiated_version = Version, %% Requested version
- session =
- Session0#session{session_id = Hello#client_hello.session_id},
- tls_handshake_history = Handshake,
- start_or_recv_from = From,
+ State = State0#state{connection_states = ConnectionStates,
+ negotiated_version = Version, %% Requested version
+ session =
+ Session0#session{session_id = Hello#client_hello.session_id},
+ tls_handshake_history = Handshake,
+ start_or_recv_from = From,
timer = Timer},
- {Record, State} = next_record(State1),
- next_event(hello, Record, State);
+ next_event(hello, no_record, State);
init(Type, Event, State) ->
gen_handshake(?FUNCTION_NAME, Type, Event, State).
@@ -612,36 +590,33 @@ connection(internal, #hello_request{},
connection_states = ConnectionStates} = State0) ->
Hello = tls_handshake:client_hello(Host, Port, ConnectionStates, SslOpts,
Cache, CacheCb, Renegotiation, Cert),
- {State1, Actions} = send_handshake(Hello, State0),
- {Record, State} =
- next_record(
- State1#state{session = Session0#session{session_id
- = Hello#client_hello.session_id}}),
- next_event(hello, Record, State, Actions);
+ {State, Actions} = send_handshake(Hello, State0),
+ next_event(hello, no_record, State#state{session = Session0#session{session_id
+ = Hello#client_hello.session_id}}, Actions);
connection(internal, #client_hello{} = Hello,
#state{role = server, allow_renegotiate = true, connection_states = CS,
%%protocol_cb = Connection,
protocol_specific = #{sender := Sender}
- } = State0) ->
+ } = State) ->
%% Mitigate Computational DoS attack
%% http://www.educatedguesswork.org/2011/10/ssltls_and_computational_dos.html
%% http://www.thc.org/thc-ssl-dos/ Rather than disabling client
%% initiated renegotiation we will disallow many client initiated
%% renegotiations immediately after each other.
erlang:send_after(?WAIT_TO_ALLOW_RENEGOTIATION, self(), allow_renegotiate),
- {Record, State} = next_record(State0#state{allow_renegotiate = false,
- renegotiation = {true, peer}}),
{ok, Write} = tls_sender:renegotiate(Sender),
- next_event(hello, Record, State#state{connection_states = CS#{current_write => Write}},
+ next_event(hello, no_record, State#state{connection_states = CS#{current_write => Write},
+ allow_renegotiate = false,
+ renegotiation = {true, peer}
+ },
[{next_event, internal, Hello}]);
connection(internal, #client_hello{},
#state{role = server, allow_renegotiate = false,
protocol_cb = Connection} = State0) ->
Alert = ?ALERT_REC(?WARNING, ?NO_RENEGOTIATION),
send_alert_in_connection(Alert, State0),
- State1 = Connection:reinit_handshake_data(State0),
- {Record, State} = next_record(State1),
- next_event(?FUNCTION_NAME, Record, State);
+ State = Connection:reinit_handshake_data(State0),
+ next_event(?FUNCTION_NAME, no_record, State);
connection(Type, Event, State) ->
ssl_connection:?FUNCTION_NAME(Type, Event, State, ?MODULE).
@@ -658,6 +633,12 @@ downgrade(Type, Event, State) ->
callback_mode() ->
state_functions.
+
+terminate(
+ {shutdown, sender_died, Reason}, _StateName,
+ #state{socket = Socket, transport_cb = Transport} = State) ->
+ ssl_connection:handle_trusted_certs_db(State),
+ close(Reason, Socket, Transport, undefined, undefined);
terminate(Reason, StateName, State) ->
catch ssl_connection:terminate(Reason, StateName, State),
ensure_sender_terminate(Reason, State).
@@ -684,6 +665,13 @@ initial_state(Role, Sender, Host, Port, Socket, {SSLOptions, SocketOptions, Trac
_ ->
ssl_session_cache
end,
+
+ InternalActiveN = case application:get_env(ssl, internal_active_n) of
+ {ok, N} when is_integer(N) andalso (not IsErlDist) ->
+ N;
+ _ ->
+ ?INTERNAL_ACTIVE_N
+ end,
UserMonitor = erlang:monitor(process, User),
@@ -710,7 +698,10 @@ initial_state(Role, Sender, Host, Port, Socket, {SSLOptions, SocketOptions, Trac
protocol_cb = ?MODULE,
tracker = Tracker,
flight_buffer = [],
- protocol_specific = #{sender => Sender}
+ protocol_specific = #{sender => Sender,
+ active_n => InternalActiveN,
+ active_n_toggle => true
+ }
}.
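
The protocol_specific map above is seeded from the ssl application environment. As a rough sketch (the value 20 is only an illustration; when the key is unset the ?INTERNAL_ACTIVE_N default is used, and Erlang distribution connections ignore the setting, as the guard above shows), the key could be set before the ssl application starts:

    %% illustration only -- 20 is an arbitrary example value
    application:load(ssl),
    ok = application:set_env(ssl, internal_active_n, 20),
    {ok, _Started} = application:ensure_all_started(ssl).
    %% or in sys.config: [{ssl, [{internal_active_n, 20}]}].
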
erl_dist_data(true) ->
@@ -771,7 +762,8 @@ tls_handshake_events(Packets) ->
%% raw data from socket, unpack records
handle_info({Protocol, _, Data}, StateName,
- #state{data_tag = Protocol} = State0) ->
+ #state{data_tag = Protocol
+ } = State0) ->
case next_tls_record(Data, StateName, State0) of
{Record, State} ->
next_event(StateName, Record, State);
@@ -779,10 +771,16 @@ handle_info({Protocol, _, Data}, StateName,
ssl_connection:handle_normal_shutdown(Alert, StateName, State0),
ssl_connection:stop({shutdown, own_alert}, State0)
end;
+handle_info({tcp_passive, Socket}, StateName, #state{socket = Socket,
+ protocol_specific = PS
+ } = State) ->
+ next_event(StateName, no_record, State#state{protocol_specific = PS#{active_n_toggle => true}});
handle_info({CloseTag, Socket}, StateName,
#state{socket = Socket, close_tag = CloseTag,
socket_options = #socket_options{active = Active},
protocol_buffers = #protocol_buffers{tls_cipher_texts = CTs},
+ user_data_buffer = Buffer,
+ protocol_specific = PS,
negotiated_version = Version} = State) ->
%% Note that as of TLS 1.1,
@@ -790,7 +788,7 @@ handle_info({CloseTag, Socket}, StateName,
%% session not be resumed. This is a change from TLS 1.0 to conform
%% with widespread implementation practice.
- case (Active == false) andalso (CTs =/= []) of
+ case (Active == false) andalso ((CTs =/= []) or (Buffer =/= <<>>)) of
false ->
case Version of
{1, N} when N >= 1 ->
@@ -808,8 +806,9 @@ handle_info({CloseTag, Socket}, StateName,
true ->
%% Fixes non-delivery of final TLS record in {active, once}.
%% Basically allows the application the opportunity to set {active, once} again
- %% and then receive the final message.
- next_event(StateName, no_record, State)
+ %% and then receive the final message. Set internal active_n to zero
+ %% to ensure that the socket close message is sent if there is not enough data to deliver.
+ next_event(StateName, no_record, State#state{protocol_specific = PS#{active_n_toggle => true}})
end;
handle_info({'EXIT', Sender, Reason}, _,
#state{protocol_specific = #{sender := Sender}} = State) ->
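
The connection process now runs its socket with {active, N} and re-arms it when the emulator delivers {tcp_passive, Socket} (the active_n_toggle flag above). A minimal stand-alone sketch of that socket behaviour, using plain gen_tcp rather than the ssl machinery:

    -module(active_n_sketch).
    -export([run/2]).

    %% After at most N data messages the socket turns passive and sends
    %% {tcp_passive, Socket}; the owner then re-arms it, which is the same
    %% pattern the active_n_toggle flag implements for the TLS socket.
    run(Socket, N) ->
        ok = inet:setopts(Socket, [{active, N}]),
        wait(Socket, N).

    wait(Socket, N) ->
        receive
            {tcp, Socket, _Data} ->
                wait(Socket, N);
            {tcp_passive, Socket} ->
                run(Socket, N);
            {tcp_closed, Socket} ->
                ok
        end.
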
diff --git a/lib/ssl/src/tls_record.erl b/lib/ssl/src/tls_record.erl
index f1aca8c801..cf0690f2a5 100644
--- a/lib/ssl/src/tls_record.erl
+++ b/lib/ssl/src/tls_record.erl
@@ -113,7 +113,7 @@ encode_handshake(Frag, Version,
ConnectionStates) ->
case iolist_size(Frag) of
N when N > ?MAX_PLAIN_TEXT_LENGTH ->
- Data = split_bin(iolist_to_binary(Frag), ?MAX_PLAIN_TEXT_LENGTH, Version, BCA, BeastMitigation),
+ Data = split_bin(iolist_to_binary(Frag), Version, BCA, BeastMitigation),
encode_iolist(?HANDSHAKE, Data, Version, ConnectionStates);
_ ->
encode_plain_text(?HANDSHAKE, Version, Frag, ConnectionStates)
@@ -150,7 +150,7 @@ encode_data(Frag, Version,
security_parameters :=
#security_parameters{bulk_cipher_algorithm = BCA}}} =
ConnectionStates) ->
- Data = split_bin(Frag, ?MAX_PLAIN_TEXT_LENGTH, Version, BCA, BeastMitigation),
+ Data = split_bin(Frag, Version, BCA, BeastMitigation),
encode_iolist(?APPLICATION_DATA, Data, Version, ConnectionStates).
%%====================================================================
@@ -176,14 +176,15 @@ decode_cipher_text(#ssl_tls{type = Type, version = Version,
BulkCipherAlgo,
compression_algorithm = CompAlg}
} = ReadState0} = ConnnectionStates0, _) ->
- AAD = calc_aad(Type, Version, ReadState0),
- case ssl_cipher:decipher_aead(BulkCipherAlgo, CipherS0, Seq, AAD, CipherFragment, Version) of
- {PlainFragment, CipherS1} ->
+ AAD = start_additional_data(Type, Version, ReadState0),
+ CipherS1 = ssl_record:nonce_seed(BulkCipherAlgo, <<?UINT64(Seq)>>, CipherS0),
+ case ssl_record:decipher_aead(BulkCipherAlgo, CipherS1, AAD, CipherFragment, Version) of
+ {PlainFragment, CipherState} ->
{Plain, CompressionS1} = ssl_record:uncompress(CompAlg,
PlainFragment, CompressionS0),
ConnnectionStates = ConnnectionStates0#{
current_read => ReadState0#{
- cipher_state => CipherS1,
+ cipher_state => CipherState,
sequence_number => Seq + 1,
compression_state => CompressionS1}},
{CipherText#ssl_tls{fragment = Plain}, ConnnectionStates};
@@ -453,15 +454,20 @@ encode_iolist(Type, Data, Version, ConnectionStates0) ->
{lists:reverse(EncodedMsg), ConnectionStates}.
%%--------------------------------------------------------------------
do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
- security_parameters :=
+ cipher_state := CipherS0,
+ sequence_number := Seq,
+ security_parameters :=
#security_parameters{
cipher_type = ?AEAD,
+ bulk_cipher_algorithm = BCAlg,
compression_algorithm = CompAlg}
} = WriteState0) ->
{Comp, CompS1} = ssl_record:compress(CompAlg, Data, CompS0),
- WriteState1 = WriteState0#{compression_state => CompS1},
- AAD = calc_aad(Type, Version, WriteState1),
- ssl_record:cipher_aead(Version, Comp, WriteState1, AAD);
+ CipherS = ssl_record:nonce_seed(BCAlg, <<?UINT64(Seq)>>, CipherS0),
+ WriteState = WriteState0#{compression_state => CompS1,
+ cipher_state => CipherS},
+ AAD = start_additional_data(Type, Version, WriteState),
+ ssl_record:cipher_aead(Version, Comp, WriteState, AAD);
do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
security_parameters :=
#security_parameters{compression_algorithm = CompAlg}
@@ -473,33 +479,32 @@ do_encode_plain_text(Type, Version, Data, #{compression_state := CompS0,
do_encode_plain_text(_,_,_,CS) ->
exit({cs, CS}).
%%--------------------------------------------------------------------
-calc_aad(Type, {MajVer, MinVer},
+start_additional_data(Type, {MajVer, MinVer},
#{sequence_number := SeqNo}) ->
<<?UINT64(SeqNo), ?BYTE(Type), ?BYTE(MajVer), ?BYTE(MinVer)>>.
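
For orientation, a hand-worked example of the prefix built by start_additional_data/3 above; the record length is appended later by the AEAD cipher code, which is presumably why the function is no longer called calc_aad. The values are illustrative:

    %% sequence number 1, application data (type 23), TLS 1.2 = {3,3}
    <<0,0,0,0,0,0,0,1, 23, 3, 3>> =
        start_additional_data(23, {3,3}, #{sequence_number => 1}).
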
%% 1/n-1 splitting countermeasure against the Rizzo/Duong BEAST attack;
%% RC4 ciphers are not vulnerable to this attack.
-split_bin(<<FirstByte:8, Rest/binary>>, ChunkSize, Version, BCA, one_n_minus_one) when
+split_bin(<<FirstByte:8, Rest/binary>>, Version, BCA, one_n_minus_one) when
BCA =/= ?RC4 andalso ({3, 1} == Version orelse
{3, 0} == Version) ->
- do_split_bin(Rest, ChunkSize, [[FirstByte]]);
+ [[FirstByte]|do_split_bin(Rest)];
%% 0/n splitting countermeasure for clients that are incompatible with 1/n-1
%% splitting.
-split_bin(Bin, ChunkSize, Version, BCA, zero_n) when
+split_bin(Bin, Version, BCA, zero_n) when
BCA =/= ?RC4 andalso ({3, 1} == Version orelse
{3, 0} == Version) ->
- do_split_bin(Bin, ChunkSize, [[<<>>]]);
-split_bin(Bin, ChunkSize, _, _, _) ->
- do_split_bin(Bin, ChunkSize, []).
+ [<<>>|do_split_bin(Bin)];
+split_bin(Bin, _, _, _) ->
+ do_split_bin(Bin).
-do_split_bin(<<>>, _, Acc) ->
- lists:reverse(Acc);
-do_split_bin(Bin, ChunkSize, Acc) ->
+do_split_bin(<<>>) -> [];
+do_split_bin(Bin) ->
case Bin of
- <<Chunk:ChunkSize/binary, Rest/binary>> ->
- do_split_bin(Rest, ChunkSize, [Chunk | Acc]);
+ <<Chunk:?MAX_PLAIN_TEXT_LENGTH/binary, Rest/binary>> ->
+ [Chunk|do_split_bin(Rest)];
_ ->
- lists:reverse(Acc, [Bin])
+ [Bin]
end.
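
A stand-alone, parameterised version of the chunking that do_split_bin/1 now performs, for experimenting outside the module (the real code uses the ?MAX_PLAIN_TEXT_LENGTH macro, 2^14 = 16384 bytes for TLS):

    chunks(<<>>, _Max) ->
        [];
    chunks(Bin, Max) ->
        case Bin of
            <<Chunk:Max/binary, Rest/binary>> ->
                [Chunk | chunks(Rest, Max)];
            _ ->
                [Bin]
        end.
    %% chunks(binary:copy(<<0>>, 40000), 16384) gives three chunks of
    %% 16384, 16384 and 7232 bytes.
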
%%--------------------------------------------------------------------
lowest_list_protocol_version(Ver, []) ->
diff --git a/lib/ssl/src/tls_sender.erl b/lib/ssl/src/tls_sender.erl
index f5379f60e3..ee2b7be4f4 100644
--- a/lib/ssl/src/tls_sender.erl
+++ b/lib/ssl/src/tls_sender.erl
@@ -29,7 +29,7 @@
%% API
-export([start/0, start/1, initialize/2, send_data/2, send_alert/2,
- send_and_ack_alert/2, renegotiate/1, setopts/2,
+ send_and_ack_alert/2, setopts/2, renegotiate/1,
update_connection_state/3, dist_tls_socket/1, dist_handshake_complete/3]).
%% gen_statem callbacks
@@ -97,12 +97,12 @@ send_alert(Pid, Alert) ->
gen_statem:cast(Pid, Alert).
%%--------------------------------------------------------------------
--spec send_and_ack_alert(pid(), #alert{}) -> ok.
+-spec send_and_ack_alert(pid(), #alert{}) -> _.
%% Description: TLS connection process wants to send an Alert
%% in the connection state and receive an ack.
%%--------------------------------------------------------------------
send_and_ack_alert(Pid, Alert) ->
- gen_statem:cast(Pid, {ack_alert, Alert}).
+ gen_statem:call(Pid, {ack_alert, Alert}, ?DEFAULT_TIMEOUT).
%%--------------------------------------------------------------------
-spec setopts(pid(), [{packet, integer() | atom()}]) -> ok | {error, term()}.
%% Description: Set socket options (packet mode) in the sender process
@@ -200,8 +200,9 @@ connection({call, From}, renegotiate,
#data{connection_states = #{current_write := Write}} = StateData) ->
{next_state, handshake, StateData, [{reply, From, {ok, Write}}]};
connection({call, From}, {application_data, AppData},
- #data{socket_options = SockOpts} = StateData) ->
- case encode_packet(AppData, SockOpts) of
+ #data{socket_options = #socket_options{packet = Packet}} =
+ StateData) ->
+ case encode_packet(Packet, AppData) of
{error, _} = Error ->
{next_state, ?FUNCTION_NAME, StateData, [{reply, From, Error}]};
Data ->
@@ -217,17 +218,30 @@ connection({call, From}, dist_get_tls_socket,
tracker = Tracker} = StateData) ->
TLSSocket = Connection:socket([Pid, self()], Transport, Socket, Connection, Tracker),
{next_state, ?FUNCTION_NAME, StateData, [{reply, From, {ok, TLSSocket}}]};
-connection({call, From}, {dist_handshake_complete, _Node, DHandle}, #data{connection_pid = Pid} = StateData) ->
+connection({call, From}, {dist_handshake_complete, _Node, DHandle},
+ #data{connection_pid = Pid,
+ socket_options = #socket_options{packet = Packet}} =
+ StateData) ->
ok = erlang:dist_ctrl_input_handler(DHandle, Pid),
ok = ssl_connection:dist_handshake_complete(Pid, DHandle),
%% From now on we execute on normal priority
process_flag(priority, normal),
- Events = dist_data_events(DHandle, []),
- {next_state, ?FUNCTION_NAME, StateData#data{dist_handle = DHandle}, [{reply, From, ok} | Events]};
-connection(cast, {ack_alert, #alert{} = Alert}, #data{connection_pid = Pid} =StateData0) ->
+ {next_state, ?FUNCTION_NAME, StateData#data{dist_handle = DHandle},
+ [{reply, From, ok}
+ | case dist_data(DHandle, Packet) of
+ [] ->
+ [];
+ Data ->
+ [{next_event, internal,
+ {application_packets,{self(),undefined},Data}}]
+ end]};
+connection({call, From}, {ack_alert, #alert{} = Alert}, StateData0) ->
StateData = send_tls_alert(Alert, StateData0),
- Pid ! {self(), ack_alert},
- {next_state, ?FUNCTION_NAME, StateData};
+ {next_state, ?FUNCTION_NAME, StateData,
+ [{reply,From,ok}]};
+connection(internal, {application_packets, From, Data}, StateData) ->
+ send_application_data(Data, From, ?FUNCTION_NAME, StateData);
+%%
connection(cast, #alert{} = Alert, StateData0) ->
StateData = send_tls_alert(Alert, StateData0),
{next_state, ?FUNCTION_NAME, StateData};
@@ -237,9 +251,19 @@ connection(cast, {new_write, WritesState, Version},
StateData#data{connection_states =
ConnectionStates0#{current_write => WritesState},
negotiated_version = Version}};
-connection(info, dist_data, #data{dist_handle = DHandle} = StateData) ->
- Events = dist_data_events(DHandle, []),
- {next_state, ?FUNCTION_NAME, StateData, Events};
+%%
+connection(info, dist_data,
+ #data{dist_handle = DHandle,
+ socket_options = #socket_options{packet = Packet}} =
+ StateData) ->
+ {next_state, ?FUNCTION_NAME, StateData,
+ case dist_data(DHandle, Packet) of
+ [] ->
+ [];
+ Data ->
+ [{next_event, internal,
+ {application_packets,{self(),undefined},Data}}]
+ end};
connection(info, tick, StateData) ->
consume_ticks(),
{next_state, ?FUNCTION_NAME, StateData,
@@ -272,6 +296,8 @@ handshake(cast, {new_write, WritesState, Version},
StateData#data{connection_states =
ConnectionStates0#{current_write => WritesState},
negotiated_version = Version}};
+handshake(internal, {application_packets,_,_}, _) ->
+ {keep_state_and_data, [postpone]};
handshake(info, Msg, StateData) ->
handle_info(Msg, ?FUNCTION_NAME, StateData).
@@ -319,7 +345,7 @@ handle_info({'DOWN', Monitor, _, _, _}, _,
#data{connection_monitor = Monitor} = StateData) ->
{stop, normal, StateData};
handle_info(_,_,_) ->
- {keep_state_and_data}.
+ keep_state_and_data.
send_tls_alert(Alert, #data{negotiated_version = Version,
socket = Socket,
@@ -342,12 +368,13 @@ send_application_data(Data, From, StateName,
renegotiate_at = RenegotiateAt} = StateData0) ->
case time_to_renegotiate(Data, ConnectionStates0, RenegotiateAt) of
true ->
- ssl_connection:internal_renegotiation(Pid, ConnectionStates0),
+ ssl_connection:internal_renegotiation(Pid, ConnectionStates0),
{next_state, handshake, StateData0,
- [{next_event, {call, From}, {application_data, Data}}]};
+ [{next_event, internal, {application_packets, From, Data}}]};
false ->
{Msgs, ConnectionStates} =
- Connection:encode_data(Data, Version, ConnectionStates0),
+ Connection:encode_data(
+ iolist_to_binary(Data), Version, ConnectionStates0),
StateData = StateData0#data{connection_states = ConnectionStates},
case Connection:send(Transport, Socket, Msgs) of
ok when DistHandle =/= undefined ->
@@ -361,21 +388,18 @@ send_application_data(Data, From, StateName,
end
end.
-encode_packet(Data, #socket_options{packet=Packet}) ->
+-compile({inline, encode_packet/2}).
+encode_packet(Packet, Data) ->
+ Len = iolist_size(Data),
case Packet of
- 1 -> encode_size_packet(Data, 8, (1 bsl 8) - 1);
- 2 -> encode_size_packet(Data, 16, (1 bsl 16) - 1);
- 4 -> encode_size_packet(Data, 32, (1 bsl 32) - 1);
- _ -> Data
- end.
-
-encode_size_packet(Bin, Size, Max) ->
- Len = erlang:byte_size(Bin),
- case Len > Max of
- true ->
- {error, {badarg, {packet_to_large, Len, Max}}};
- false ->
- <<Len:Size, Bin/binary>>
+ 1 when Len < (1 bsl 8) -> [<<Len:8>>,Data];
+ 2 when Len < (1 bsl 16) -> [<<Len:16>>,Data];
+ 4 when Len < (1 bsl 32) -> [<<Len:32>>,Data];
+ N when N =:= 1; N =:= 2; N =:= 4 ->
+ {error,
+ {badarg, {packet_to_large, Len, (1 bsl (Packet bsl 3)) - 1}}};
+ _ ->
+ Data
end.
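
A test-style sketch of the framing encode_packet/2 performs; the function name is hypothetical and it is assumed to live next to encode_packet/2 in tls_sender, since the inlined function is private to the module:

    encode_packet_examples_test() ->
        %% iolist_size(<<"hello">>) is 5, so the header carries 5 in each width
        [<<5:8>>,  <<"hello">>] = encode_packet(1, <<"hello">>),
        [<<5:16>>, <<"hello">>] = encode_packet(2, <<"hello">>),
        [<<5:32>>, <<"hello">>] = encode_packet(4, <<"hello">>),
        %% any other packet mode passes the data through untouched
        <<"hello">> = encode_packet(0, <<"hello">>),
        ok.
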
set_opts(SocketOptions, [{packet, N}]) ->
@@ -409,14 +433,18 @@ call(FsmPid, Event) ->
%%---------------Erlang distribution --------------------------------------
-dist_data_events(DHandle, Events) ->
+dist_data(DHandle, Packet) ->
case erlang:dist_ctrl_get_data(DHandle) of
none ->
erlang:dist_ctrl_get_data_notification(DHandle),
- lists:reverse(Events);
+ [];
Data ->
- Event = {next_event, {call, {self(), undefined}}, {application_data, Data}},
- dist_data_events(DHandle, [Event | Events])
+ %% This is encode_packet(4, Data) without the Len check,
+ %% since the emulator will always deliver Data
+ %% smaller than 4 GB, and the distribution therefore
+ %% always has to use {packet,4}
+ Len = iolist_size(Data),
+ [<<Len:32>>,Data|dist_data(DHandle, Packet)]
end.
consume_ticks() ->
diff --git a/lib/ssl/test/ssl_ECC_SUITE.erl b/lib/ssl/test/ssl_ECC_SUITE.erl
index c93f066825..a5309e866b 100644
--- a/lib/ssl/test/ssl_ECC_SUITE.erl
+++ b/lib/ssl/test/ssl_ECC_SUITE.erl
@@ -395,10 +395,25 @@ client_ecdhe_rsa_server_ecdhe_ecdsa_client_custom(Config) ->
end.
mix_sign(Config) ->
- {COpts0, SOpts0} = ssl_test_lib:make_mix_cert(Config),
+ mix_sign_rsa_peer(Config),
+ mix_sign_ecdsa_peer(Config).
+
+mix_sign_ecdsa_peer(Config) ->
+ {COpts0, SOpts0} = ssl_test_lib:make_mix_cert([{mix, peer_ecc} |Config]),
COpts = ssl_test_lib:ssl_options(COpts0, Config),
SOpts = ssl_test_lib:ssl_options(SOpts0, Config),
ECDHE_ECDSA =
ssl:filter_cipher_suites(ssl:cipher_suites(default, 'tlsv1.2'),
[{key_exchange, fun(ecdhe_ecdsa) -> true; (_) -> false end}]),
ssl_test_lib:basic_test(COpts, [{ciphers, ECDHE_ECDSA} | SOpts], Config).
+
+
+mix_sign_rsa_peer(Config) ->
+ {COpts0, SOpts0} = ssl_test_lib:make_mix_cert([{mix, peer_rsa} |Config]),
+ COpts = ssl_test_lib:ssl_options(COpts0, Config),
+ SOpts = ssl_test_lib:ssl_options(SOpts0, Config),
+ ECDHE_RSA =
+ ssl:filter_cipher_suites(ssl:cipher_suites(default, 'tlsv1.2'),
+ [{key_exchange, fun(ecdhe_rsa) -> true; (_) -> false end}]),
+ ssl_test_lib:basic_test(COpts, [{ciphers, ECDHE_RSA} | SOpts], Config).
+
diff --git a/lib/ssl/test/ssl_basic_SUITE.erl b/lib/ssl/test/ssl_basic_SUITE.erl
index 4585ea7306..9633800da5 100644
--- a/lib/ssl/test/ssl_basic_SUITE.erl
+++ b/lib/ssl/test/ssl_basic_SUITE.erl
@@ -244,7 +244,9 @@ error_handling_tests()->
recv_active_once,
recv_error_handling,
call_in_error_state,
- close_in_error_state
+ close_in_error_state,
+ abuse_transport_accept_socket,
+ controlling_process_transport_accept_socket
].
error_handling_tests_tls()->
@@ -1095,16 +1097,19 @@ tls_closed_in_active_once(Config) when is_list(Config) ->
end.
tls_closed_in_active_once_loop(Socket) ->
- ssl:setopts(Socket, [{active, once}]),
- receive
- {ssl, Socket, _} ->
- tls_closed_in_active_once_loop(Socket);
- {ssl_closed, Socket} ->
- ok
- after 5000 ->
- no_ssl_closed_received
+ case ssl:setopts(Socket, [{active, once}]) of
+ ok ->
+ receive
+ {ssl, Socket, _} ->
+ tls_closed_in_active_once_loop(Socket);
+ {ssl_closed, Socket} ->
+ ok
+ after 5000 ->
+ no_ssl_closed_received
+ end;
+ {error, closed} ->
+ ok
end.
-
%%--------------------------------------------------------------------
connect_dist() ->
[{doc,"Test a simple connect as is used by distribution"}].
@@ -4054,7 +4059,51 @@ close_in_error_state(Config) when is_list(Config) ->
Other ->
ct:fail(Other)
end.
+%%--------------------------------------------------------------------
+abuse_transport_accept_socket() ->
+ [{doc,"Only ssl:handshake and ssl:controlling_process is allowed for transport_accept:sockets"}].
+abuse_transport_accept_socket(Config) when is_list(Config) ->
+ ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
+ ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server_transport_abuse_socket([{node, ServerNode},
+ {port, 0},
+ {from, self()},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {ssl_test_lib, no_result, []}},
+ {options, ClientOpts}]),
+ ssl_test_lib:check_result(Server, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+%%--------------------------------------------------------------------
+controlling_process_transport_accept_socket() ->
+ [{doc,"Only ssl:handshake and ssl:controlling_process is allowed for transport_accept:sockets"}].
+controlling_process_transport_accept_socket(Config) when is_list(Config) ->
+ ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
+ ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ Server = ssl_test_lib:start_server_transport_control([{node, ServerNode},
+ {port, 0},
+ {from, self()},
+ {options, ServerOpts}]),
+ Port = ssl_test_lib:inet_port(Server),
+
+ _Client = ssl_test_lib:start_client_error([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {options, ClientOpts}]),
+ ssl_test_lib:check_result(Server, ok),
+ ssl_test_lib:close(Server).
+
+%%--------------------------------------------------------------------
run_error_server_close([Pid | Opts]) ->
{ok, Listen} = ssl:listen(0, Opts),
{ok,{_, Port}} = ssl:sockname(Listen),
@@ -5174,14 +5223,14 @@ get_invalid_inet_option(Socket) ->
tls_shutdown_result(Socket, server) ->
ssl:send(Socket, "Hej"),
- ssl:shutdown(Socket, write),
+ ok = ssl:shutdown(Socket, write),
{ok, "Hej hopp"} = ssl:recv(Socket, 8),
ok;
tls_shutdown_result(Socket, client) ->
- {ok, "Hej"} = ssl:recv(Socket, 3),
ssl:send(Socket, "Hej hopp"),
- ssl:shutdown(Socket, write),
+ ok = ssl:shutdown(Socket, write),
+ {ok, "Hej"} = ssl:recv(Socket, 3),
ok.
tls_shutdown_write_result(Socket, server) ->
diff --git a/lib/ssl/test/ssl_bench_SUITE.erl b/lib/ssl/test/ssl_bench_SUITE.erl
index 3fe6338d69..13097b08b6 100644
--- a/lib/ssl/test/ssl_bench_SUITE.erl
+++ b/lib/ssl/test/ssl_bench_SUITE.erl
@@ -44,6 +44,7 @@ init_per_suite(Config) ->
nonode@nohost ->
{skipped, "Node not distributed"};
_ ->
+ ssl_test_lib:clean_start(),
[{server_node, ssl_bench_test_lib:setup(perf_server)}|Config]
end.
diff --git a/lib/ssl/test/ssl_bench_test_lib.erl b/lib/ssl/test/ssl_bench_test_lib.erl
index e5cbb911bd..47bcd41608 100644
--- a/lib/ssl/test/ssl_bench_test_lib.erl
+++ b/lib/ssl/test/ssl_bench_test_lib.erl
@@ -58,13 +58,13 @@ setup(Name) ->
Path = code:get_path(),
true = rpc:call(Node, code, set_path, [Path]),
ok = rpc:call(Node, ?MODULE, setup_server, [node()]),
- io:format("Client (~p) using ~s~n",[node(), code:which(ssl)]),
+ io:format("Client (~p) using ~ts~n",[node(), code:which(ssl)]),
(Node =:= node()) andalso restrict_schedulers(client),
Node.
setup_server(ClientNode) ->
(ClientNode =:= node()) andalso restrict_schedulers(server),
- io:format("Server (~p) using ~s~n",[node(), code:which(ssl)]),
+ io:format("Server (~p) using ~ts~n",[node(), code:which(ssl)]),
ok.
restrict_schedulers(Type) ->
diff --git a/lib/ssl/test/ssl_dist_bench_SUITE.erl b/lib/ssl/test/ssl_dist_bench_SUITE.erl
index 3c7904cf24..7409b69639 100644
--- a/lib/ssl/test/ssl_dist_bench_SUITE.erl
+++ b/lib/ssl/test/ssl_dist_bench_SUITE.erl
@@ -32,6 +32,8 @@
-export(
[setup/1,
roundtrip/1,
+ throughput_0/1,
+ throughput_64/1,
throughput_1024/1,
throughput_4096/1,
throughput_16384/1,
@@ -55,7 +57,9 @@ groups() ->
{setup, [{repeat, 1}], [setup]},
{roundtrip, [{repeat, 1}], [roundtrip]},
{throughput, [{repeat, 1}],
- [throughput_1024,
+ [throughput_0,
+ throughput_64,
+ throughput_1024,
throughput_4096,
throughput_16384,
throughput_65536,
@@ -247,8 +251,9 @@ setup(A, B, Prefix, HA, HB) ->
[] = ssl_apply(HB, erlang, nodes, []),
{SetupTime, CycleTime} =
ssl_apply(HA, fun () -> setup_runner(A, B, Rounds) end),
- [] = ssl_apply(HA, erlang, nodes, []),
- [] = ssl_apply(HB, erlang, nodes, []),
+ ok = ssl_apply(HB, fun () -> setup_wait_nodedown(A, 10000) end),
+ %% [] = ssl_apply(HA, erlang, nodes, []),
+ %% [] = ssl_apply(HB, erlang, nodes, []),
SetupSpeed = round((Rounds*1000000*1000) / SetupTime),
CycleSpeed = round((Rounds*1000000*1000) / CycleTime),
_ = report(Prefix++" Setup", SetupSpeed, "setups/1000s"),
@@ -275,6 +280,22 @@ setup_loop(A, B, T, N) ->
setup_loop(A, B, Time + T, N - 1)
end.
+setup_wait_nodedown(A, Time) ->
+ ok = net_kernel:monitor_nodes(true),
+ case nodes() of
+ [] ->
+ ok;
+ [A] ->
+ receive
+ {nodedown,A} ->
+ ok;
+ Unexpected ->
+ {error,{unexpected,Unexpected}}
+ after Time ->
+ {error,timeout}
+ end
+ end.
+
%%----------------
%% Roundtrip speed
@@ -334,6 +355,18 @@ roundtrip_client(Pid, Mon, StartTime, N) ->
%%-----------------
%% Throughput speed
+throughput_0(Config) ->
+ run_nodepair_test(
+ fun (A, B, Prefix, HA, HB) ->
+ throughput(A, B, Prefix, HA, HB, 500000, 0)
+ end, Config).
+
+throughput_64(Config) ->
+ run_nodepair_test(
+ fun (A, B, Prefix, HA, HB) ->
+ throughput(A, B, Prefix, HA, HB, 500000, 64)
+ end, Config).
+
throughput_1024(Config) ->
run_nodepair_test(
fun (A, B, Prefix, HA, HB) ->
@@ -373,45 +406,198 @@ throughput_1048576(Config) ->
throughput(A, B, Prefix, HA, HB, Packets, Size) ->
[] = ssl_apply(HA, erlang, nodes, []),
[] = ssl_apply(HB, erlang, nodes, []),
- Time =
+ #{time := Time,
+ dist_stats := DistStats,
+ client_msacc_stats := ClientMsaccStats,
+ client_prof := ClientProf,
+ server_msacc_stats := ServerMsaccStats,
+ server_prof := ServerProf} =
ssl_apply(HA, fun () -> throughput_runner(A, B, Packets, Size) end),
[B] = ssl_apply(HA, erlang, nodes, []),
[A] = ssl_apply(HB, erlang, nodes, []),
- Speed = round((Packets*Size*1000000) / (1024*Time)),
+ ClientMsaccStats =:= undefined orelse
+ msacc:print(ClientMsaccStats),
+ io:format("DistStats: ~p~n", [DistStats]),
+ Overhead =
+ 50 % Distribution protocol headers (empirical) (TLS+=54)
+ + byte_size(erlang:term_to_binary([0|<<>>])), % Benchmark overhead
+ Bytes = Packets * (Size + Overhead),
+ io:format("~w bytes, ~.4g s~n", [Bytes,Time/1000000]),
+ ClientMsaccStats =:= undefined orelse
+ io:format(
+ "Sender core usage ratio: ~.4g ns/byte~n",
+ [msacc:stats(system_runtime, ClientMsaccStats)*1000/Bytes]),
+ ServerMsaccStats =:= undefined orelse
+ begin
+ io:format(
+ "Receiver core usage ratio: ~.4g ns/byte~n",
+ [msacc:stats(system_runtime, ServerMsaccStats)*1000/Bytes]),
+ msacc:print(ServerMsaccStats)
+ end,
+ io:format("******* ClientProf:~n", []), prof_print(ClientProf),
+ io:format("******* ServerProf:~n", []), prof_print(ServerProf),
+ Speed = round((Bytes * 1000000) / (1024 * Time)),
report(Prefix++" Throughput_"++integer_to_list(Size), Speed, "kB/s").
%% Runs on node A and spawns a server on node B
throughput_runner(A, B, Rounds, Size) ->
Payload = payload(Size),
- ClientPid = self(),
[A] = rpc:call(B, erlang, nodes, []),
+ ClientPid = self(),
ServerPid =
erlang:spawn(
B,
fun () -> throughput_server(ClientPid, Rounds) end),
ServerMon = erlang:monitor(process, ServerPid),
- microseconds(
- throughput_client(
- ServerPid, ServerMon, Payload, start_time(), Rounds)).
+ msacc:available() andalso
+ begin
+ msacc:stop(),
+ msacc:reset(),
+ msacc:start(),
+ ok
+ end,
+ prof_start(),
+ {Time,ServerMsaccStats,ServerProf} =
+ throughput_client(ServerPid, ServerMon, Payload, Rounds),
+ prof_stop(),
+ ClientMsaccStats =
+ case msacc:available() of
+ true ->
+ MStats = msacc:stats(),
+ msacc:stop(),
+ MStats;
+ false ->
+ undefined
+ end,
+ ClientProf = prof_end(),
+ [{_Node,Socket}] = dig_dist_node_sockets(),
+ DistStats = inet:getstat(Socket),
+ #{time => microseconds(Time),
+ dist_stats => DistStats,
+ client_msacc_stats => ClientMsaccStats,
+ client_prof => ClientProf,
+ server_msacc_stats => ServerMsaccStats,
+ server_prof => ServerProf}.
+
+dig_dist_node_sockets() ->
+ [case DistCtrl of
+ {_Node,Socket} = NodeSocket when is_port(Socket) ->
+ NodeSocket;
+ {Node,DistCtrlPid} when is_pid(DistCtrlPid) ->
+ [{links,DistCtrlLinks}] = process_info(DistCtrlPid, [links]),
+ case [S || S <- DistCtrlLinks, is_port(S)] of
+ [Socket] ->
+ {Node,Socket};
+ [] ->
+ [{monitors,[{process,DistSenderPid}]}] =
+ process_info(DistCtrlPid, [monitors]),
+ [{links,DistSenderLinks}] =
+ process_info(DistSenderPid, [links]),
+ [Socket] = [S || S <- DistSenderLinks, is_port(S)],
+ {Node,Socket}
+ end
+ end || DistCtrl <- erlang:system_info(dist_ctrl)].
+
-throughput_server(_Pid, 0) ->
- ok;
throughput_server(Pid, N) ->
+ msacc:available() andalso
+ begin
+ msacc:stop(),
+ msacc:reset(),
+ msacc:start(),
+ ok
+ end,
+ prof_start(),
+ throughput_server_loop(Pid, N).
+
+throughput_server_loop(_Pid, 0) ->
+ prof_stop(),
+ MsaccStats =
+ case msacc:available() of
+ true ->
+ msacc:stop(),
+ MStats = msacc:stats(),
+ msacc:reset(),
+ MStats;
+ false ->
+ undefined
+ end,
+ Prof = prof_end(),
+ exit({ok,MsaccStats,Prof});
+throughput_server_loop(Pid, N) ->
receive
- [N|_] ->
- throughput_server(Pid, N-1)
+ {Pid, N, _} ->
+ throughput_server_loop(Pid, N-1)
end.
-throughput_client(_Pid, Mon, _Payload, StartTime, 0) ->
+throughput_client(Pid, Mon, Payload, N) ->
+ throughput_client_loop(Pid, Mon, Payload, N, start_time()).
+
+throughput_client_loop(_Pid, Mon, _Payload, 0, StartTime) ->
receive
- {'DOWN', Mon, _, _, normal} ->
- elapsed_time(StartTime);
+ {'DOWN', Mon, _, _, {ok,MsaccStats,Prof}} ->
+ {elapsed_time(StartTime),MsaccStats,Prof};
{'DOWN', Mon, _, _, Other} ->
exit(Other)
end;
-throughput_client(Pid, Mon, Payload, StartTime, N) ->
- Pid ! [N|Payload],
- throughput_client(Pid, Mon, Payload, StartTime, N - 1).
+throughput_client_loop(Pid, Mon, Payload, N, StartTime) ->
+ Pid ! {self(), N, Payload},
+ throughput_client_loop(Pid, Mon, Payload, N - 1, StartTime).
+
+
+-define(prof, none). % none | cprof | eprof
+
+-if(?prof =:= cprof).
+prof_start() ->
+ cprof:stop(),
+ cprof:start(),
+ ok.
+-elif(?prof =:= eprof).
+prof_start() ->
+ {ok,_} = eprof:start(),
+ profiling = eprof:start_profiling(processes()),
+ ok.
+-elif(?prof =:= none).
+prof_start() ->
+ ok.
+-endif.
+
+-if(?prof =:= cprof).
+prof_stop() ->
+ cprof:pause(),
+ ok.
+-elif(?prof =:= eprof).
+prof_stop() ->
+ _ = eprof:stop_profiling(),
+ ok.
+-elif(?prof =:= none).
+prof_stop() ->
+ ok.
+-endif.
+
+-if(?prof =:= cprof).
+prof_end() ->
+ Prof = cprof:analyse(),
+ cprof:stop(),
+ Prof.
+-elif(?prof =:= eprof).
+prof_end() ->
+ eprof:dump_data().
+-elif(?prof =:= none).
+prof_end() ->
+ [].
+-endif.
+
+-if(?prof =:= cprof).
+prof_print(Prof) ->
+ io:format("~p.~n", [Prof]).
+-elif(?prof =:= eprof).
+prof_print(Dump) ->
+ eprof:analyze(undefined, total, [], Dump).
+-elif(?prof =:= none).
+prof_print([]) ->
+ ok.
+-endif.
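
The profiler is chosen at compile time; switching it only requires editing the define that heads this block, e.g.:

    -define(prof, eprof). % instead of none; cprof is the third option
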
%%%-------------------------------------------------------------------
%%% Test cases helpers
diff --git a/lib/ssl/test/ssl_engine_SUITE.erl b/lib/ssl/test/ssl_engine_SUITE.erl
index 1423c99dc2..a39a62e550 100644
--- a/lib/ssl/test/ssl_engine_SUITE.erl
+++ b/lib/ssl/test/ssl_engine_SUITE.erl
@@ -46,10 +46,17 @@ init_per_suite(Config) ->
ssl_test_lib:clean_start(),
case crypto:get_test_engine() of
{ok, EngineName} ->
- try crypto:engine_load(<<"dynamic">>,
- [{<<"SO_PATH">>, EngineName},
- <<"LOAD">>],
- []) of
+ try
+ %% The test engine has its own fake rsa sign/verify that
+ %% you don't want to use, so exclude it from methods to load:
+ Methods =
+ crypto:engine_get_all_methods() -- [engine_method_rsa],
+ crypto:engine_load(<<"dynamic">>,
+ [{<<"SO_PATH">>, EngineName},
+ <<"LOAD">>],
+ [],
+ Methods)
+ of
{ok, Engine} ->
[{engine, Engine} |Config];
{error, Reason} ->
@@ -90,12 +97,14 @@ end_per_testcase(_TestCase, Config) ->
private_key(Config) when is_list(Config) ->
ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), "client_engine"]),
ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), "server_engine"]),
+ Ext = x509_test:extensions([{key_usage, [digitalSignature, keyEncipherment]}]),
#{server_config := ServerConf,
client_config := ClientConf} = GenCertData =
public_key:pkix_test_data(#{server_chain =>
#{root => [{key, ssl_test_lib:hardcode_rsa_key(1)}],
intermediates => [[{key, ssl_test_lib:hardcode_rsa_key(2)}]],
- peer => [{key, ssl_test_lib:hardcode_rsa_key(3)}
+ peer => [{extensions, Ext},
+ {key, ssl_test_lib:hardcode_rsa_key(3)}
]},
client_chain =>
#{root => [{key, ssl_test_lib:hardcode_rsa_key(4)}],
@@ -131,6 +140,12 @@ private_key(Config) when is_list(Config) ->
%% Test with engine
test_tls_connection(EngineServerConf, EngineClientConf, Config),
+ %% Test with engine and rsa keyexchange
+ RSASuites = all_kex_rsa_suites([{tls_version, 'tlsv1.2'} | Config]),
+
+ test_tls_connection([{ciphers, RSASuites}, {versions, ['tlsv1.2']} | EngineServerConf],
+ [{ciphers, RSASuites}, {versions, ['tlsv1.2']} | EngineClientConf], Config),
+
%% Test with engine and present file arguments
test_tls_connection(EngineFileServerConf, EngineFileClientConf, Config),
@@ -160,3 +175,8 @@ test_tls_connection(ServerConf, ClientConf, Config) ->
ssl_test_lib:check_result(Server, ok, Client, ok),
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
+
+all_kex_rsa_suites(Config) ->
+ Version = proplists:get_value(tls_version, Config),
+ All = ssl:cipher_suites(all, Version),
+ ssl:filter_cipher_suites(All,[{key_exchange, fun(rsa) -> true;(_) -> false end}]).
diff --git a/lib/ssl/test/ssl_packet_SUITE.erl b/lib/ssl/test/ssl_packet_SUITE.erl
index ebf8ddbfac..9af1ae0e3f 100644
--- a/lib/ssl/test/ssl_packet_SUITE.erl
+++ b/lib/ssl/test/ssl_packet_SUITE.erl
@@ -725,7 +725,7 @@ packet_switch(Config) when is_list(Config) ->
{options, [{nodelay, true}, {packet, 2} |
ClientOpts]}]),
- ssl_test_lib:check_result(Client, ok),
+ ssl_test_lib:check_result(Client, ok, Server, ok),
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
diff --git a/lib/ssl/test/ssl_payload_SUITE.erl b/lib/ssl/test/ssl_payload_SUITE.erl
index 5939800001..1f9b6a5772 100644
--- a/lib/ssl/test/ssl_payload_SUITE.erl
+++ b/lib/ssl/test/ssl_payload_SUITE.erl
@@ -71,7 +71,10 @@ init_per_suite(Config) ->
try crypto:start() of
ok ->
ssl_test_lib:clean_start(),
- {ok, _} = make_certs:all(proplists:get_value(data_dir, Config), proplists:get_value(priv_dir, Config)),
+ {ok, _} =
+ make_certs:all(
+ proplists:get_value(data_dir, Config),
+ proplists:get_value(priv_dir, Config)),
ssl_test_lib:cert_options(Config)
catch _:_ ->
{skip, "Crypto did not start"}
@@ -103,12 +106,13 @@ end_per_group(GroupName, Config) ->
Config
end.
-init_per_testcase(TestCase, Config) when TestCase == server_echos_passive_huge;
- TestCase == server_echos_active_once_huge;
- TestCase == server_echos_active_huge;
- TestCase == client_echos_passive_huge;
- TestCase == client_echos_active_once_huge;
- TestCase == client_echos_active_huge ->
+init_per_testcase(TestCase, Config)
+ when TestCase == server_echos_passive_huge;
+ TestCase == server_echos_active_once_huge;
+ TestCase == server_echos_active_huge;
+ TestCase == client_echos_passive_huge;
+ TestCase == client_echos_active_once_huge;
+ TestCase == client_echos_active_huge ->
case erlang:system_info(system_architecture) of
"sparc-sun-solaris2.10" ->
{skip,"Will take to long time on an old Sparc"};
@@ -117,12 +121,13 @@ init_per_testcase(TestCase, Config) when TestCase == server_echos_passive_huge;
Config
end;
-init_per_testcase(TestCase, Config) when TestCase == server_echos_passive_big;
- TestCase == server_echos_active_once_big;
- TestCase == server_echos_active_big;
- TestCase == client_echos_passive_big;
- TestCase == client_echos_active_once_big;
- TestCase == client_echos_active_big ->
+init_per_testcase(TestCase, Config)
+ when TestCase == server_echos_passive_big;
+ TestCase == server_echos_active_once_big;
+ TestCase == server_echos_active_big;
+ TestCase == client_echos_passive_big;
+ TestCase == client_echos_active_once_big;
+ TestCase == client_echos_active_big ->
ct:timetrap({seconds, 60}),
Config;
@@ -144,11 +149,10 @@ server_echos_passive_small(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- server_echos_passive(Str, 1000, ClientOpts, ServerOpts,
- ClientNode, ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 100),
+ server_echos_passive(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
@@ -160,11 +164,10 @@ server_echos_active_once_small(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- server_echos_active_once(Str, 1000, ClientOpts, ServerOpts,
- ClientNode, ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 100),
+ server_echos_active_once(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
@@ -176,11 +179,10 @@ server_echos_active_small(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- server_echos_active(Str, 1000, ClientOpts, ServerOpts,
- ClientNode, ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 100),
+ server_echos_active(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
client_echos_passive_small() ->
@@ -191,11 +193,10 @@ client_echos_passive_small(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- client_echos_passive(Str, 1000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 100),
+ client_echos_passive(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
client_echos_active_once_small() ->
@@ -206,11 +207,10 @@ client_echos_active_once_small(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- client_echos_active_once(Str, 1000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 100),
+ client_echos_active_once(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
client_echos_active_small() ->
@@ -221,11 +221,10 @@ client_echos_active_small(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- client_echos_active(Str, 1000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 100),
+ client_echos_active(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
@@ -237,11 +236,10 @@ server_echos_passive_big(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- server_echos_passive(Str, 50000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 5000),
+ server_echos_passive(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
@@ -253,11 +251,10 @@ server_echos_active_once_big(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- server_echos_active_once(Str, 50000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 5000),
+ server_echos_active_once(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
@@ -269,11 +266,10 @@ server_echos_active_big(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- server_echos_active(Str, 50000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 5000),
+ server_echos_active(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
client_echos_passive_big() ->
@@ -284,11 +280,10 @@ client_echos_passive_big(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- client_echos_passive(Str, 50000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 5000),
+ client_echos_passive(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
client_echos_active_once_big() ->
@@ -299,11 +294,10 @@ client_echos_active_once_big(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- client_echos_active_once(Str, 50000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 5000),
+ client_echos_active_once(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
client_echos_active_big() ->
@@ -314,11 +308,10 @@ client_echos_active_big(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- client_echos_active(Str, 50000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 5000),
+ client_echos_active(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
server_echos_passive_huge() ->
@@ -329,11 +322,10 @@ server_echos_passive_huge(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- server_echos_passive(Str, 500000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 50000),
+ server_echos_passive(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
server_echos_active_once_huge() ->
@@ -344,11 +336,10 @@ server_echos_active_once_huge(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- server_echos_active_once(Str, 500000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 50000),
+ server_echos_active_once(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
server_echos_active_huge() ->
@@ -359,11 +350,10 @@ server_echos_active_huge(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
-
- server_echos_active(Str, 500000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 50000),
+ server_echos_active(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
client_echos_passive_huge() ->
@@ -374,10 +364,10 @@ client_echos_passive_huge(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
- client_echos_passive(Str, 500000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 50000),
+ client_echos_passive(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
client_echos_active_once_huge() ->
@@ -388,10 +378,10 @@ client_echos_active_once_huge(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
- client_echos_active_once(Str, 500000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 50000),
+ client_echos_active_once(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
client_echos_active_huge() ->
@@ -402,293 +392,299 @@ client_echos_active_huge(Config) when is_list(Config) ->
ClientOpts = ssl_test_lib:ssl_options(client_opts, Config),
ServerOpts = ssl_test_lib:ssl_options(server_opts, Config),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
-
- Str = "1234567890",
- client_echos_active(Str, 500000, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname).
+ %%
+ Data = binary:copy(<<"1234567890">>, 50000),
+ client_echos_active(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname).
%%--------------------------------------------------------------------
%% Internal functions ------------------------------------------------
%%--------------------------------------------------------------------
-server_echos_passive(Data, Length, ClientOpts, ServerOpts,
- ClientNode, ServerNode, Hostname) ->
- Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
- {from, self()},
- {mfa,
- {?MODULE, echoer,
- [Data, Length]}},
- {options,
- [{active, false},{mode, binary}
- | ServerOpts]}]),
- Port = ssl_test_lib:inet_port(Server),
- Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {mfa,
- {?MODULE, sender,
- [Data,
- Length]}},
- {options,
- [{active, false}, {mode, binary} |
- ClientOpts]}]),
+server_echos_passive(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname) ->
+ Length = byte_size(Data),
+ Server =
+ ssl_test_lib:start_server(
+ [{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, echoer, [Length]}},
+ {options, [{active, false}, {mode, binary} | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client =
+ ssl_test_lib:start_client(
+ [{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, sender, [Data]}},
+ {options, [{active, false}, {mode, binary} | ClientOpts]}]),
+ %%
ssl_test_lib:check_result(Server, ok, Client, ok),
-
+ %%
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
-server_echos_active_once(Data, Length, ClientOpts, ServerOpts, ClientNode,
- ServerNode, Hostname) ->
- Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
- {from, self()},
- {mfa,
- {?MODULE, echoer_once,
- [Data, Length]}},
- {options, [{active, once},
- {mode, binary}|
- ServerOpts]}]),
+server_echos_active_once(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname) ->
+ Length = byte_size(Data),
+ Server =
+ ssl_test_lib:start_server(
+ [{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, echoer_active_once, [Length]}},
+ {options, [{active, once}, {mode, binary} | ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
- Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {mfa,
- {?MODULE, sender_once,
- [Data, Length]}},
- {options, [{active, once},
- {mode, binary} |
- ClientOpts]}]),
+ Client =
+ ssl_test_lib:start_client(
+ [{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, sender_active_once, [Data]}},
+ {options, [{active, once}, {mode, binary} | ClientOpts]}]),
+ %%
ssl_test_lib:check_result(Server, ok, Client, ok),
-
+ %%
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
-server_echos_active(Data, Length, ClientOpts, ServerOpts,
- ClientNode, ServerNode, Hostname) ->
- Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
- {from, self()},
- {mfa,
- {?MODULE, echoer_active,
- [Data, Length]}},
- {options,
- [{active, true},
- {mode, binary} | ServerOpts]}]),
+server_echos_active(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname) ->
+ Length = byte_size(Data),
+ Server =
+ ssl_test_lib:start_server(
+ [{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, echoer_active, [Length]}},
+ {options, [{active, true}, {mode, binary} | ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
- Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {mfa,
- {?MODULE, sender_active,
- [Data,
- Length]}},
- {options,
- [{active, true}, {mode, binary}
- | ClientOpts]}]),
+ Client =
+ ssl_test_lib:start_client(
+ [{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, sender_active, [Data]}},
+ {options, [{active, true}, {mode, binary} | ClientOpts]}]),
+ %%
ssl_test_lib:check_result(Server, ok, Client, ok),
-
+ %%
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
-client_echos_passive(Data, Length, ClientOpts, ServerOpts,
- ClientNode, ServerNode, Hostname) ->
- Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
- {from, self()},
- {mfa,
- {?MODULE, sender,
- [Data, Length]}},
- {options,
- [{active, false}, {mode, binary} |
- ServerOpts]}]),
+client_echos_passive(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname) ->
+ Length = byte_size(Data),
+ Server =
+ ssl_test_lib:start_server(
+ [{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, sender, [Data]}},
+ {options, [{active, false}, {mode, binary} | ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
- Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {mfa,
- {?MODULE, echoer,
- [Data,
- Length]}},
- {options,
- [{active, false}, {mode, binary}
- | ClientOpts]}]),
+ Client =
+ ssl_test_lib:start_client(
+ [{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, echoer, [Length]}},
+ {options, [{active, false}, {mode, binary} | ClientOpts]}]),
+ %%
ssl_test_lib:check_result(Server, ok, Client, ok),
-
+ %%
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
-client_echos_active_once(Data, Length,
- ClientOpts, ServerOpts, ClientNode, ServerNode,
- Hostname) ->
- Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
- {from, self()},
- {mfa,
- {?MODULE, sender_once,
- [Data, Length]}},
- {options, [{active, once},
- {mode, binary} |
- ServerOpts]}]),
+client_echos_active_once(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname) ->
+ Length = byte_size(Data),
+ Server =
+ ssl_test_lib:start_server(
+ [{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, sender_active_once, [Data]}},
+ {options, [{active, once}, {mode, binary} | ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
- Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {mfa,
- {?MODULE, echoer_once,
- [Data,
- Length]}},
- {options,[{active, once},
- {mode, binary}
- | ClientOpts]}]),
+ Client =
+ ssl_test_lib:start_client(
+ [{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, echoer_active_once, [Length]}},
+ {options,[{active, once}, {mode, binary} | ClientOpts]}]),
+ %%
ssl_test_lib:check_result(Server, ok, Client, ok),
-
+ %%
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
-client_echos_active(Data, Length, ClientOpts, ServerOpts, ClientNode,
- ServerNode,
- Hostname) ->
- Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
- {from, self()},
- {mfa,
- {?MODULE, sender_active,
- [Data, Length]}},
- {options, [{active, true},
- {mode, binary}
- | ServerOpts]}]),
+client_echos_active(
+ Data, ClientOpts, ServerOpts, ClientNode, ServerNode, Hostname) ->
+ Length = byte_size(Data),
+ Server =
+ ssl_test_lib:start_server(
+ [{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {?MODULE, sender_active, [Data]}},
+ {options, [{active, true}, {mode, binary} | ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
- Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
- {host, Hostname},
- {from, self()},
- {mfa,
- {?MODULE, echoer_active,
- [Data,
- Length]}},
- {options, [{active, true},
- {mode, binary}
- | ClientOpts]}]),
+ Client =
+ ssl_test_lib:start_client(
+ [{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {?MODULE, echoer_active, [Length]}},
+ {options, [{active, true}, {mode, binary} | ClientOpts]}]),
+ %%
ssl_test_lib:check_result(Server, ok, Client, ok),
-
+ %%
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
-send(_, _, _, 0,_) ->
- ok;
-send(Socket, Data, Size, Repeate,F) ->
- NewData = lists:duplicate(Size div 10, Data),
- ssl:send(Socket, NewData),
- F(),
- send(Socket, Data, Size, Repeate - 1,F).
+
+send(Socket, Data, Count, Verify) ->
+ send(Socket, Data, Count, <<>>, Verify).
+%%
+send(_Socket, _Data, 0, Acc, _Verify) ->
+ Acc;
+send(Socket, Data, Count, Acc, Verify) ->
+ ok = ssl:send(Socket, Data),
+ NewAcc = Verify(Acc),
+ send(Socket, Data, Count - 1, NewAcc, Verify).
+
-sender(Socket, Data, Size) ->
- ok = send(Socket, Data, Size, 100, fun() -> do_recv(Socket, Data, Size, <<>>, false) end),
+sender(Socket, Data) ->
ct:log("Sender recv: ~p~n", [ssl:getopts(Socket, [active])]),
+ <<>> =
+ send(
+ Socket, Data, 100,
+ fun(Acc) -> verify_recv(Socket, Data, Acc) end),
ok.
-sender_once(Socket, Data, Size) ->
- send(Socket, Data, Size, 100,
- fun() -> do_active_once(Socket, Data, Size, <<>>, false) end),
- ct:log("Sender active once: ~p~n",
- [ssl:getopts(Socket, [active])]),
+sender_active_once(Socket, Data) ->
+ ct:log("Sender active once: ~p~n", [ssl:getopts(Socket, [active])]),
+ <<>> =
+ send(
+ Socket, Data, 100,
+ fun(Acc) -> verify_active_once(Socket, Data, Acc) end),
ok.
-sender_active(Socket, Data, Size) ->
- F = fun() -> do_active(Socket, Data, Size, <<>>, false) end,
- send(Socket, Data, Size, 100, F),
+sender_active(Socket, Data) ->
ct:log("Sender active: ~p~n", [ssl:getopts(Socket, [active])]),
+ <<>> =
+ send(
+ Socket, Data, 100,
+ fun(Acc) -> verify_active(Socket, Data, Acc) end),
ok.
-echoer(Socket, Data, Size) ->
+
+echoer(Socket, Size) ->
ct:log("Echoer recv: ~p~n", [ssl:getopts(Socket, [active])]),
- echo(fun() -> do_recv(Socket, Data, Size, <<>>, true) end, 100).
+ echo_recv(Socket, Size * 100).
-echoer_once(Socket, Data, Size) ->
- ct:log("Echoer active once: ~p ~n",
- [ssl:getopts(Socket, [active])]),
- echo(fun() -> do_active_once(Socket, Data, Size, <<>>, true) end, 100).
+echoer_active_once(Socket, Size) ->
+ ct:log("Echoer active once: ~p~n", [ssl:getopts(Socket, [active])]),
+ echo_active_once(Socket, Size * 100).
-echoer_active(Socket, Data, Size) ->
+echoer_active(Socket, Size) ->
ct:log("Echoer active: ~p~n", [ssl:getopts(Socket, [active])]),
- echo(fun() -> do_active(Socket, Data, Size, <<>>, true) end, 100).
-
-echo(_Fun, 0) -> ok;
-echo(Fun, N) ->
- Fun(),
- echo(Fun, N-1).
+ echo_active(Socket, Size * 100).
+
+
+%% Receive Size bytes
+echo_recv(Socket, Size) ->
+ {ok, Data} = ssl:recv(Socket, 0),
+ ok = ssl:send(Socket, Data),
+ NewSize = Size - byte_size(Data),
+ if
+ 0 < NewSize ->
+ echo_recv(Socket, NewSize);
+ 0 == NewSize ->
+ ok
+ end.
+%% Verify that received data is SentData, return any superfluous data
+verify_recv(Socket, SentData, Acc) ->
+ {ok, NewData} = ssl:recv(Socket, 0),
+ SentSize = byte_size(SentData),
+ NewAcc = <<Acc/binary, NewData/binary>>,
+ NewSize = byte_size(NewAcc),
+ if
+ SentSize < NewSize ->
+ {SentData,Rest} = split_binary(NewAcc, SentSize),
+ Rest;
+ NewSize < SentSize ->
+ verify_recv(Socket, SentData, NewAcc);
+ true ->
+ SentData = NewAcc,
+ <<>>
+ end.
-do_recv(_Socket, _Data, 0, _Acc, true) ->
- ok;
-do_recv(_Socket, Data, 0, Acc, false) ->
- Data = lists:sublist(binary_to_list(Acc), 10);
+%% Receive Size bytes
+echo_active_once(Socket, Size) ->
+ receive
+ {ssl, Socket, Data} ->
+ ok = ssl:send(Socket, Data),
+ NewSize = Size - byte_size(Data),
+ ssl:setopts(Socket, [{active, once}]),
+ if
+ 0 < NewSize ->
+ echo_active_once(Socket, NewSize);
+ 0 == NewSize ->
+ ok
+ end
+ end.
-do_recv(Socket, Data, Size, Acc, Echo) ->
- {ok, NewData} = ssl:recv(Socket, 0),
- NewSize = size(NewData),
- case Echo of
- true ->
- ssl:send(Socket, NewData),
- NewSize = size(NewData),
- do_recv(Socket, Data, Size - NewSize, [], Echo);
- false ->
- case size(Acc) < 10 of
- true ->
- do_recv(Socket, Data, Size - NewSize,
- <<Acc/binary, NewData/binary>>, Echo);
- false ->
- do_recv(Socket, Data, Size - NewSize, Acc, Echo)
- end
+%% Verify that received data is SentData, return any superfluous data
+verify_active_once(Socket, SentData, Acc) ->
+ receive
+ {ssl, Socket, Data} ->
+ SentSize = byte_size(SentData),
+ NewAcc = <<Acc/binary, Data/binary>>,
+ NewSize = byte_size(NewAcc),
+ ssl:setopts(Socket, [{active, once}]),
+ if
+ SentSize < NewSize ->
+ {SentData,Rest} = split_binary(NewAcc, SentSize),
+ Rest;
+ NewSize < SentSize ->
+ verify_active_once(Socket, SentData, NewAcc);
+ true ->
+ SentData = NewAcc,
+ <<>>
+ end
end.
-do_active_once(_Socket, _Data, 0, _Acc, true) ->
- ok;
-do_active_once(_Socket, Data, 0, Acc, false) ->
- Data = lists:sublist(binary_to_list(Acc), 10);
-do_active_once(Socket, Data, Size, Acc, Echo) ->
- receive
- {ssl, Socket, NewData} ->
- NewSize = size(NewData),
- case Echo of
- true ->
- ssl:send(Socket, NewData),
- ssl:setopts(Socket, [{active, once}]),
- do_active_once(Socket, Data, Size - NewSize, [], Echo);
- false ->
- case size(Acc) < 10 of
- true ->
- ssl:setopts(Socket, [{active, once}]),
- do_active_once(Socket, Data, Size - NewSize,
- <<Acc/binary, NewData/binary>>,
- Echo);
- false ->
- ssl:setopts(Socket, [{active, once}]),
- do_active_once(Socket, Data,
- Size - NewSize, Acc, Echo)
- end
- end
+%% Receive Size bytes
+echo_active(Socket, Size) ->
+ receive
+ {ssl, Socket, Data} ->
+ ok = ssl:send(Socket, Data),
+ NewSize = Size - byte_size(Data),
+ if
+ 0 < NewSize ->
+ echo_active(Socket, NewSize);
+ 0 == NewSize ->
+ ok
+ end
end.
-
-do_active(_Socket, _Data, 0, _Acc, true) ->
- ok;
-do_active(_Socket, Data, 0, Acc, false) ->
- Data = lists:sublist(binary_to_list(Acc), 10);
-
-do_active(Socket, Data, Size, Acc, Echo) ->
- receive
- {ssl, Socket, NewData} ->
- NewSize = size(NewData),
- case Echo of
- true ->
- ssl:send(Socket, NewData),
- do_active(Socket, Data, Size - NewSize, [], Echo);
- false ->
- case size(Acc) < 10 of
- true ->
- do_active(Socket, Data, Size - NewSize,
- <<Acc/binary, NewData/binary>>,
- Echo);
- false ->
- do_active(Socket, Data,
- Size - NewSize, Acc, Echo)
- end
- end
+
+%% Verify that received data is SentData, return any superfluous data
+verify_active(Socket, SentData, Acc) ->
+ receive
+ {ssl, Socket, Data} ->
+ SentSize = byte_size(SentData),
+ NewAcc = <<Acc/binary, Data/binary>>,
+ NewSize = byte_size(NewAcc),
+ if
+ SentSize < NewSize ->
+ {SentData,Rest} = split_binary(NewAcc, SentSize),
+ Rest;
+ NewSize < SentSize ->
+ verify_active(Socket, SentData, NewAcc);
+ true ->
+ SentData = NewAcc,
+ <<>>
+ end
end.
diff --git a/lib/ssl/test/ssl_test_lib.erl b/lib/ssl/test/ssl_test_lib.erl
index 39a5bcaad6..a8d62d6c4e 100644
--- a/lib/ssl/test/ssl_test_lib.erl
+++ b/lib/ssl/test/ssl_test_lib.erl
@@ -26,6 +26,7 @@
%% Note: This directive should only be used in test suites.
-compile(export_all).
+-compile(nowarn_export_all).
-record(sslsocket, { fd = nil, pid = nil}).
-define(SLEEP, 1000).
@@ -196,6 +197,55 @@ connect(ListenSocket, Node, _, _, Timeout, Opts, _) ->
rpc:call(Node, ssl, ssl_accept, [AcceptSocket, Opts, Timeout]),
AcceptSocket.
+
+start_server_transport_abuse_socket(Args) ->
+ Result = spawn_link(?MODULE, transport_accept_abuse, [Args]),
+ receive
+ {listen, up} ->
+ Result
+ end.
+
+start_server_transport_control(Args) ->
+ Result = spawn_link(?MODULE, transport_switch_control, [Args]),
+ receive
+ {listen, up} ->
+ Result
+ end.
+
+
+transport_accept_abuse(Opts) ->
+ Node = proplists:get_value(node, Opts),
+ Port = proplists:get_value(port, Opts),
+ Options = proplists:get_value(options, Opts),
+ Pid = proplists:get_value(from, Opts),
+ Transport = proplists:get_value(transport, Opts, ssl),
+ ct:log("~p:~p~nssl:listen(~p, ~p)~n", [?MODULE,?LINE, Port, Options]),
+ {ok, ListenSocket} = rpc:call(Node, Transport, listen, [Port, Options]),
+ Pid ! {listen, up},
+ send_selected_port(Pid, Port, ListenSocket),
+ {ok, AcceptSocket} = rpc:call(Node, ssl, transport_accept,
+ [ListenSocket]),
+ {error, _} = rpc:call(Node, ssl, connection_information, [AcceptSocket]),
+ _ = rpc:call(Node, ssl, handshake, [AcceptSocket, infinity]),
+ Pid ! {self(), ok}.
+
+
+transport_switch_control(Opts) ->
+ Node = proplists:get_value(node, Opts),
+ Port = proplists:get_value(port, Opts),
+ Options = proplists:get_value(options, Opts),
+ Pid = proplists:get_value(from, Opts),
+ Transport = proplists:get_value(transport, Opts, ssl),
+ ct:log("~p:~p~nssl:listen(~p, ~p)~n", [?MODULE,?LINE, Port, Options]),
+ {ok, ListenSocket} = rpc:call(Node, Transport, listen, [Port, Options]),
+ Pid ! {listen, up},
+ send_selected_port(Pid, Port, ListenSocket),
+ {ok, AcceptSocket} = rpc:call(Node, ssl, transport_accept,
+ [ListenSocket]),
+ ok = rpc:call(Node, ssl, controlling_process, [AcceptSocket, self()]),
+ Pid ! {self(), ok}.
+
+
remove_close_msg(0) ->
ok;
remove_close_msg(ReconnectTimes) ->
@@ -693,20 +743,12 @@ make_mix_cert(Config) ->
Ext = x509_test:extensions([{key_usage, [digitalSignature]}]),
Digest = {digest, appropriate_sha(crypto:supports())},
CurveOid = hd(tls_v1:ecc_curves(0)),
- ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix"]),
- ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix"]),
- ClientChain = [[Digest, {key, {namedCurve, CurveOid}}],
- [Digest, {key, hardcode_rsa_key(1)}],
- [Digest, {key, {namedCurve, CurveOid}}, {extensions, Ext}]
- ],
- ServerChain = [[Digest, {key, {namedCurve, CurveOid}}],
- [Digest, {key, hardcode_rsa_key(2)}],
- [Digest, {key, {namedCurve, CurveOid}},{extensions, Ext}]
- ],
+ Mix = proplists:get_value(mix, Config, peer_ecc),
ClientChainType =ServerChainType = mix,
+ {ClientChain, ServerChain} = mix(Mix, Digest, CurveOid, Ext),
CertChainConf = gen_conf(ClientChainType, ServerChainType, ClientChain, ServerChain),
- ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), atom_to_list(ClientChainType)]),
- ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), atom_to_list(ServerChainType)]),
+ ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix" ++ atom_to_list(Mix)]),
+ ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), "mix" ++ atom_to_list(Mix)]),
GenCertData = public_key:pkix_test_data(CertChainConf),
[{server_config, ServerConf},
{client_config, ClientConf}] =
@@ -715,6 +757,28 @@ make_mix_cert(Config) ->
[{reuseaddr, true}, {verify, verify_peer} | ServerConf]
}.
+mix(peer_ecc, Digest, CurveOid, Ext) ->
+ ClientChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(1)}],
+ [Digest, {key, {namedCurve, CurveOid}}, {extensions, Ext}]
+ ],
+ ServerChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(2)}],
+ [Digest, {key, {namedCurve, CurveOid}},{extensions, Ext}]
+ ],
+ {ClientChain, ServerChain};
+
+mix(peer_rsa, Digest, CurveOid, Ext) ->
+ ClientChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(1)}, {extensions, Ext}]
+ ],
+ ServerChain = [[Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, {namedCurve, CurveOid}}],
+ [Digest, {key, hardcode_rsa_key(2)},{extensions, Ext}]
+ ],
+ {ClientChain, ServerChain}.
+
make_ecdsa_cert(Config) ->
CryptoSupport = crypto:supports(),
case proplists:get_bool(ecdsa, proplists:get_value(public_keys, CryptoSupport)) of
@@ -1643,10 +1707,10 @@ openssl_dsa_support() ->
true;
"LibreSSL" ++ _ ->
false;
- "OpenSSL 1.1" ++ Rest ->
+ "OpenSSL 1.1" ++ _Rest ->
false;
"OpenSSL 1.0.1" ++ Rest ->
- hd(Rest) >= s;
+ hd(Rest) >= $s;
_ ->
true
end.
@@ -1683,8 +1747,6 @@ openssl_sane_client_cert() ->
false;
"LibreSSL 2.0" ++ _ ->
false;
- "LibreSSL 2.0" ++ _ ->
- false;
"OpenSSL 1.0.1s-freebsd" ->
false;
"OpenSSL 1.0.0" ++ _ ->
diff --git a/lib/stdlib/doc/src/assert_hrl.xml b/lib/stdlib/doc/src/assert_hrl.xml
index 4dc7299609..fb27954235 100644
--- a/lib/stdlib/doc/src/assert_hrl.xml
+++ b/lib/stdlib/doc/src/assert_hrl.xml
@@ -46,7 +46,7 @@
is the macro name, for example, <c>assertEqual</c>. <c>Info</c> is a list
of tagged values, such as <c>[{module, M}, {line, L}, ...]</c>, which
gives more information about the location and cause of the exception. All
- entries in the <c>Info</c> list are optional; do not rely programatically
+ entries in the <c>Info</c> list are optional; do not rely programmatically
on any of them being present.</p>
<p>Each assert macro has a corresponding version with an extra argument,
diff --git a/lib/stdlib/doc/src/beam_lib.xml b/lib/stdlib/doc/src/beam_lib.xml
index 26d0724aaf..213170df7f 100644
--- a/lib/stdlib/doc/src/beam_lib.xml
+++ b/lib/stdlib/doc/src/beam_lib.xml
@@ -180,8 +180,8 @@ io:fwrite("~s~n", [erl_prettypr:format(erl_syntax:form_list(AC))]).</code>
<name name="beam"/>
<desc>
<p>Each of the functions described below accept either the
- module name, the filename, or a binary containing the BEAM
- module.</p>
+ filename (as a string) or a binary containing the BEAM
+ module.</p>
</desc>
</datatype>
<datatype>
diff --git a/lib/stdlib/doc/src/calendar.xml b/lib/stdlib/doc/src/calendar.xml
index 6b4fa7f98a..5aee635c38 100644
--- a/lib/stdlib/doc/src/calendar.xml
+++ b/lib/stdlib/doc/src/calendar.xml
@@ -403,7 +403,11 @@
default is <c>second</c>. If some other unit is given
(<c>millisecond</c>, <c>microsecond</c>, or
<c>nanosecond</c>), the formatted string includes a
- fraction of a second.</p>
+ fraction of a second. The number of fractional second
+ digits is three, six, or nine depending on what time unit
+ is chosen. Notice that trailing zeros are not removed from
+ the fraction.
+ </p>
</item>
</taglist>
<pre>
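A rough shell sketch of the fixed-width fractions described above (the return values follow the updated calendar_SUITE expectations later in this diff):

  1> calendar:system_time_to_rfc3339(1, [{unit, millisecond}, {offset, "Z"}]).
  "1970-01-01T00:00:00.001Z"
  2> calendar:system_time_to_rfc3339(1, [{unit, microsecond}, {offset, "Z"}]).
  "1970-01-01T00:00:00.000001Z"
  3> calendar:system_time_to_rfc3339(1000000, [{unit, microsecond}, {offset, "Z"}]).
  "1970-01-01T00:00:01.000000Z"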
diff --git a/lib/stdlib/doc/src/epp.xml b/lib/stdlib/doc/src/epp.xml
index 1dc0161398..d803d259aa 100644
--- a/lib/stdlib/doc/src/epp.xml
+++ b/lib/stdlib/doc/src/epp.xml
@@ -124,6 +124,10 @@
<fsummary>Open a file for preprocessing.</fsummary>
<desc>
<p>Opens a file for preprocessing.</p>
+ <p>If you want to change the file name of the implicit -file()
+ attributes inserted during preprocessing, you can do so with
+ <c>{source_name, <anno>SourceName</anno>}</c>. If unset, it will
+ default to the name of the opened file.</p>
<p>If <c>extra</c> is specified in
<c><anno>Options</anno></c>, the return value is
<c>{ok, <anno>Epp</anno>, <anno>Extra</anno>}</c> instead
@@ -169,6 +173,10 @@
<p>Preprocesses and parses an Erlang source file.
Notice that tuple <c>{eof, <anno>Line</anno>}</c> returned at the
end of the file is included as a "form".</p>
+ <p>If you want to change the file name of the implicit -file()
+ attributes inserted during preprocessing, you can do so with
+ <c>{source_name, <anno>SourceName</anno>}</c>. If unset, it will
+ default to the name of the opened file.</p>
<p>If <c>extra</c> is specified in
<c><anno>Options</anno></c>, the return value is
<c>{ok, [<anno>Form</anno>], <anno>Extra</anno>}</c> instead
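A minimal sketch of the new option, assuming a source file at a made-up path; the expected leading form mirrors the epp_SUITE test added later in this diff:

  {ok, Forms} = epp:parse_file("/tmp/demo.erl", [{source_name, "gurka.erl"}]),
  %% The implicit -file() attribute now carries the overridden name.
  [{attribute, _Anno, file, {"gurka.erl", _Line}} | _] = Forms.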
diff --git a/lib/stdlib/doc/src/gen_event.xml b/lib/stdlib/doc/src/gen_event.xml
index f793ec7fdf..fc34e51216 100644
--- a/lib/stdlib/doc/src/gen_event.xml
+++ b/lib/stdlib/doc/src/gen_event.xml
@@ -775,7 +775,7 @@ gen_event:stop -----> Module:terminate/2
<p>This callback is optional, so callback modules need not
export it. The <c>gen_event</c> module provides a default
implementation of this function that logs about the unexpected
- <c>Info</c> message, drops it and returns <c>{noreply, State}</c>.</p>
+ <c>Info</c> message, drops it and returns <c>{ok, State}</c>.</p>
</note>
<p>This function is called for each installed event handler when
an event manager receives any other message than an event or
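A handler that spells out the corrected default return value might look roughly like this sketch:

  handle_info(Info, State) ->
      %% Log the unexpected message, drop it, and keep the handler installed.
      logger:warning("unexpected info message: ~p", [Info]),
      {ok, State}.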
diff --git a/lib/stdlib/doc/src/lists.xml b/lib/stdlib/doc/src/lists.xml
index c3d5d7e07a..e4215a5336 100644
--- a/lib/stdlib/doc/src/lists.xml
+++ b/lib/stdlib/doc/src/lists.xml
@@ -850,14 +850,6 @@ splitwith(Pred, List) ->
> <input>lists:subtract("123212", "212").</input>
"312".</pre>
<p><c>lists:subtract(A, B)</c> is equivalent to <c>A -- B</c>.</p>
- <warning>
- <p>The complexity of <c>lists:subtract(A, B)</c> is proportional to
- <c>length(A)*length(B)</c>, meaning that it is very slow if both
- <c>A</c> and <c>B</c> are long lists. (If both lists are long, it
- is a much better choice to use ordered lists and
- <seealso marker="ordsets#subtract/2">
- <c>ordsets:subtract/2</c></seealso>.</p>
- </warning>
</desc>
</func>
diff --git a/lib/stdlib/doc/src/notes.xml b/lib/stdlib/doc/src/notes.xml
index d800885b16..039f087708 100644
--- a/lib/stdlib/doc/src/notes.xml
+++ b/lib/stdlib/doc/src/notes.xml
@@ -504,6 +504,21 @@
</section>
+<section><title>STDLIB 3.4.5.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>List subtraction (The <c>--</c> operator) will now
+ yield properly on large inputs.</p>
+ <p>
+ Own Id: OTP-15371</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>STDLIB 3.4.5</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -1658,6 +1673,21 @@
</section>
+<section><title>STDLIB 2.8.0.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>List subtraction (The <c>--</c> operator) will now
+ yield properly on large inputs.</p>
+ <p>
+ Own Id: OTP-15371</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>STDLIB 2.8</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -7827,4 +7857,3 @@
</section>
</section>
</chapter>
-
diff --git a/lib/stdlib/src/beam_lib.erl b/lib/stdlib/src/beam_lib.erl
index 01181b1097..3386cfcbe6 100644
--- a/lib/stdlib/src/beam_lib.erl
+++ b/lib/stdlib/src/beam_lib.erl
@@ -53,7 +53,7 @@
%%-------------------------------------------------------------------------
--type beam() :: module() | file:filename() | binary().
+-type beam() :: file:filename() | binary().
-type debug_info() :: {DbgiVersion :: atom(), Backend :: module(), Data :: term()} | 'no_debug_info'.
-type forms() :: [erl_parse:abstract_form() | erl_parse:form_info()].
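In other words, callers pass either a path or the loaded binary; a sketch with a hypothetical .beam path:

  {ok, {foo, [{exports, Exports}]}} = beam_lib:chunks("ebin/foo.beam", [exports]),
  {ok, Bin} = file:read_file("ebin/foo.beam"),
  %% The same chunks can be read from the binary form of the module.
  {ok, {foo, [{exports, Exports}]}} = beam_lib:chunks(Bin, [exports]).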
diff --git a/lib/stdlib/src/calendar.erl b/lib/stdlib/src/calendar.erl
index 9a600c1972..bb5d450cd6 100644
--- a/lib/stdlib/src/calendar.erl
+++ b/lib/stdlib/src/calendar.erl
@@ -693,14 +693,11 @@ local_offset(SystemTime, Unit) ->
UniversalSecs = datetime_to_gregorian_seconds(UniversalTime),
LocalSecs - UniversalSecs.
+fraction_str(1, _Time) ->
+ "";
fraction_str(Factor, Time) ->
- case Time rem Factor of
- 0 ->
- "";
- Fraction ->
- FS = io_lib:fwrite(".~*..0B", [log10(Factor), abs(Fraction)]),
- string:trim(FS, trailing, "0")
- end.
+ Fraction = Time rem Factor,
+ io_lib:fwrite(".~*..0B", [log10(Factor), abs(Fraction)]).
fraction(second, _) ->
0;
diff --git a/lib/stdlib/src/epp.erl b/lib/stdlib/src/epp.erl
index cc34d4bdd3..181a524db6 100644
--- a/lib/stdlib/src/epp.erl
+++ b/lib/stdlib/src/epp.erl
@@ -117,6 +117,7 @@ open(Name, File, StartLocation, Path, Pdm) ->
{'ok', Epp} | {'ok', Epp, Extra} | {'error', ErrorDescriptor} when
Options :: [{'default_encoding', DefEncoding :: source_encoding()} |
{'includes', IncludePath :: [DirectoryName :: file:name()]} |
+ {'source_name', SourceName :: file:name()} |
{'macros', PredefMacros :: macros()} |
{'name',FileName :: file:name()} |
'extra'],
@@ -248,6 +249,7 @@ parse_file(Ifile, Path, Predefs) ->
{'ok', [Form]} | {'ok', [Form], Extra} | {error, OpenError} when
FileName :: file:name(),
Options :: [{'includes', IncludePath :: [DirectoryName :: file:name()]} |
+ {'source_name', SourceName :: file:name()} |
{'macros', PredefMacros :: macros()} |
{'default_encoding', DefEncoding :: source_encoding()} |
'extra'],
@@ -540,9 +542,10 @@ server(Pid, Name, Options, #epp{pre_opened=PreOpened}=St) ->
init_server(Pid, Name, Options, St)
end.
-init_server(Pid, Name, Options, St0) ->
+init_server(Pid, FileName, Options, St0) ->
+ SourceName = proplists:get_value(source_name, Options, FileName),
Pdm = proplists:get_value(macros, Options, []),
- Ms0 = predef_macros(Name),
+ Ms0 = predef_macros(FileName),
case user_predef(Pdm, Ms0) of
{ok,Ms1} ->
#epp{file = File, location = AtLocation} = St0,
@@ -552,14 +555,14 @@ init_server(Pid, Name, Options, St0) ->
epp_reply(Pid, {ok,self(),Encoding}),
%% ensure directory of current source file is
%% first in path
- Path = [filename:dirname(Name) |
+ Path = [filename:dirname(FileName) |
proplists:get_value(includes, Options, [])],
- St = St0#epp{delta=0, name=Name, name2=Name,
+ St = St0#epp{delta=0, name=SourceName, name2=SourceName,
path=Path, macs=Ms1,
default_encoding=DefEncoding},
From = wait_request(St),
Anno = erl_anno:new(AtLocation),
- enter_file_reply(From, file_name(Name), Anno,
+ enter_file_reply(From, file_name(SourceName), Anno,
AtLocation, code),
wait_req_scan(St);
{error,E} ->
diff --git a/lib/stdlib/src/erl_lint.erl b/lib/stdlib/src/erl_lint.erl
index e9ac2fcdff..e0cd68617b 100644
--- a/lib/stdlib/src/erl_lint.erl
+++ b/lib/stdlib/src/erl_lint.erl
@@ -2262,8 +2262,7 @@ expr({'fun',Line,Body}, Vt, St) ->
{[],St};
{function,M,F,A} ->
%% New in R15.
- {Bvt, St1} = expr_list([M,F,A], Vt, St),
- {vtupdate(Bvt, Vt),St1}
+ expr_list([M,F,A], Vt, St)
end;
expr({named_fun,_,'_',Cs}, Vt, St) ->
fun_clauses(Cs, Vt, St);
diff --git a/lib/stdlib/src/ms_transform.erl b/lib/stdlib/src/ms_transform.erl
index 3845e35e9b..6d243e1bec 100644
--- a/lib/stdlib/src/ms_transform.erl
+++ b/lib/stdlib/src/ms_transform.erl
@@ -946,6 +946,7 @@ real_guard_function(node,0) -> true;
real_guard_function(node,1) -> true;
real_guard_function(round,1) -> true;
real_guard_function(size,1) -> true;
+real_guard_function(bit_size,1) -> true;
real_guard_function(map_size,1) -> true;
real_guard_function(map_get,2) -> true;
real_guard_function(tl,1) -> true;
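With this entry, bit_size/1 is accepted in guards of funs passed to ets:fun2ms/1; a sketch assuming an existing ETS table Tab and a module that includes ms_transform.hrl:

  %% -include_lib("stdlib/include/ms_transform.hrl").
  MS = ets:fun2ms(fun({Key, Bin}) when bit_size(Bin) > 32 -> Key end),
  Keys = ets:select(Tab, MS).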
diff --git a/lib/stdlib/src/stdlib.appup.src b/lib/stdlib/src/stdlib.appup.src
index 8d1cc09a8b..8c0b186288 100644
--- a/lib/stdlib/src/stdlib.appup.src
+++ b/lib/stdlib/src/stdlib.appup.src
@@ -19,8 +19,10 @@
{"%VSN%",
%% Up from - max one major revision back
[{<<"3\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.*
- {<<"3\\.5(\\.[0-9]+)*">>,[restart_new_emulator]}],% OTP-21.*
+ {<<"3\\.5(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-21.0
+ {<<"3\\.6(\\.[0-9]+)*">>,[restart_new_emulator]}],% OTP-21.1
%% Down to - max one major revision back
[{<<"3\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.*
- {<<"3\\.5(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.*
+ {<<"3\\.5(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-21.0
+ {<<"3\\.6(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-21.1
}.
diff --git a/lib/stdlib/test/calendar_SUITE.erl b/lib/stdlib/test/calendar_SUITE.erl
index 55118e251c..df62c0921d 100644
--- a/lib/stdlib/test/calendar_SUITE.erl
+++ b/lib/stdlib/test/calendar_SUITE.erl
@@ -183,14 +183,15 @@ rfc3339(Config) when is_list(Config) ->
D = [{time_designator, $\s}],
Z = [{offset, "Z"}],
- "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12T23:20:50.52Z", Ms),
- "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12t23:20:50.52z", Ms),
- "1985-04-12T21:20:50.52Z" =
+ "1985-04-12T23:20:50.520Z" = test_parse("1985-04-12T23:20:50.52Z", Ms),
+ "1985-04-12T23:20:50.520Z" = test_parse("1985-04-12t23:20:50.52z", Ms),
+ "1985-04-12T21:20:50.520Z" =
test_parse("1985-04-12T23:20:50.52+02:00", Ms),
"1985-04-12T23:20:50Z" = test_parse("1985-04-12T23:20:50.52Z", S),
- "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12T23:20:50.52Z", Ms),
- "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12t23:20:50.52z", Mys),
- "1985-04-12 21:20:50.52Z" =
+ "1985-04-12T23:20:50.520Z" = test_parse("1985-04-12T23:20:50.52Z", Ms),
+ "1985-04-12T23:20:50.520000Z" =
+ test_parse("1985-04-12t23:20:50.52z", Mys),
+ "1985-04-12 21:20:50.520000000Z" =
test_parse("1985-04-12 23:20:50.52+02:00", Ns++D),
"1985-04-12T23:20:50Z" = test_parse("1985-04-12T23:20:50.52Z"),
"1996-12-20T00:39:57Z" = test_parse("1996-12-19T16:39:57-08:00"),
@@ -221,17 +222,20 @@ rfc3339(Config) when is_list(Config) ->
"1970-01-02T00:00:00Z" = test_parse("1970-01-01T23:59:60Z"),
"1970-01-02T00:00:00Z" = test_parse("1970-01-01T23:59:60.5Z"),
"1970-01-02T00:00:00Z" = test_parse("1970-01-01T23:59:60.55Z"),
- "1970-01-02T00:00:00.55Z" = test_parse("1970-01-01T23:59:60.55Z", Ms),
- "1970-01-02T00:00:00.55Z" = test_parse("1970-01-01T23:59:60.55Z", Mys),
- "1970-01-02T00:00:00.55Z" = test_parse("1970-01-01T23:59:60.55Z", Ns),
+ "1970-01-02T00:00:00.550Z" = test_parse("1970-01-01T23:59:60.55Z", Ms),
+ "1970-01-02T00:00:00.550000Z" =
+ test_parse("1970-01-01T23:59:60.55Z", Mys),
+ "1970-01-02T00:00:00.550000000Z" =
+ test_parse("1970-01-01T23:59:60.55Z", Ns),
"1970-01-02T00:00:00.999999Z" =
test_parse("1970-01-01T23:59:60.999999Z", Mys),
- "1970-01-02T00:00:01Z" =
+ "1970-01-02T00:00:01.000Z" =
test_parse("1970-01-01T23:59:60.999999Z", Ms),
"1970-01-01T00:00:00Z" = test_parse("1970-01-01T00:00:00+00:00"),
"1970-01-01T00:00:00Z" = test_parse("1970-01-01T00:00:00-00:00"),
"1969-12-31T00:01:00Z" = test_parse("1970-01-01T00:00:00+23:59"),
- "1918-11-11T09:00:00Z" = test_parse("1918-11-11T11:00:00+02:00", Mys),
+ "1918-11-11T09:00:00.000000Z" =
+ test_parse("1918-11-11T11:00:00+02:00", Mys),
"1970-01-01T00:00:00.000001Z" =
test_parse("1970-01-01T00:00:00.000001Z", Mys),
@@ -242,26 +246,26 @@ rfc3339(Config) when is_list(Config) ->
test_time(erlang:system_time(millisecond), Ms),
test_time(erlang:system_time(microsecond), Mys++[{offset, "-02:20"}]),
- T = erlang:system_time(second),
- TS = do_format(T, []),
- TS = do_format(T * 1000, Ms),
- TS = do_format(T * 1000 * 1000, Mys),
- TS = do_format(T * 1000 * 1000 * 1000, Ns),
-
946720800 = TO = do_parse("2000-01-01 10:00:00Z", []),
Str = "2000-01-01T10:02:00+00:02",
Str = do_format(TO, [{offset, 120}]),
- Str = do_format(TO * 1000, [{offset, 120 * 1000}]++Ms),
- Str = do_format(TO * 1000 * 1000, [{offset, 120 * 1000 * 1000}]++Mys),
- Str = do_format(TO * 1000 * 1000 * 1000,
- [{offset, 120 * 1000 * 1000 * 1000}]++Ns),
+ "2000-01-01T10:02:00.000+00:02" =
+ do_format(TO * 1000, [{offset, 120 * 1000}]++Ms),
+ "2000-01-01T10:02:00.000000+00:02" =
+ do_format(TO * 1000 * 1000, [{offset, 120 * 1000 * 1000}]++Mys),
+ "2000-01-01T10:02:00.000000000+00:02" =
+ do_format(TO * 1000 * 1000 * 1000,
+ [{offset, 120 * 1000 * 1000 * 1000}]++Ns),
NStr = "2000-01-01T09:58:00-00:02",
NStr = do_format(TO, [{offset, -120}]),
- NStr = do_format(TO * 1000, [{offset, -120 * 1000}]++Ms),
- NStr = do_format(TO * 1000 * 1000, [{offset, -120 * 1000 * 1000}]++Mys),
- NStr = do_format(TO * 1000 * 1000 * 1000,
- [{offset, -120 * 1000 * 1000 * 1000}]++Ns),
+ "2000-01-01T09:58:00.000-00:02" =
+ do_format(TO * 1000, [{offset, -120 * 1000}]++Ms),
+ "2000-01-01T09:58:00.000000-00:02" =
+ do_format(TO * 1000 * 1000, [{offset, -120 * 1000 * 1000}]++Mys),
+ "2000-01-01T09:58:00.000000000-00:02" =
+ do_format(TO * 1000 * 1000 * 1000,
+ [{offset, -120 * 1000 * 1000 * 1000}]++Ns),
543210000 = do_parse("1970-01-01T00:00:00.54321Z", Ns),
54321000 = do_parse("1970-01-01T00:00:00.054321Z", Ns),
@@ -278,18 +282,18 @@ rfc3339(Config) when is_list(Config) ->
-1613833200000000 = do_parse("1918-11-11T11:00:00+02:00", Mys),
-1613833200000000 = do_parse("1918-11-11T09:00:00Z", Mys),
- "1970-01-01T00:00:00Z" = do_format_z(0, Mys),
+ "1970-01-01T00:00:00.000000Z" = do_format_z(0, Mys),
"1970-01-01T00:00:01Z" = do_format_z(1, S),
"1970-01-01T00:00:00.001Z" = do_format_z(1, Ms),
"1970-01-01T00:00:00.000001Z" = do_format_z(1, Mys),
"1970-01-01T00:00:00.000000001Z" = do_format_z(1, Ns),
- "1970-01-01T00:00:01Z" = do_format_z(1000000, Mys),
- "1970-01-01T00:00:00.54321Z" = do_format_z(543210, Mys),
+ "1970-01-01T00:00:01.000000Z" = do_format_z(1000000, Mys),
+ "1970-01-01T00:00:00.543210Z" = do_format_z(543210, Mys),
"1970-01-01T00:00:00.543Z" = do_format_z(543, Ms),
- "1970-01-01T00:00:00.54321Z" = do_format_z(543210000, Ns),
- "1970-01-01T00:00:06.54321Z" = do_format_z(6543210, Mys),
- "1979-06-21T12:12:12Z" = do_format_z(298815132000000, Mys),
- "1918-11-11T13:00:00Z" = do_format_z(-1613818800000000, Mys),
+ "1970-01-01T00:00:00.543210000Z" = do_format_z(543210000, Ns),
+ "1970-01-01T00:00:06.543210Z" = do_format_z(6543210, Mys),
+ "1979-06-21T12:12:12.000000Z" = do_format_z(298815132000000, Mys),
+ "1918-11-11T13:00:00.000000Z" = do_format_z(-1613818800000000, Mys),
ok.
%%
diff --git a/lib/stdlib/test/epp_SUITE.erl b/lib/stdlib/test/epp_SUITE.erl
index a3e294ffea..0ac99ad03a 100644
--- a/lib/stdlib/test/epp_SUITE.erl
+++ b/lib/stdlib/test/epp_SUITE.erl
@@ -29,7 +29,7 @@
otp_8562/1, otp_8665/1, otp_8911/1, otp_10302/1, otp_10820/1,
otp_11728/1, encoding/1, extends/1, function_macro/1,
test_error/1, test_warning/1, otp_14285/1,
- test_if/1]).
+ test_if/1,source_name/1]).
-export([epp_parse_erl_form/2]).
@@ -70,7 +70,7 @@ all() ->
overload_mac, otp_8388, otp_8470, otp_8562,
otp_8665, otp_8911, otp_10302, otp_10820, otp_11728,
encoding, extends, function_macro, test_error, test_warning,
- otp_14285, test_if].
+ otp_14285, test_if, source_name].
groups() ->
[{upcase_mac, [], [upcase_mac_1, upcase_mac_2]},
@@ -1702,6 +1702,18 @@ function_macro(Config) ->
ok.
+source_name(Config) when is_list(Config) ->
+ DataDir = proplists:get_value(data_dir, Config),
+ File = filename:join(DataDir, "source_name.erl"),
+
+ source_name_1(File, "/test/gurka.erl"),
+ source_name_1(File, "gaffel.erl"),
+
+ ok.
+
+source_name_1(File, Expected) ->
+ Res = epp:parse_file(File, [{source_name, Expected}]),
+ {ok, [{attribute,_,file,{Expected,_}} | _Forms]} = Res.
check(Config, Tests) ->
eval_tests(Config, fun check_test/2, Tests).
diff --git a/lib/stdlib/test/epp_SUITE_data/source_name.erl b/lib/stdlib/test/epp_SUITE_data/source_name.erl
new file mode 100644
index 0000000000..71ad2dddb9
--- /dev/null
+++ b/lib/stdlib/test/epp_SUITE_data/source_name.erl
@@ -0,0 +1,27 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+-module(source_name).
+-export([ok/0]).
+
+%% Changing the source name should not affect headers.
+-include("bar.hrl").
+
+ok() -> ok.
diff --git a/lib/stdlib/test/erl_lint_SUITE.erl b/lib/stdlib/test/erl_lint_SUITE.erl
index f9ab83a120..d18c7c255c 100644
--- a/lib/stdlib/test/erl_lint_SUITE.erl
+++ b/lib/stdlib/test/erl_lint_SUITE.erl
@@ -67,7 +67,8 @@
record_errors/1, otp_11879_cont/1,
non_latin1_module/1, otp_14323/1,
stacktrace_syntax/1,
- otp_14285/1, otp_14378/1]).
+ otp_14285/1, otp_14378/1,
+ external_funs/1]).
suite() ->
[{ct_hooks,[ts_install_cth]},
@@ -88,7 +89,7 @@ all() ->
maps, maps_type, maps_parallel_match,
otp_11851, otp_11879, otp_13230,
record_errors, otp_11879_cont, non_latin1_module, otp_14323,
- stacktrace_syntax, otp_14285, otp_14378].
+ stacktrace_syntax, otp_14285, otp_14378, external_funs].
groups() ->
[{unused_vars_warn, [],
@@ -4134,6 +4135,21 @@ otp_14285(Config) ->
run(Config, Ts),
ok.
+external_funs(Config) when is_list(Config) ->
+ Ts = [{external_funs_1,
+ %% ERL-762: Unused variable warning not being emitted.
+ <<"f() ->
+ BugVar = process_info(self()),
+ if true -> fun m:f/1 end.
+ f(M, F) ->
+ BugVar = process_info(self()),
+ if true -> fun M:F/1 end.">>,
+ [],
+ {warnings,[{2,erl_lint,{unused_var,'BugVar'}},
+ {5,erl_lint,{unused_var,'BugVar'}}]}}],
+ run(Config, Ts),
+ ok.
+
format_error(E) ->
lists:flatten(erl_lint:format_error(E)).
diff --git a/lib/stdlib/test/ets_SUITE.erl b/lib/stdlib/test/ets_SUITE.erl
index d8912e548c..aa2e352c29 100644
--- a/lib/stdlib/test/ets_SUITE.erl
+++ b/lib/stdlib/test/ets_SUITE.erl
@@ -41,7 +41,7 @@
-export([t_delete_object/1, t_init_table/1, t_whitebox/1,
select_bound_chunk/1,
t_delete_all_objects/1, t_insert_list/1, t_test_ms/1,
- t_select_delete/1,t_select_replace/1,t_ets_dets/1]).
+ t_select_delete/1,t_select_replace/1,t_select_replace_next_bug/1,t_ets_dets/1]).
-export([ordered/1, ordered_match/1, interface_equality/1,
fixtable_next/1, fixtable_insert/1, rename/1, rename_unnamed/1, evil_rename/1,
@@ -125,6 +125,7 @@ all() ->
select_bound_chunk,
t_init_table, t_whitebox, t_delete_all_objects,
t_insert_list, t_test_ms, t_select_delete, t_select_replace,
+ t_select_replace_next_bug,
t_ets_dets, memory, t_select_reverse, t_bucket_disappears,
t_named_select,
select_fail, t_insert_new, t_repair_continuation,
@@ -1466,6 +1467,25 @@ t_select_replace(Config) when is_list(Config) ->
verify_etsmem(EtsMem).
+%% OTP-15346: Bug caused select_replace of bound key to corrupt static stack
+%% used by ets:next and ets:prev.
+t_select_replace_next_bug(Config) when is_list(Config) ->
+ T = ets:new(k, [ordered_set]),
+ [ets:insert(T, {I, value}) || I <- lists:seq(1,10)],
+ 1 = ets:first(T),
+
+ %% Make sure select_replace does not leave pointer
+ %% to deallocated {2,value} in static stack.
+ MS = [{{2,value}, [], [{{2,"new_value"}}]}],
+ 1 = ets:select_replace(T, MS),
+
+ %% This would crash or give wrong result at least on DEBUG emulator
+ %% where deallocated memory is overwritten.
+ 2 = ets:next(T, 1),
+
+ ets:delete(T).
+
+
%% Test that partly bound keys gives faster matches.
partly_bound(Config) when is_list(Config) ->
case os:type() of
diff --git a/lib/stdlib/test/gen_fsm_SUITE.erl b/lib/stdlib/test/gen_fsm_SUITE.erl
index 41ee3246f5..a8264e5a84 100644
--- a/lib/stdlib/test/gen_fsm_SUITE.erl
+++ b/lib/stdlib/test/gen_fsm_SUITE.erl
@@ -124,8 +124,10 @@ start2(Config) when is_list(Config) ->
{ok, Pid0} = gen_fsm:start(gen_fsm_SUITE, [], []),
ok = do_func_test(Pid0),
ok = do_sync_func_test(Pid0),
+ MRef = monitor(process,Pid0),
shutdown_stopped =
gen_fsm:sync_send_all_state_event(Pid0, stop_shutdown),
+ receive {'DOWN',MRef,_,_,shutdown} -> ok end,
{'EXIT', {noproc,_}} =
(catch gen_fsm:sync_send_event(Pid0, hej)),
diff --git a/lib/stdlib/test/lists_SUITE.erl b/lib/stdlib/test/lists_SUITE.erl
index 837ab4e97e..984b51e7ae 100644
--- a/lib/stdlib/test/lists_SUITE.erl
+++ b/lib/stdlib/test/lists_SUITE.erl
@@ -2597,6 +2597,20 @@ subtract(Config) when is_list(Config) ->
{'EXIT',_} = (catch sub([a|b], [])),
{'EXIT',_} = (catch sub([a|b], [a])),
+ %% Trapping, both crashing and otherwise.
+ [sub_trapping(N) || N <- lists:seq(0, 18)],
+
+ %% The current implementation chooses which algorithm to use based on
+ %% certain thresholds, and we need proper coverage for all corner cases.
+ [sub_thresholds(N) || N <- lists:seq(0, 32)],
+
+ %% Trapping, both crashing and otherwise.
+ [sub_trapping(N) || N <- lists:seq(0, 18)],
+
+ %% The current implementation chooses which algorithm to use based on
+ %% certain thresholds, and we need proper coverage for all corner cases.
+ [sub_thresholds(N) || N <- lists:seq(0, 32)],
+
ok.
sub_non_matching(A, B) ->
@@ -2606,6 +2620,41 @@ sub(A, B) ->
Res = A -- B,
Res = lists:subtract(A, B).
+sub_trapping(N) ->
+ List = lists:duplicate(N + (1 bsl N), gurka),
+ ImproperList = List ++ crash,
+
+ {'EXIT',_} = (catch sub_trapping_1(ImproperList, [])),
+ {'EXIT',_} = (catch sub_trapping_1(List, ImproperList)),
+
+ List = List -- lists:duplicate(N + (1 bsl N), gaffel),
+ ok = sub_trapping_1(List, []).
+
+sub_trapping_1([], _) -> ok;
+sub_trapping_1(L, R) -> sub_trapping_1(L -- R, [gurka | R]).
+
+sub_thresholds(N) ->
+ %% This needs to be long enough to cause trapping.
+ OtherLen = 1 bsl 18,
+ Other = lists:seq(0, OtherLen - 1),
+
+ Disjoint = lists:seq(-N, -1),
+ Subset = lists:seq(1, N),
+
+ %% LHS is disjoint from RHS, so all elements must be retained.
+ Disjoint = Disjoint -- Other,
+
+ %% LHS is covered by RHS, so all elements must be removed.
+ [] = Subset -- Other,
+
+ %% RHS is disjoint from LHS, so all elements must be retained.
+ Other = Other -- Disjoint,
+
+ %% RHS is covered by LHS, so N elements must be removed.
+ N = OtherLen - length(Other -- Subset),
+
+ ok.
+
%% Test lists:droplast/1
droplast(Config) when is_list(Config) ->
[] = lists:droplast([x]),
diff --git a/lib/stdlib/test/rand_SUITE.erl b/lib/stdlib/test/rand_SUITE.erl
index d753d929f5..b76c9f5341 100644
--- a/lib/stdlib/test/rand_SUITE.erl
+++ b/lib/stdlib/test/rand_SUITE.erl
@@ -475,10 +475,11 @@ stats_standard_normal_box_muller_2(Config) when is_list(Config) ->
stats_standard_normal(Config) when is_list(Config) ->
+ Retries = 7,
try math:erfc(1.0) of
_ ->
stats_standard_normal(
- fun rand:normal_s/1, rand:seed_s(exrop), 3)
+ fun rand:normal_s/1, rand:seed_s(exrop), Retries)
catch error:_ ->
{skip, "math:erfc/1 not supported"}
end.
diff --git a/lib/stdlib/test/sys_SUITE.erl b/lib/stdlib/test/sys_SUITE.erl
index 3278eb0eb0..fcc4419569 100644
--- a/lib/stdlib/test/sys_SUITE.erl
+++ b/lib/stdlib/test/sys_SUITE.erl
@@ -219,7 +219,7 @@ spec_proc(Mod) ->
{Mod,system_get_state},{throw,fail}},_}} ->
ok
end,
- ok = sys:terminate(Mod, normal),
+ ok = sync_terminate(Mod),
{ok,_} = Mod:start_link(4),
ok = case catch sys:replace_state(Mod, fun(_) -> {} end) of
{} ->
@@ -228,7 +228,7 @@ spec_proc(Mod) ->
{Mod,system_replace_state},{throw,fail}},_}} ->
ok
end,
- ok = sys:terminate(Mod, normal),
+ ok = sync_terminate(Mod),
{ok,_} = Mod:start_link(4),
StateFun = fun(_) -> error(fail) end,
ok = case catch sys:replace_state(Mod, StateFun) of
@@ -240,7 +240,18 @@ spec_proc(Mod) ->
{'EXIT',{{callback_failed,StateFun,{error,fail}},_}} ->
ok
end,
- ok = sys:terminate(Mod, normal).
+ ok = sync_terminate(Mod).
+
+sync_terminate(Mod) ->
+ P = whereis(Mod),
+ MRef = erlang:monitor(process,P),
+ ok = sys:terminate(Mod, normal),
+ receive
+ {'DOWN',MRef,_,_,normal} ->
+ ok
+ end,
+ undefined = whereis(Mod),
+ ok.
%%%%%%%%%%%%%%%%%%%%
%% Dummy server
diff --git a/lib/stdlib/test/unicode_util_SUITE.erl b/lib/stdlib/test/unicode_util_SUITE.erl
index 962b307b07..044b4e5834 100644
--- a/lib/stdlib/test/unicode_util_SUITE.erl
+++ b/lib/stdlib/test/unicode_util_SUITE.erl
@@ -126,17 +126,30 @@ verify_gc(Line0, N, Acc) ->
%io:format("Line: ~s~n",[Line]),
[Data|_Comments] = string:tokens(Line, "#"),
- %io:format("Data: ~w~n",[string:tokens(Data, " \t")]),
+ %% io:format("Data: ~w~n",[string:tokens(Data, " \t")]),
{Str,Res} = gc_test_data(string:tokens(Data, " \t"), [], [[]]),
- try
- Res = fetch(Str, fun unicode_util:gc/1),
- Acc
- catch _Cl:{badmatch, Other} ->
+ %% io:format("InputStr: ~w ~w~n",[Str,unicode:characters_to_binary(Str)]),
+ case verify_gc(Str, Res, N, Line) andalso
+ verify_gc(unicode:characters_to_binary(Str), Res, N, Line0) of
+ true -> Acc;
+ false -> Acc+1
+ end.
+
+verify_gc({error,_,[CP|_]}=Err, _Res, N, Line) ->
+ IsSurrogate = 16#D800 =< CP andalso CP =< 16#DFFF,
+ %% Surrogates are not valid in utf8 encoding, only in utf16
+ IsSurrogate orelse
+ io:format("~w: ~ts~n Error in unicode:characters_to_binary ~w~n", [N, Line, Err]),
+ IsSurrogate;
+verify_gc(Str, Res, N, Line) ->
+ try fetch(Str, fun unicode_util:gc/1) of
+ Res -> true;
+ Other ->
io:format("Failed: ~p~nInput: ~ts~n\t=> ~w |~ts|~n",[N, Line, Str, Str]),
io:format("Expected: ~p~n", [Res]),
io:format("Got: ~w~n", [Other]),
- Acc+1;
- Cl:R:Stacktrace ->
+ false
+ catch Cl:R:Stacktrace ->
io:format("~p: ~ts => |~tp|~n",[N, Line, Str]),
io:format("Expected: ~p~n", [Res]),
erlang:raise(Cl,R,Stacktrace)
diff --git a/lib/stdlib/uc_spec/gen_unicode_mod.escript b/lib/stdlib/uc_spec/gen_unicode_mod.escript
index fe5a860d45..535f01a1c5 100755
--- a/lib/stdlib/uc_spec/gen_unicode_mod.escript
+++ b/lib/stdlib/uc_spec/gen_unicode_mod.escript
@@ -646,7 +646,7 @@ gen_gc(Fd, GBP) ->
io:put_chars(Fd, "is_emodifier(_) -> false.\n\n"),
io:put_chars(Fd, "gc_zwj(R0, Acc) ->\n case cp(R0) of\n"),
- GenZWJGlue = fun(Range) -> io:format(Fd, "~8c~s gc_extend(R1, R0, [CP|Acc]);\n",
+ GenZWJGlue = fun(Range) -> io:format(Fd, "~8c~s gc_extend(cp(R1), R0, [CP|Acc]);\n",
[$\s,gen_case_clause(Range)]) end,
[GenZWJGlue(CP) || CP <- merge_ranges(maps:get(glue_after_zwj,GBP))],
GenZWJEBG = fun(Range) -> io:format(Fd, "~8c~s gc_e_cont(R1, [CP|Acc]);\n",
diff --git a/lib/tools/emacs/Makefile b/lib/tools/emacs/Makefile
index ea4d6cb723..b7775d1c8c 100644
--- a/lib/tools/emacs/Makefile
+++ b/lib/tools/emacs/Makefile
@@ -46,6 +46,7 @@ EMACS_FILES= \
erlang-eunit \
erlang-edoc \
erlang-flymake \
+ erlang-test \
erlang
README_FILES= README
diff --git a/lib/tools/emacs/erlang-edoc.el b/lib/tools/emacs/erlang-edoc.el
index d0dcc81028..ea1e263faf 100644
--- a/lib/tools/emacs/erlang-edoc.el
+++ b/lib/tools/emacs/erlang-edoc.el
@@ -28,6 +28,7 @@
(defcustom erlang-edoc-indent-level 2
"Indentation level of xhtml in Erlang edoc."
+ :type '(integer)
:safe 'integerp
:group 'erlang)
diff --git a/lib/tools/emacs/erlang-eunit.el b/lib/tools/emacs/erlang-eunit.el
index 38c40927f4..53543d7b01 100644
--- a/lib/tools/emacs/erlang-eunit.el
+++ b/lib/tools/emacs/erlang-eunit.el
@@ -23,6 +23,7 @@
(eval-when-compile
(require 'cl))
+(require 'erlang)
(defvar erlang-eunit-src-candidate-dirs '("../src" ".")
"*Name of directories which to search for source files matching
@@ -331,8 +332,7 @@ With prefix arg, compiles for debug and runs tests with the verbose flag set."
t)
(apply test-fun test-args)
(if under-cover
- (save-excursion
- (set-buffer (find-file-noselect src-filename))
+ (with-current-buffer (find-file-noselect src-filename)
(erlang-eunit-analyze-coverage)))))))
(defun erlang-eunit-compile-and-run-module-tests-under-cover ()
@@ -348,8 +348,7 @@ With prefix arg, compiles for debug and runs tests with the verbose flag set."
(defun erlang-eunit-compile-file (file-path &optional under-cover)
(if (file-readable-p file-path)
- (save-excursion
- (set-buffer (find-file-noselect file-path))
+ (with-current-buffer (find-file-noselect file-path)
;; In order to run a code coverage analysis on a
;; module, we have two options:
;;
@@ -376,8 +375,7 @@ With prefix arg, compiles for debug and runs tests with the verbose flag set."
(error msg))))
(defun erlang-eunit-last-compilation-successful-p ()
- (save-excursion
- (set-buffer inferior-erlang-buffer)
+ (with-current-buffer inferior-erlang-buffer
(goto-char compilation-parsing-end)
(erlang-eunit-all-list-elems-fulfill-p
(lambda (re) (let ((continue t)
diff --git a/lib/tools/emacs/erlang-pkg.el b/lib/tools/emacs/erlang-pkg.el
index 02d6bebbf4..7e95e4050e 100644
--- a/lib/tools/emacs/erlang-pkg.el
+++ b/lib/tools/emacs/erlang-pkg.el
@@ -1,3 +1,6 @@
(define-package "erlang" "2.7.0"
"Erlang major mode"
'((emacs "24.1")))
+;; Local Variables:
+;; no-byte-compile: t
+;; End:
diff --git a/lib/tools/emacs/erlang-skels.el b/lib/tools/emacs/erlang-skels.el
index 534f50ab33..3ebc6e8e1e 100644
--- a/lib/tools/emacs/erlang-skels.el
+++ b/lib/tools/emacs/erlang-skels.el
@@ -1985,7 +1985,7 @@ configured off."
The first character of DD is space if the value is less than 10."
(let ((date (current-time-string)))
(format "%2d %s %s"
- (string-to-int (substring date 8 10))
+ (string-to-number (substring date 8 10))
(substring date 4 7)
(substring date -4))))
diff --git a/lib/tools/emacs/erlang-test.el b/lib/tools/emacs/erlang-test.el
index efe3d515e9..2ee584d11a 100644
--- a/lib/tools/emacs/erlang-test.el
+++ b/lib/tools/emacs/erlang-test.el
@@ -29,7 +29,7 @@
;; This library require GNU Emacs 25 or later.
;;
-;; There are two ways to run emacs unit tests.
+;; There are three ways to run the erlang emacs unit tests.
;;
;; 1. Within a running emacs process. Load this file. Then to run
;; all defined test cases:
@@ -49,11 +49,15 @@
;;
;; The -L option adds a directory to the load-path. It should be the
;; directory containing erlang.el and erlang-test.el.
+;;
+;; 3. Call the script test-erlang-mode in this directory. This script
+;; uses the second method.
;;; Code:
+(eval-when-compile
+ (require 'cl))
(require 'ert)
-(require 'cl-lib)
(require 'erlang)
(defvar erlang-test-code
@@ -63,7 +67,7 @@
("SYMBOL" . "-define(SYMBOL, value).")
("MACRO" . "-define(MACRO(X), X + X).")
("struct" . "-record(struct, {until,maps,are,everywhere}).")
- ("function". "function() -> #struct{}."))
+ ("function" . "function() -> #struct{}."))
"Alist of erlang test code.
Each entry have the format (TAGNAME . ERLANG_CODE). If TAGNAME
is nil there is no definitions in the ERLANG_CODE. The
@@ -116,8 +120,8 @@ concatenated to form an erlang file to test on.")
(defun erlang-test-create-erlang-file (erlang-file)
(with-temp-file erlang-file
- (cl-loop for (_ . code) in erlang-test-code
- do (insert code "\n"))))
+ (loop for (_ . code) in erlang-test-code
+ do (insert code "\n"))))
(defun erlang-test-compile-tags (erlang-file tags-file)
(should (zerop (call-process "etags" nil nil nil
@@ -132,19 +136,20 @@ concatenated to form an erlang file to test on.")
(sort (erlang-expected-completion-table) #'string-lessp))))
(defun erlang-expected-completion-table ()
- (append (cl-loop for (symbol . _) in erlang-test-code
- when (stringp symbol)
- append (list symbol (concat "erlang_test:" symbol)))
+ (append (loop for (symbol . _) in erlang-test-code
+ when (stringp symbol)
+ append (list symbol (concat "erlang_test:" symbol)))
(list "erlang_test:" "erlang_test:module_info")))
(defun erlang-test-xref-find-definitions (erlang-file erlang-buffer)
- (cl-loop for (tagname . code) in erlang-test-code
- for line = 1 then (1+ line)
- do (when tagname
- (switch-to-buffer erlang-buffer)
- (erlang-test-xref-jump tagname erlang-file line)
- (erlang-test-xref-jump (concat "erlang_test:" tagname)
- erlang-file line)))
+ (loop for (tagname . code) in erlang-test-code
+ for line = 1 then (1+ line)
+ do (when tagname
+ (switch-to-buffer erlang-buffer)
+ (erlang-test-xref-jump tagname erlang-file line)
+ (when (string-equal tagname "function")
+ (erlang-test-xref-jump (concat "erlang_test:" tagname)
+ erlang-file line))))
(erlang-test-xref-jump "erlang_test:" erlang-file 1))
(defun erlang-test-xref-jump (id expected-file expected-line)
@@ -213,27 +218,27 @@ concatenated to form an erlang file to test on.")
(ert-deftest erlang-test-parse-id ()
- (cl-loop for id-string in '("fun/10"
- "qualified-function module:fun/10"
- "record reko"
- "macro _SYMBOL"
- "macro MACRO/10"
- "module modula"
- "macro"
- nil)
- for id-list in '((nil nil "fun" 10)
- (qualified-function "module" "fun" 10)
- (record nil "reko" nil)
- (macro nil "_SYMBOL" nil)
- (macro nil "MACRO" 10)
- (module nil "modula" nil)
- (nil nil "macro" nil)
- nil)
- for id-list2 = (erlang-id-to-list id-string)
- do (should (equal id-list id-list2))
- for id-string2 = (erlang-id-to-string id-list)
- do (should (equal id-string id-string2))
- collect id-list2))
+ (loop for id-string in '("fun/10"
+ "qualified-function module:fun/10"
+ "record reko"
+ "macro _SYMBOL"
+ "macro MACRO/10"
+ "module modula"
+ "macro"
+ nil)
+ for id-list in '((nil nil "fun" 10)
+ (qualified-function "module" "fun" 10)
+ (record nil "reko" nil)
+ (macro nil "_SYMBOL" nil)
+ (macro nil "MACRO" 10)
+ (module nil "modula" nil)
+ (nil nil "macro" nil)
+ nil)
+ for id-list2 = (erlang-id-to-list id-string)
+ do (should (equal id-list id-list2))
+ for id-string2 = (erlang-id-to-string id-list)
+ do (should (equal id-string id-string2))
+ collect id-list2))
(provide 'erlang-test)
diff --git a/lib/tools/emacs/erlang.el b/lib/tools/emacs/erlang.el
index 82e5c2222d..3cbe9daa60 100644
--- a/lib/tools/emacs/erlang.el
+++ b/lib/tools/emacs/erlang.el
@@ -78,6 +78,8 @@
(eval-when-compile (require 'cl))
(require 'align)
+(require 'comint)
+(require 'tempo)
;; Variables:
@@ -334,6 +336,7 @@ when a new function header is generated. When nil, no blank line is
inserted between the current line and the new header. When bound to a
number it represents the number of blank lines which should be
inserted."
+ :type '(restricted-sexp :match-alternatives (integerp 'nil))
:group 'erlang)
(defvar erlang-electric-semicolon-criteria
@@ -1711,10 +1714,10 @@ Personal extensions could be added to `erlang-menu-personal-items'.
This function should be called if any variable describing the
menu configuration is changed."
- (erlang-menu-install "Erlang" erlang-menu-items erlang-mode-map t))
+ (erlang-menu-install "Erlang" erlang-menu-items erlang-mode-map))
-(defun erlang-menu-install (name items keymap &optional popup)
+(defun erlang-menu-install (name items keymap)
"Install a menu in Emacs based on an abstract description.
NAME is the name of the menu.
@@ -3694,16 +3697,17 @@ retried without regard to module.
4. Arity - Integer in case of functions and macros if the number
of arguments could be found, otherwise nil."
(save-excursion
- (save-match-data
- (if (eq (char-syntax (following-char)) ? )
- (skip-chars-backward " \t"))
- (skip-chars-backward "[:word:]_:'")
- (cond ((looking-at erlang-module-function-regexp)
- (erlang-get-qualified-function-id-at-point))
- ((looking-at (concat erlang-atom-regexp ":"))
- (erlang-get-module-id-at-point))
- ((looking-at erlang-name-regexp)
- (erlang-get-some-other-id-at-point))))))
+ (let (case-fold-search)
+ (save-match-data
+ (if (eq (char-syntax (following-char)) ? )
+ (skip-chars-backward " \t"))
+ (skip-chars-backward "[:word:]_:'")
+ (cond ((looking-at erlang-module-function-regexp)
+ (erlang-get-qualified-function-id-at-point))
+ ((looking-at (concat erlang-atom-regexp ":"))
+ (erlang-get-module-id-at-point))
+ ((looking-at erlang-name-regexp)
+ (erlang-get-some-other-id-at-point)))))))
(defun erlang-get-qualified-function-id-at-point ()
(let ((kind 'qualified-function)
@@ -4207,22 +4211,18 @@ Return t if criteria fulfilled, nil otherwise."
nil)))))
-(defun erlang-in-literal (&optional lim)
+(defun erlang-in-literal ()
"Test if point is in string, quoted atom or comment.
Return one of the three atoms `atom', `string', and `comment'.
Should the point be inside none of the above mentioned types of
context, nil is returned."
(save-excursion
- (let* ((lim (or lim (save-excursion
- (erlang-beginning-of-clause)
- (point))))
- (state (funcall (symbol-function 'syntax-ppss))))
- (cond
- ((eq (nth 3 state) ?') 'atom)
- ((nth 3 state) 'string)
- ((nth 4 state) 'comment)
- (t nil)))))
+ (let ((state (funcall (symbol-function 'syntax-ppss))))
+ (cond ((eq (nth 3 state) ?') 'atom)
+ ((nth 3 state) 'string)
+ ((nth 4 state) 'comment)
+ (t nil)))))
(defun erlang-at-end-of-function-p ()
@@ -5041,7 +5041,10 @@ considered first when it is time to jump to the definition.")
(defun erlang-visit-tags-table-buffer (cont cbuf)
(if (< emacs-major-version 26)
(visit-tags-table-buffer cont)
- (visit-tags-table-buffer cont cbuf)))
+ ;; Remove this with-no-warnings when Emacs 26 is the minimum
+ ;; required version.
+ (with-no-warnings
+ (visit-tags-table-buffer cont cbuf))))
(defun erlang-xref-find-definitions-module-tag (module
tag
@@ -5536,7 +5539,7 @@ Return the position after the newly inserted command."
(+ insert-point insert-length)))
-(defun inferior-erlang-strip-delete (&optional s)
+(defun inferior-erlang-strip-delete (&optional _s)
"Remove `^H' (delete) and the characters it was supposed to remove."
(interactive)
(if (and (boundp 'comint-last-input-end)
@@ -5554,7 +5557,7 @@ Return the position after the newly inserted command."
;; Basically `comint-strip-ctrl-m', with a few extra checks.
-(defun inferior-erlang-strip-ctrl-m (&optional string)
+(defun inferior-erlang-strip-ctrl-m (&optional _string)
"Strip trailing `^M' characters from the current output group."
(interactive)
(if (and (boundp 'comint-last-input-end)
@@ -5591,8 +5594,8 @@ There exists two workarounds for this bug:
(let* ((dir (inferior-erlang-compile-outdir))
(noext (substring (erlang-local-buffer-file-name) 0 -4))
(opts (append (list (cons 'outdir dir))
- (if current-prefix-arg
- (list 'debug_info 'export_all))
+ (when arg
+ (list 'debug_info 'export_all))
erlang-compile-extra-opts))
end)
(with-current-buffer inferior-erlang-buffer
@@ -5641,7 +5644,6 @@ unless the optional NO-DISPLAY is non-nil."
(defun inferior-erlang-compute-compile-command (module-name opts)
(let ((ccfn erlang-compile-command-function-alist)
- (res (inferior-erlang-compute-erl-compile-command module-name opts))
ccfn-entry
done
result)
diff --git a/lib/tools/emacs/erlang_appwiz.el b/lib/tools/emacs/erlang_appwiz.el
index ecbce66f47..b71c180739 100644
--- a/lib/tools/emacs/erlang_appwiz.el
+++ b/lib/tools/emacs/erlang_appwiz.el
@@ -103,6 +103,10 @@
;;
;;
+(defvar appwiz-erlang-modulename "foo")
+(defvar appwiz-erlang-ext "_work")
+
+
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
;; Erlang application wizard
@@ -245,13 +249,6 @@ creating the root directory and for naming application files."
(insert "Application specification file for " name ".")
(save-buffer)))
-;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
-;;
-;; These are setq:ed
-;;
-
-(defvar appwiz-erlang-modulename "foo")
-(defvar appwiz-erlang-ext "_work")
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;
@@ -468,7 +465,7 @@ Call the function `erlang-menu-init' after modifying this variable.")
The first character of DD is *not* space if the value is less than 10."
(let ((date (current-time-string)))
(format "%d %s %s"
- (string-to-int (substring date 8 10))
+ (string-to-number (substring date 8 10))
(substring date 4 7)
(substring date -4))))
diff --git a/lib/tools/src/eprof.erl b/lib/tools/src/eprof.erl
index 535ddbcd04..86e3d3a8b8 100644
--- a/lib/tools/src/eprof.erl
+++ b/lib/tools/src/eprof.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -26,11 +26,11 @@
-export([start/0,
stop/0,
- dump/0,
+ dump/0, dump_data/0,
start_profiling/1, start_profiling/2, start_profiling/3,
profile/1, profile/2, profile/3, profile/4, profile/5,
stop_profiling/0,
- analyze/0, analyze/1, analyze/2,
+ analyze/0, analyze/1, analyze/2, analyze/4,
log/1]).
%% Internal exports
@@ -117,6 +117,9 @@ profile(Rootset, M, F, A, Pattern, Options) ->
dump() ->
gen_server:call(?MODULE, dump, infinity).
+dump_data() ->
+ gen_server:call(?MODULE, dump_data, infinity).
+
log(File) ->
gen_server:call(?MODULE, {logfile, File}, infinity).
@@ -151,22 +154,18 @@ init([]) ->
%% analyze
-handle_call({analyze, _, _}, _, #state{ bpd = #bpd{ p = {0,nil}, us = 0, n = 0} = Bpd } = S) when is_record(Bpd, bpd) ->
+handle_call(
+ {analyze, _, _}, _,
+ #state{ bpd = #bpd{ p = {0,nil}, us = 0, n = 0 } } = S) ->
{reply, nothing_to_analyze, S};
-handle_call({analyze, procs, Opts}, _, #state{ bpd = #bpd{ p = Ps, us = Tus} = Bpd, fd = Fd} = S) when is_record(Bpd, bpd) ->
- lists:foreach(fun
- ({Pid, Mfas}) ->
- {Pn, Pus} = sum_bp_total_n_us(Mfas),
- format(Fd, "~n****** Process ~w -- ~s % of profiled time *** ~n", [Pid, s("~.2f", [100.0*divide(Pus,Tus)])]),
- print_bp_mfa(Mfas, {Pn,Pus}, Fd, Opts),
- ok
- end, gb_trees:to_list(Ps)),
- {reply, ok, S};
+handle_call({analyze, procs, Opts}, _, #state{ bpd = Bpd, fd = Fd } = S)
+ when is_record(Bpd, bpd) ->
+ {reply, analyze(Fd, procs, Opts, Bpd), S};
-handle_call({analyze, total, Opts}, _, #state{ bpd = #bpd{ mfa = Mfas, n = Tn, us = Tus} = Bpd, fd = Fd} = S) when is_record(Bpd, bpd) ->
- print_bp_mfa(Mfas, {Tn, Tus}, Fd, Opts),
- {reply, ok, S};
+handle_call({analyze, total, Opts}, _, #state{ bpd = Bpd, fd = Fd } = S)
+ when is_record(Bpd, bpd) ->
+ {reply, analyze(Fd, total, Opts, Bpd), S};
handle_call({analyze, Type, _Opts}, _, S) ->
{reply, {error, {undefined, Type}}, S};
@@ -260,6 +259,10 @@ handle_call({logfile, File}, _From, #state{ fd = OldFd } = S) ->
handle_call(dump, _From, #state{ bpd = Bpd } = S) when is_record(Bpd, bpd) ->
{reply, gb_trees:to_list(Bpd#bpd.p), S};
+handle_call(dump_data, _, #state{ bpd = #bpd{} = Bpd } = S)
+ when is_record(Bpd, bpd) ->
+ {reply, Bpd, S};
+
handle_call(stop, _FromTag, S) ->
{stop, normal, stopped, S}.
@@ -438,6 +441,23 @@ collect_bpdfp(Mfa, Tree, Data) ->
{PTno + Ni, PTuso + Time, Ti1}
end, {0,0, Tree}, Data).
+
+
+analyze(Fd, procs, Opts, #bpd{ p = Ps, us = Tus }) ->
+ lists:foreach(
+ fun
+ ({Pid, Mfas}) ->
+ {Pn, Pus} = sum_bp_total_n_us(Mfas),
+ format(
+ Fd,
+ "~n****** Process ~w -- ~s % of profiled time *** ~n",
+ [Pid, s("~.2f", [100.0*divide(Pus, Tus)])]),
+ print_bp_mfa(Mfas, {Pn,Pus}, Fd, Opts),
+ ok
+ end, gb_trees:to_list(Ps));
+analyze(Fd, total, Opts, #bpd{ mfa = Mfas, n = Tn, us = Tus } ) ->
+ print_bp_mfa(Mfas, {Tn, Tus}, Fd, Opts).
+
%% manipulators
sort_mfa(Bpfs, mfa) when is_list(Bpfs) ->
lists:sort(fun
diff --git a/lib/tools/test/emacs_SUITE.erl b/lib/tools/test/emacs_SUITE.erl
index 5839f9ce5b..a6d43d1816 100644
--- a/lib/tools/test/emacs_SUITE.erl
+++ b/lib/tools/test/emacs_SUITE.erl
@@ -23,18 +23,28 @@
-export([all/0, init_per_testcase/2, end_per_testcase/2]).
--export([bif_highlight/1, indent/1]).
+-export([bif_highlight/1,
+ load_interpreted/1, compile_and_load/1,
+ indent/1,
+ tests_interpreted/1, tests_compiled/1
+ ]).
all() ->
- [bif_highlight, indent].
+ [bif_highlight, load_interpreted, compile_and_load,
+ indent,
+ tests_interpreted, tests_compiled
+ ].
-init_per_testcase(_Case, Config) ->
+init_per_testcase(Case, Config) ->
ErlangEl = filename:join([code:lib_dir(tools),"emacs","erlang.el"]),
case file:read_file_info(ErlangEl) of
- {ok, _} ->
- [{el, ErlangEl}|Config];
- _ ->
- {skip, "Could not find erlang.el"}
+ {ok, _} ->
+ case Case =:= bif_highlight orelse emacs_version_ok(24.1) of
+ false -> {skip, "Old or no emacs found"};
+ _ -> [{el, ErlangEl}|Config]
+ end;
+ _ ->
+ {skip, "Could not find erlang.el"}
end.
end_per_testcase(_Case, _Config) ->
@@ -46,26 +56,26 @@ bif_highlight(Config) ->
%% All auto-imported bifs
IntBifs = lists:usort(
- [F || {F,A} <- erlang:module_info(exports),
- erl_internal:bif(F,A)]),
+ [F || {F,A} <- erlang:module_info(exports),
+ erl_internal:bif(F,A)]),
%% all bif which need erlang: prefix and are not operands
ExtBifs = lists:usort(
- [F || {F,A} <- erlang:module_info(exports),
- not erl_internal:bif(F,A) andalso
- not is_atom(catch erl_internal:op_type(F,A))]),
+ [F || {F,A} <- erlang:module_info(exports),
+ not erl_internal:bif(F,A) andalso
+ not is_atom(catch erl_internal:op_type(F,A))]),
check_bif_highlight(Bin, <<"erlang-int-bifs">>, IntBifs),
check_bif_highlight(Bin, <<"erlang-ext-bifs">>, ExtBifs).
-
+
check_bif_highlight(Bin, Tag, Compare) ->
- [_H,IntMatch,_T] =
- re:split(Bin,<<"defvar ",Tag/binary,
- "[^(]*\\(([^)]*)">>,[]),
- EmacsIntBifs = [list_to_atom(S) ||
- S <- string:tokens(binary_to_list(IntMatch)," '\"\n")],
-
+ [_H,IntMatch,_T] =
+ re:split(Bin,<<"defvar ",Tag/binary,
+ "[^(]*\\(([^)]*)">>,[]),
+ EmacsIntBifs = [list_to_atom(S) ||
+ S <- string:tokens(binary_to_list(IntMatch)," '\"\n")],
+
ct:log("Emacs ~p",[EmacsIntBifs]),
ct:log("Int ~p",[Compare]),
@@ -73,27 +83,92 @@ check_bif_highlight(Bin, Tag, Compare) ->
ct:log("Diff2 ~p",[EmacsIntBifs -- Compare]),
[] = Compare -- EmacsIntBifs,
[] = EmacsIntBifs -- Compare.
-
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-indent(Config) ->
- case emacs_version_ok() of
+load_interpreted(_Config) ->
+ _ = emacs(["-l erlang.el -f erlang-mode"]),
+ ok.
+
+compile_and_load(_Config) ->
+ Dir = emacs_dir(),
+ Files0 = filelib:wildcard("*.el", Dir),
+ Files = case emacs_version_ok(24.3) of
+ %% erldoc.el depends on cl-lib which was introduced in 24.3.
+ false -> Files0 -- ["erldoc.el"];
+ _ -> Files0
+ end,
+ Unforgiving =
+ case emacs_version_ok(24) of
+ Ver when Ver < 25 ->
+ "";
+ Ver when Ver < 26 ->
+ %% Workaround byte-compile-error-on-warn which seem broken in
+ %% Emacs 25.
+ "\"(advice-add #'display-warning :after "
+ "(lambda (_ f _ _) (error \"%s\" f)))\"";
+ _ ->
+ "\"(setq byte-compile-error-on-warn t)\""
+ end,
+ %% Add files here whenever they are cleaned of warnings.
+ NoWarn = ["erlang.el", "erlang-test.el", "erlang-edoc.el", "erlang-start.el", "erldoc.el"],
+ Compile = fun(File) ->
+ Pedantic = case lists:member(File, NoWarn) andalso Unforgiving /= "" of
+ true -> ["--eval ", Unforgiving, " "];
+ false -> " "
+ end,
+ emacs([Pedantic,
+ " -f batch-byte-compile ",filename:join(Dir, File)]),
+ true
+ end,
+ lists:foreach(Compile, Files),
+ emacs(["-l erlang.elc -f erlang-mode"]),
+ ok.
+
+tests_interpreted(_Config) ->
+ case emacs_version_ok(25) of
false -> {skip, "Old or no emacs found"};
- true ->
- Def = filename:dirname(code:which(?MODULE)) ++ "/" ++ ?MODULE_STRING ++ "_data",
- Dir = proplists:get_value(data_dir, Config, Def),
- OrigFs = filelib:wildcard(Dir ++ "/*"),
- io:format("Dir: ~s~nFs: ~p~n", [Dir, OrigFs]),
- Fs = [{File, unindent(File)} || File <- OrigFs,
- filename:extension(File) =:= ""],
- Indent = fun emacs/1,
- [Indent(File) || {_, File} <- Fs],
- Res = [diff(Orig, File) || {Orig, File} <- Fs],
- [file:delete(File) || {ok, File} <- Res], %% Cleanup
- [] = [Fail || {fail, Fail} <- Res],
+ _ ->
+ emacs(["-l erlang.el ",
+ "-l erlang-test.el -f ert-run-tests-batch-and-exit"]),
ok
end.
+tests_compiled(_Config) ->
+ case emacs_version_ok(25) of
+ false -> {skip, "Old or no emacs found"};
+ _ ->
+ emacs(["-l erlang.elc ",
+ "-l erlang-test.elc -f ert-run-tests-batch-and-exit"]),
+ ok
+ end.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+
+indent(Config) ->
+ Def = filename:dirname(code:which(?MODULE))
+ ++ "/"
+ ++ ?MODULE_STRING
+ ++ "_data",
+ Dir = proplists:get_value(data_dir, Config, Def),
+ OrigFs = filelib:wildcard(Dir ++ "/*"),
+ io:format("Dir: ~s~nFs: ~p~n", [Dir, OrigFs]),
+ Fs = [{File, unindent(File)} || File <- OrigFs,
+ filename:extension(File) =:= ""],
+ Indent = fun(File) ->
+ emacs([
+ File, " ",
+ "--eval '(indent-region (point-min) (point-max) nil)' ",
+ "--eval '(save-buffer 0)'"
+ ]),
+ ok
+ end,
+ [Indent(File) || {_, File} <- Fs],
+ Res = [diff(Orig, File) || {Orig, File} <- Fs],
+ [file:delete(File) || {ok, File} <- Res], %% Cleanup
+ [] = [Fail || {fail, Fail} <- Res],
+ ok.
+
unindent(Input) ->
Output = Input ++ ".erl",
{ok, Bin} = file:read_file(Input),
@@ -112,14 +187,13 @@ diff(Orig, File) ->
{fail, File}
end.
-emacs_version_ok() ->
+emacs_version_ok(AcceptVer) ->
case os:cmd("emacs --version | head -1") of
"GNU Emacs " ++ Ver ->
case string:to_float(Ver) of
- {Vsn, _} when Vsn >= 24.1 ->
- true;
+ {Vsn, _} when Vsn >= AcceptVer ->
+ Vsn;
_ ->
- io:format("Emacs version fail~n~s~n~n",[Ver]),
false
end;
Res ->
@@ -127,16 +201,19 @@ emacs_version_ok() ->
false
end.
-emacs(File) ->
- EmacsErlDir = filename:join([code:lib_dir(tools), "emacs"]),
+emacs(EmacsCmds) when is_list(EmacsCmds) ->
Cmd = ["emacs ",
"--batch --quick ",
- "--directory ", EmacsErlDir, " ",
- "--eval \"(require 'erlang-start)\" ",
- File, " ",
- "--eval '(indent-region (point-min) (point-max) nil)' ",
- "--eval '(save-buffer 0)'"
- ],
- _Res = os:cmd(Cmd),
- % io:format("cmd ~s:~n=> ~s~n", [Cmd, _Res]),
- ok.
+ "--directory ", emacs_dir(), " ",
+ "--eval \"(require 'erlang-start)\" "
+ | EmacsCmds],
+ Res0 = os:cmd(Cmd ++ " ; echo $?"),
+ Rows = string:lexemes(Res0, ["\r\n", $\n]),
+ Res = lists:last(Rows),
+ Output = string:join(lists:droplast(Rows), "\n"),
+ io:format("Cmd ~s:~n => ~s ~ts~n", [Cmd, Res, Output]),
+ "0" = Res,
+ Output.
+
+emacs_dir() ->
+ filename:join([code:lib_dir(tools), "emacs"]).
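The rewritten emacs/1 helper above runs one emacs --batch command built from a list of argument strings and makes the calling test fail unless the command exits with status 0. A minimal sketch of the same exit-status idiom, with an illustrative command (os:cmd/1 only returns the output, not the status):

    %% Append "echo $?" so the exit status arrives as the last output line.
    Cmd = "emacs --batch --quick --version",
    Out = os:cmd(Cmd ++ " ; echo $?"),
    Rows = string:lexemes(Out, ["\r\n", $\n]),
    "0" = lists:last(Rows),                       %% crash on a non-zero exit status
    Output = string:join(lists:droplast(Rows), "\n").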
diff --git a/lib/wx/api_gen/wx_gen_cpp.erl b/lib/wx/api_gen/wx_gen_cpp.erl
index 70a3530526..f13d5873a0 100644
--- a/lib/wx/api_gen/wx_gen_cpp.erl
+++ b/lib/wx/api_gen/wx_gen_cpp.erl
@@ -995,8 +995,13 @@ build_ret_types(Type,Ps) ->
end,
lists:foldl(Calc, Free, Ps).
-build_ret(Name,_,#type{base={class,Class},single=true}) ->
- w(" rt.addRef(getRef((void *)~s,memenv), \"~s\");~n",[Name,Class]);
+build_ret(Name,_D,#type{base={class,Class},single=true}=_T) ->
+ case Class of
+ "wxGraphicsContext" ->
+ w(" rt.addRef(getRef((void *)~s,memenv,8), \"~s\");~n",[Name,Class]);
+ _ ->
+ w(" rt.addRef(getRef((void *)~s,memenv), \"~s\");~n",[Name,Class])
+ end;
build_ret(Name,_,#type{name="wxTreeItemId",single=true}) ->
w(" rt.add((wxUIntPtr *) ~s.m_pItem);~n",[Name]);
build_ret(Name,_,#type{name="wxTreeItemIdValue",single=true}) ->
diff --git a/lib/wx/c_src/gen/wxe_funcs.cpp b/lib/wx/c_src/gen/wxe_funcs.cpp
index a7bac4cf9d..74961b2e5e 100644
--- a/lib/wx/c_src/gen/wxe_funcs.cpp
+++ b/lib/wx/c_src/gen/wxe_funcs.cpp
@@ -6147,18 +6147,18 @@ case wxGraphicsObject_IsNull: { // wxGraphicsObject::IsNull
case wxGraphicsContext_Create_1_1: { // wxGraphicsContext::Create
wxWindowDC * dc = (wxWindowDC *) getPtr(bp,memenv); bp += 4;
wxGraphicsContext * Result = (wxGraphicsContext*)wxGraphicsContext::Create(*dc);
- rt.addRef(getRef((void *)Result,memenv), "wxGraphicsContext");
+ rt.addRef(getRef((void *)Result,memenv,8), "wxGraphicsContext");
break;
}
case wxGraphicsContext_Create_1_0: { // wxGraphicsContext::Create
wxWindow *window = (wxWindow *) getPtr(bp,memenv); bp += 4;
wxGraphicsContext * Result = (wxGraphicsContext*)wxGraphicsContext::Create(window);
- rt.addRef(getRef((void *)Result,memenv), "wxGraphicsContext");
+ rt.addRef(getRef((void *)Result,memenv,8), "wxGraphicsContext");
break;
}
case wxGraphicsContext_Create_0: { // wxGraphicsContext::Create
wxGraphicsContext * Result = (wxGraphicsContext*)wxGraphicsContext::Create();
- rt.addRef(getRef((void *)Result,memenv), "wxGraphicsContext");
+ rt.addRef(getRef((void *)Result,memenv,8), "wxGraphicsContext");
break;
}
case wxGraphicsContext_CreatePen: { // wxGraphicsContext::CreatePen
@@ -6999,7 +6999,7 @@ case wxGraphicsRenderer_CreateContext_1_1: { // wxGraphicsRenderer::CreateContex
wxWindowDC * dc = (wxWindowDC *) getPtr(bp,memenv); bp += 4;
if(!This) throw wxe_badarg(0);
wxGraphicsContext * Result = (wxGraphicsContext*)This->CreateContext(*dc);
- rt.addRef(getRef((void *)Result,memenv), "wxGraphicsContext");
+ rt.addRef(getRef((void *)Result,memenv,8), "wxGraphicsContext");
break;
}
case wxGraphicsRenderer_CreateContext_1_0: { // wxGraphicsRenderer::CreateContext
@@ -7007,7 +7007,7 @@ case wxGraphicsRenderer_CreateContext_1_0: { // wxGraphicsRenderer::CreateContex
wxWindow *window = (wxWindow *) getPtr(bp,memenv); bp += 4;
if(!This) throw wxe_badarg(0);
wxGraphicsContext * Result = (wxGraphicsContext*)This->CreateContext(window);
- rt.addRef(getRef((void *)Result,memenv), "wxGraphicsContext");
+ rt.addRef(getRef((void *)Result,memenv,8), "wxGraphicsContext");
break;
}
diff --git a/lib/wx/c_src/wxe_impl.cpp b/lib/wx/c_src/wxe_impl.cpp
index 1510866f09..f856099ffa 100644
--- a/lib/wx/c_src/wxe_impl.cpp
+++ b/lib/wx/c_src/wxe_impl.cpp
@@ -591,7 +591,7 @@ int WxeApp::newPtr(void * ptr, int type, wxeMemEnv *memenv) {
return ref;
}
-int WxeApp::getRef(void * ptr, wxeMemEnv *memenv) {
+int WxeApp::getRef(void * ptr, wxeMemEnv *memenv, int type) {
if(!ptr) return 0; // NULL and zero is the same
ptrMap::iterator it = ptr2ref.find(ptr);
if(it != ptr2ref.end()) {
@@ -618,7 +618,7 @@ int WxeApp::getRef(void * ptr, wxeMemEnv *memenv) {
}
memenv->ref2ptr[ref] = ptr;
- ptr2ref[ptr] = new wxeRefData(ref, 0, false, memenv);
+ ptr2ref[ptr] = new wxeRefData(ref, type, false, memenv);
return ref;
}
diff --git a/lib/wx/c_src/wxe_impl.h b/lib/wx/c_src/wxe_impl.h
index 140a2bd36a..69cc81c429 100644
--- a/lib/wx/c_src/wxe_impl.h
+++ b/lib/wx/c_src/wxe_impl.h
@@ -84,7 +84,7 @@ public:
wxeMemEnv * getMemEnv(ErlDrvTermData port);
int newPtr(void * ptr, int type, wxeMemEnv *memenv);
- int getRef(void * ptr, wxeMemEnv *memenv);
+ int getRef(void * ptr, wxeMemEnv *memenv, int type = 0);
void * getPtr(char * bp, wxeMemEnv *memenv);
void clearPtr(void *ptr);
wxeRefData * getRefData(void *ptr);
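Taken together, the wx changes give WxeApp::getRef an optional type argument (defaulting to 0, the previous behaviour) and make the generator emit getRef(..., 8) for wxGraphicsContext results, so contexts returned by the various Create functions are registered with a real type instead of as untyped references. A hypothetical session exercising one of the regenerated code paths; the frame and DC setup is only illustrative:

    %% Create a graphics context from a window DC; the reference handed back to
    %% Erlang is now stored with type 8 on the C++ side.
    Wx = wx:new(),
    Frame = wxFrame:new(Wx, -1, "gc demo"),
    wxFrame:show(Frame),
    DC = wxWindowDC:new(Frame),
    GC = wxGraphicsContext:create(DC),
    wxGraphicsContext:drawRectangle(GC, 10.0, 10.0, 100.0, 50.0),
    wxGraphicsContext:destroy(GC),
    wxWindowDC:destroy(DC),
    wxFrame:destroy(Frame).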