Diffstat (limited to 'lib')
-rw-r--r--  lib/compiler/doc/src/notes.xml | 15
-rw-r--r--  lib/compiler/src/beam_utils.erl | 5
-rw-r--r--  lib/compiler/test/beam_utils_SUITE.erl | 23
-rw-r--r--  lib/compiler/vsn.mk | 2
-rw-r--r--  lib/crypto/c_src/crypto.c | 266
-rw-r--r--  lib/crypto/c_src/crypto_callback.c | 6
-rw-r--r--  lib/crypto/c_src/otp_test_engine.c | 178
-rw-r--r--  lib/crypto/doc/src/algorithm_details.xml | 14
-rw-r--r--  lib/crypto/doc/src/crypto.xml | 11
-rw-r--r--  lib/crypto/src/crypto.erl | 42
-rw-r--r--  lib/crypto/test/crypto_SUITE.erl | 428
-rw-r--r--  lib/crypto/test/engine_SUITE.erl | 132
-rw-r--r--  lib/kernel/doc/src/logger.xml | 7
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml | 5
-rw-r--r--  lib/kernel/doc/src/logger_formatter.xml | 2
-rw-r--r--  lib/kernel/doc/src/notes.xml | 16
-rw-r--r--  lib/kernel/src/kernel.appup.src | 47
-rw-r--r--  lib/kernel/src/logger_config.erl | 4
-rw-r--r--  lib/kernel/src/logger_disk_log_h.erl | 707
-rw-r--r--  lib/kernel/src/logger_h_common.erl | 653
-rw-r--r--  lib/kernel/src/logger_h_common.hrl | 16
-rw-r--r--  lib/kernel/src/logger_std_h.erl | 764
-rw-r--r--  lib/kernel/src/net_kernel.erl | 3
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl | 138
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl | 66
-rw-r--r--  lib/kernel/vsn.mk | 2
-rw-r--r--  lib/observer/src/crashdump_viewer.erl | 78
-rw-r--r--  lib/observer/test/crashdump_helper.erl | 18
-rw-r--r--  lib/observer/test/crashdump_viewer_SUITE.erl | 7
-rw-r--r--  lib/public_key/asn1/OTP-PKIX.asn1 | 17
-rw-r--r--  lib/public_key/doc/src/notes.xml | 16
-rw-r--r--  lib/public_key/doc/src/public_key.xml | 15
-rw-r--r--  lib/public_key/src/pubkey_pem.erl | 11
-rw-r--r--  lib/public_key/src/pubkey_ssh.erl | 99
-rw-r--r--  lib/public_key/src/public_key.erl | 19
-rw-r--r--  lib/public_key/vsn.mk | 2
-rw-r--r--  lib/reltool/src/reltool_utils.erl | 13
-rw-r--r--  lib/reltool/test/reltool_server_SUITE.erl | 16
-rw-r--r--  lib/sasl/src/sasl.app.src | 2
-rw-r--r--  lib/sasl/src/systools_make.erl | 2
-rw-r--r--  lib/ssh/doc/src/notes.xml | 17
-rw-r--r--  lib/ssh/doc/src/ssh.xml | 43
-rw-r--r--  lib/ssh/doc/src/ssh_app.xml | 8
-rw-r--r--  lib/ssh/doc/src/ssh_file.xml | 10
-rw-r--r--  lib/ssh/src/ssh.hrl | 24
-rw-r--r--  lib/ssh/src/ssh_auth.erl | 112
-rw-r--r--  lib/ssh/src/ssh_connection_handler.erl | 65
-rw-r--r--  lib/ssh/src/ssh_file.erl | 16
-rw-r--r--  lib/ssh/src/ssh_message.erl | 8
-rw-r--r--  lib/ssh/src/ssh_options.erl | 24
-rw-r--r--  lib/ssh/src/ssh_transport.erl | 76
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE.erl | 7
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519 | 7
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519.pub | 1
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448 | 10
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448.pub | 1
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key | 7
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key | 10
-rw-r--r--  lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE.erl | 26
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/id_ed25519 | 7
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/id_ed25519.pub | 1
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/id_ed448 | 10
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/id_ed448.pub | 1
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key | 7
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key | 10
-rw-r--r--  lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE.erl | 21
-rwxr-xr-x  lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all | 6
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key | 7
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key | 10
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key.pub | 1
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519 | 7
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519.pub | 1
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448 | 10
-rw-r--r--  lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448.pub | 1
-rw-r--r--  lib/ssh/test/ssh_engine_SUITE.erl | 11
-rw-r--r--  lib/ssh/test/ssh_options_SUITE.erl | 39
-rw-r--r--  lib/ssh/test/ssh_test_lib.erl | 25
-rw-r--r--  lib/ssl/doc/src/notes.xml | 34
-rw-r--r--  lib/ssl/src/ssl_handshake.erl | 7
-rw-r--r--  lib/ssl/src/tls_sender.erl | 2
-rw-r--r--  lib/ssl/test/ssl_bench_test_lib.erl | 4
-rw-r--r--  lib/ssl/test/ssl_engine_SUITE.erl | 30
-rw-r--r--  lib/stdlib/doc/src/calendar.xml | 6
-rw-r--r--  lib/stdlib/doc/src/notes.xml | 31
-rw-r--r--  lib/stdlib/src/calendar.erl | 11
-rw-r--r--  lib/stdlib/src/erl_lint.erl | 3
-rw-r--r--  lib/stdlib/test/calendar_SUITE.erl | 68
-rw-r--r--  lib/stdlib/test/erl_lint_SUITE.erl | 20
-rw-r--r--  lib/stdlib/test/lists_SUITE.erl | 7
-rw-r--r--  lib/wx/api_gen/wx_gen_cpp.erl | 9
-rw-r--r--  lib/wx/c_src/gen/wxe_funcs.cpp | 10
-rw-r--r--  lib/wx/c_src/wxe_impl.cpp | 4
-rw-r--r--  lib/wx/c_src/wxe_impl.h | 2
98 files changed, 2934 insertions, 1833 deletions
diff --git a/lib/compiler/doc/src/notes.xml b/lib/compiler/doc/src/notes.xml
index 5024310788..e0e5bc832b 100644
--- a/lib/compiler/doc/src/notes.xml
+++ b/lib/compiler/doc/src/notes.xml
@@ -32,6 +32,21 @@
<p>This document describes the changes made to the Compiler
application.</p>
+<section><title>Compiler 7.2.7</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>Fixed a bug where incorrect code was generated
+ following a binary match guard.</p>
+ <p>
+ Own Id: OTP-15353 Aux Id: ERL-753 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Compiler 7.2.6</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/compiler/src/beam_utils.erl b/lib/compiler/src/beam_utils.erl
index 6e23003fc7..6b2ab5a2a4 100644
--- a/lib/compiler/src/beam_utils.erl
+++ b/lib/compiler/src/beam_utils.erl
@@ -745,8 +745,11 @@ check_liveness_block_2(R, {gc_bif,Op,{f,Lbl}}, Ss, St) ->
check_liveness_block_3(R, Lbl, {Op,length(Ss)}, St);
check_liveness_block_2(R, {bif,Op,{f,Lbl}}, Ss, St) ->
Arity = length(Ss),
+
+ %% Note that is_function/2 is a type test but is not safe.
case erl_internal:comp_op(Op, Arity) orelse
- erl_internal:new_type_test(Op, Arity) of
+ (erl_internal:new_type_test(Op, Arity) andalso
+ erl_bifs:is_safe(erlang, Op, Arity)) of
true ->
{killed,St};
false ->
diff --git a/lib/compiler/test/beam_utils_SUITE.erl b/lib/compiler/test/beam_utils_SUITE.erl
index ac19305d69..ff0f72d519 100644
--- a/lib/compiler/test/beam_utils_SUITE.erl
+++ b/lib/compiler/test/beam_utils_SUITE.erl
@@ -26,7 +26,7 @@
select/1,y_catch/1,otp_8949_b/1,liveopt/1,coverage/1,
y_registers/1,user_predef/1,scan_f/1,cafu/1,
receive_label/1,read_size_file_version/1,not_used/1,
- is_used_fr/1]).
+ is_used_fr/1,unsafe_is_function/1]).
-export([id/1]).
suite() -> [{ct_hooks,[ts_install_cth]}].
@@ -53,7 +53,8 @@ groups() ->
cafu,
read_size_file_version,
not_used,
- is_used_fr
+ is_used_fr,
+ unsafe_is_function
]}].
init_per_suite(Config) ->
@@ -570,6 +571,24 @@ is_used_fr(X, Y) ->
end,
X ! 1.
+%% ERL-778.
+unsafe_is_function(Config) ->
+ {undefined,any} = unsafe_is_function(undefined, any),
+ {ok,any} = unsafe_is_function(fun() -> ok end, any),
+ {'EXIT',{{case_clause,_},_}} = (catch unsafe_is_function(fun(_) -> ok end, any)),
+ ok.
+
+unsafe_is_function(F, M) ->
+ %% There would be an internal consistency failure:
+ %% Instruction: {bif,is_function,{f,0},[{x,0},{integer,0}],{x,2}}
+ %% Error: {uninitialized_reg,{y,0}}:
+
+ NewValue = case is_function(F, 0) of
+ true -> F();
+ false when F =:= undefined -> undefined
+ end,
+ {NewValue,M}.
+
%% The identity function.
id(I) -> I.
diff --git a/lib/compiler/vsn.mk b/lib/compiler/vsn.mk
index ab707885f4..92f8aec424 100644
--- a/lib/compiler/vsn.mk
+++ b/lib/compiler/vsn.mk
@@ -1 +1 @@
-COMPILER_VSN = 7.2.6
+COMPILER_VSN = 7.2.7
diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c
index b2d8123f00..df607732bf 100644
--- a/lib/crypto/c_src/crypto.c
+++ b/lib/crypto/c_src/crypto.c
@@ -173,10 +173,13 @@
#endif
// (test for >= 1.1.1pre8)
-#if OPENSSL_VERSION_NUMBER >= (PACKED_OPENSSL_VERSION_PLAIN(1,1,1) - 7) \
+#if OPENSSL_VERSION_NUMBER >= (PACKED_OPENSSL_VERSION_PLAIN(1,1,1) -7) \
&& !defined(HAS_LIBRESSL) \
&& defined(HAVE_EC)
# define HAVE_ED_CURVE_DH
+# if OPENSSL_VERSION_NUMBER >= (PACKED_OPENSSL_VERSION_PLAIN(1,1,1))
+# define HAVE_EDDSA
+# endif
#endif
#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION(0,9,8,'c')
@@ -632,10 +635,8 @@ static ErlNifFunc nif_funcs[] = {
{"rsa_generate_key_nif", 2, rsa_generate_key_nif},
{"dh_generate_key_nif", 4, dh_generate_key_nif},
{"dh_compute_key_nif", 3, dh_compute_key_nif},
-
{"evp_compute_key_nif", 3, evp_compute_key_nif},
{"evp_generate_key_nif", 1, evp_generate_key_nif},
-
{"privkey_to_pubkey_nif", 2, privkey_to_pubkey_nif},
{"srp_value_B_nif", 5, srp_value_B_nif},
{"srp_user_secret_nif", 7, srp_user_secret_nif},
@@ -742,6 +743,12 @@ static ERL_NIF_TERM atom_x25519;
static ERL_NIF_TERM atom_x448;
#endif
+static ERL_NIF_TERM atom_eddsa;
+#ifdef HAVE_EDDSA
+static ERL_NIF_TERM atom_ed25519;
+static ERL_NIF_TERM atom_ed448;
+#endif
+
static ERL_NIF_TERM atom_rsa_mgf1_md;
static ERL_NIF_TERM atom_rsa_oaep_label;
static ERL_NIF_TERM atom_rsa_oaep_md;
@@ -1165,6 +1172,7 @@ static int initialize(ErlNifEnv* env, ERL_NIF_TERM load_info)
atom_ppbasis = enif_make_atom(env,"ppbasis");
atom_onbasis = enif_make_atom(env,"onbasis");
#endif
+
atom_aes_cfb8 = enif_make_atom(env, "aes_cfb8");
atom_aes_cfb128 = enif_make_atom(env, "aes_cfb128");
#ifdef HAVE_GCM
@@ -1195,6 +1203,11 @@ static int initialize(ErlNifEnv* env, ERL_NIF_TERM load_info)
atom_x25519 = enif_make_atom(env,"x25519");
atom_x448 = enif_make_atom(env,"x448");
#endif
+ atom_eddsa = enif_make_atom(env,"eddsa");
+#ifdef HAVE_EDDSA
+ atom_ed25519 = enif_make_atom(env,"ed25519");
+ atom_ed448 = enif_make_atom(env,"ed448");
+#endif
atom_rsa_mgf1_md = enif_make_atom(env,"rsa_mgf1_md");
atom_rsa_oaep_label = enif_make_atom(env,"rsa_oaep_label");
atom_rsa_oaep_md = enif_make_atom(env,"rsa_oaep_md");
@@ -1336,13 +1349,13 @@ static void unload(ErlNifEnv* env, void* priv_data)
static int algo_hash_cnt, algo_hash_fips_cnt;
static ERL_NIF_TERM algo_hash[12]; /* increase when extending the list */
static int algo_pubkey_cnt, algo_pubkey_fips_cnt;
-static ERL_NIF_TERM algo_pubkey[11]; /* increase when extending the list */
+static ERL_NIF_TERM algo_pubkey[12]; /* increase when extending the list */
static int algo_cipher_cnt, algo_cipher_fips_cnt;
static ERL_NIF_TERM algo_cipher[25]; /* increase when extending the list */
static int algo_mac_cnt, algo_mac_fips_cnt;
static ERL_NIF_TERM algo_mac[3]; /* increase when extending the list */
static int algo_curve_cnt, algo_curve_fips_cnt;
-static ERL_NIF_TERM algo_curve[87]; /* increase when extending the list */
+static ERL_NIF_TERM algo_curve[89]; /* increase when extending the list */
static int algo_rsa_opts_cnt, algo_rsa_opts_fips_cnt;
static ERL_NIF_TERM algo_rsa_opts[11]; /* increase when extending the list */
@@ -1394,6 +1407,10 @@ static void init_algorithms_types(ErlNifEnv* env)
#endif
// Non-validated algorithms follow
algo_pubkey_fips_cnt = algo_pubkey_cnt;
+ // Don't know if Edward curves are fips validated
+#if defined(HAVE_EDDSA)
+ algo_pubkey[algo_pubkey_cnt++] = enif_make_atom(env, "eddsa");
+#endif
algo_pubkey[algo_pubkey_cnt++] = enif_make_atom(env, "srp");
// Validated algorithms first
@@ -1554,6 +1571,10 @@ static void init_algorithms_types(ErlNifEnv* env)
#endif
#endif
//--
+#ifdef HAVE_EDDSA
+ algo_curve[algo_curve_cnt++] = enif_make_atom(env,"ed25519");
+ algo_curve[algo_curve_cnt++] = enif_make_atom(env,"ed448");
+#endif
#ifdef HAVE_ED_CURVE_DH
algo_curve[algo_curve_cnt++] = enif_make_atom(env,"x25519");
algo_curve[algo_curve_cnt++] = enif_make_atom(env,"x448");
@@ -3215,6 +3236,36 @@ static int get_rsa_public_key(ErlNifEnv* env, ERL_NIF_TERM key, RSA *rsa)
return 1;
}
+#ifdef HAVE_EDDSA
+ static int get_eddsa_key(ErlNifEnv* env, int public, ERL_NIF_TERM key, EVP_PKEY **pkey)
+{
+ /* key=[K] */
+ ERL_NIF_TERM head, tail, tail2, algo;
+ ErlNifBinary bin;
+ int type;
+
+ if (!enif_get_list_cell(env, key, &head, &tail)
+ || !enif_inspect_binary(env, head, &bin)
+ || !enif_get_list_cell(env, tail, &algo, &tail2)
+ || !enif_is_empty_list(env, tail2)) {
+ return 0;
+ }
+ if (algo == atom_ed25519) type = EVP_PKEY_ED25519;
+ else if (algo == atom_ed448) type = EVP_PKEY_ED448;
+ else
+ return 0;
+
+ if (public)
+ *pkey = EVP_PKEY_new_raw_public_key(type, NULL, bin.data, bin.size);
+ else
+ *pkey = EVP_PKEY_new_raw_private_key(type, NULL, bin.data, bin.size);
+
+ if (!pkey)
+ return 0;
+ return 1;
+}
+#endif
+
static int get_dss_private_key(ErlNifEnv* env, ERL_NIF_TERM key, DSA *dsa)
{
/* key=[P,Q,G,KEY] */
@@ -4183,9 +4234,9 @@ static ERL_NIF_TERM evp_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_
{
#ifdef HAVE_ED_CURVE_DH
int type;
- EVP_PKEY_CTX *ctx;
+ EVP_PKEY_CTX *ctx = NULL;
ErlNifBinary peer_bin, my_bin, key_bin;
- EVP_PKEY *peer_key, *my_key;
+ EVP_PKEY *peer_key = NULL, *my_key = NULL;
size_t max_size;
if (argv[0] == atom_x25519) type = EVP_PKEY_X25519;
@@ -4193,41 +4244,43 @@ static ERL_NIF_TERM evp_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_
else return enif_make_badarg(env);
if (!enif_inspect_binary(env, argv[1], &peer_bin) ||
- !enif_inspect_binary(env, argv[2], &my_bin)) {
- return enif_make_badarg(env);
- }
+ !enif_inspect_binary(env, argv[2], &my_bin))
+ goto return_badarg;
if (!(my_key = EVP_PKEY_new_raw_private_key(type, NULL, my_bin.data, my_bin.size)) ||
- !(ctx = EVP_PKEY_CTX_new(my_key, NULL))) {
- return enif_make_badarg(env);
- }
+ !(ctx = EVP_PKEY_CTX_new(my_key, NULL)))
+ goto return_badarg;
- if (!EVP_PKEY_derive_init(ctx)) {
- return enif_make_badarg(env);
- }
+ if (!EVP_PKEY_derive_init(ctx))
+ goto return_badarg;
if (!(peer_key = EVP_PKEY_new_raw_public_key(type, NULL, peer_bin.data, peer_bin.size)) ||
- !EVP_PKEY_derive_set_peer(ctx, peer_key)) {
- return enif_make_badarg(env);
- }
+ !EVP_PKEY_derive_set_peer(ctx, peer_key))
+ goto return_badarg;
- if (!EVP_PKEY_derive(ctx, NULL, &max_size)) {
- return enif_make_badarg(env);
- }
+ if (!EVP_PKEY_derive(ctx, NULL, &max_size))
+ goto return_badarg;
if (!enif_alloc_binary(max_size, &key_bin) ||
- !EVP_PKEY_derive(ctx, key_bin.data, &key_bin.size)) {
- return enif_make_badarg(env);
- }
+ !EVP_PKEY_derive(ctx, key_bin.data, &key_bin.size))
+ goto return_badarg;
if (key_bin.size < max_size) {
size_t actual_size = key_bin.size;
- if (!enif_realloc_binary(&key_bin, actual_size)) {
- return enif_make_badarg(env);
- }
+ if (!enif_realloc_binary(&key_bin, actual_size))
+ goto return_badarg;
}
+ EVP_PKEY_free(my_key);
+ EVP_PKEY_free(peer_key);
+ EVP_PKEY_CTX_free(ctx);
return enif_make_binary(env, &key_bin);
+
+return_badarg:
+ if (my_key) EVP_PKEY_free(my_key);
+ if (peer_key) EVP_PKEY_free(peer_key);
+ if (ctx) EVP_PKEY_CTX_free(ctx);
+ return enif_make_badarg(env);
#else
return atom_notsup;
#endif
@@ -4238,7 +4291,7 @@ static ERL_NIF_TERM evp_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF
{
#ifdef HAVE_ED_CURVE_DH
int type;
- EVP_PKEY_CTX *ctx;
+ EVP_PKEY_CTX *ctx = NULL;
EVP_PKEY *pkey = NULL;
ERL_NIF_TERM ret_pub, ret_prv;
size_t key_len;
@@ -4249,22 +4302,30 @@ static ERL_NIF_TERM evp_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF
if (!(ctx = EVP_PKEY_CTX_new_id(type, NULL))) return enif_make_badarg(env);
- if (!EVP_PKEY_keygen_init(ctx)) return atom_error;
- if (!EVP_PKEY_keygen(ctx, &pkey)) return atom_error;
+ if (!EVP_PKEY_keygen_init(ctx)) goto return_error;
+ if (!EVP_PKEY_keygen(ctx, &pkey)) goto return_error;
- if (!EVP_PKEY_get_raw_public_key(pkey, NULL, &key_len)) return atom_error;
+ if (!EVP_PKEY_get_raw_public_key(pkey, NULL, &key_len)) goto return_error;
if (!EVP_PKEY_get_raw_public_key(pkey,
enif_make_new_binary(env, key_len, &ret_pub),
&key_len))
- return atom_error;
+ goto return_error;
- if (!EVP_PKEY_get_raw_private_key(pkey, NULL, &key_len)) return atom_error;
+ if (!EVP_PKEY_get_raw_private_key(pkey, NULL, &key_len)) goto return_error;
if (!EVP_PKEY_get_raw_private_key(pkey,
enif_make_new_binary(env, key_len, &ret_prv),
&key_len))
- return atom_error;
+ goto return_error;
+ EVP_PKEY_free(pkey);
+ EVP_PKEY_CTX_free(ctx);
return enif_make_tuple2(env, ret_pub, ret_prv);
+
+return_error:
+ if (pkey) EVP_PKEY_free(pkey);
+ if (ctx) EVP_PKEY_CTX_free(ctx);
+ return atom_error;
+
#else
return atom_notsup;
#endif
@@ -4296,7 +4357,9 @@ static int get_pkey_digest_type(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NIF_
*md = NULL;
if (type == atom_none && algorithm == atom_rsa) return PKEY_OK;
-
+#ifdef HAVE_EDDSA
+ if (algorithm == atom_eddsa) return PKEY_OK;
+#endif
digp = get_digest_type(type);
if (!digp) return PKEY_BADARG;
if (!digp->md.p) return PKEY_NOTSUP;
@@ -4471,7 +4534,7 @@ static int get_engine_and_key_id(ErlNifEnv *env, ERL_NIF_TERM key, char ** id, E
static char *get_key_password(ErlNifEnv *env, ERL_NIF_TERM key) {
ERL_NIF_TERM tmp_term;
ErlNifBinary pwd_bin;
- char *pwd;
+ char *pwd = NULL;
if (enif_get_map_value(env, key, atom_password, &tmp_term) &&
enif_inspect_binary(env, tmp_term, &pwd_bin) &&
zero_terminate(pwd_bin, &pwd)
@@ -4496,16 +4559,17 @@ static int get_pkey_private_key(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NIF_
#ifdef HAS_ENGINE_SUPPORT
/* Use key stored in engine */
ENGINE *e;
- char *id;
+ char *id = NULL;
char *password;
if (!get_engine_and_key_id(env, key, &id, &e))
return PKEY_BADARG;
password = get_key_password(env, key);
*pkey = ENGINE_load_private_key(e, id, NULL, password);
+ if (password) enif_free(password);
+ enif_free(id);
if (!*pkey)
return PKEY_BADARG;
- enif_free(id);
#else
return PKEY_BADARG;
#endif
@@ -4546,6 +4610,14 @@ static int get_pkey_private_key(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NIF_
#else
return PKEY_NOTSUP;
#endif
+ } else if (algorithm == atom_eddsa) {
+#if defined(HAVE_EDDSA)
+ if (!get_eddsa_key(env, 0, key, pkey)) {
+ return PKEY_BADARG;
+ }
+#else
+ return PKEY_NOTSUP;
+#endif
} else if (algorithm == atom_dss) {
DSA *dsa = DSA_new();
@@ -4575,16 +4647,17 @@ static int get_pkey_public_key(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NIF_T
#ifdef HAS_ENGINE_SUPPORT
/* Use key stored in engine */
ENGINE *e;
- char *id;
+ char *id = NULL;
char *password;
if (!get_engine_and_key_id(env, key, &id, &e))
return PKEY_BADARG;
password = get_key_password(env, key);
*pkey = ENGINE_load_public_key(e, id, NULL, password);
+ if (password) enif_free(password);
+ enif_free(id);
if (!pkey)
return PKEY_BADARG;
- enif_free(id);
#else
return PKEY_BADARG;
#endif
@@ -4624,6 +4697,14 @@ static int get_pkey_public_key(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NIF_T
#else
return PKEY_NOTSUP;
#endif
+ } else if (algorithm == atom_eddsa) {
+#if defined(HAVE_EDDSA)
+ if (!get_eddsa_key(env, 1, key, pkey)) {
+ return PKEY_BADARG;
+ }
+#else
+ return PKEY_NOTSUP;
+#endif
} else if (algorithm == atom_dss) {
DSA *dsa = DSA_new();
@@ -4697,8 +4778,10 @@ printf("\r\n");
ctx = EVP_PKEY_CTX_new(pkey, NULL);
if (!ctx) goto badarg;
- if (EVP_PKEY_sign_init(ctx) <= 0) goto badarg;
- if (md != NULL && EVP_PKEY_CTX_set_signature_md(ctx, md) <= 0) goto badarg;
+ if (argv[0] != atom_eddsa) {
+ if (EVP_PKEY_sign_init(ctx) <= 0) goto badarg;
+ if (md != NULL && EVP_PKEY_CTX_set_signature_md(ctx, md) <= 0) goto badarg;
+ }
if (argv[0] == atom_rsa) {
if (EVP_PKEY_CTX_set_rsa_padding(ctx, sig_opt.rsa_padding) <= 0) goto badarg;
@@ -4720,14 +4803,40 @@ printf("\r\n");
#endif
}
- if (EVP_PKEY_sign(ctx, NULL, &siglen, tbs, tbslen) <= 0) goto badarg;
- enif_alloc_binary(siglen, &sig_bin);
+ if (argv[0] == atom_eddsa) {
+#ifdef HAVE_EDDSA
+ EVP_MD_CTX* mdctx = EVP_MD_CTX_new();
+ if (!EVP_DigestSignInit(mdctx, NULL, NULL, NULL, pkey)) {
+ if (mdctx) EVP_MD_CTX_free(mdctx);
+ goto badarg;
+ }
+
+ if (!EVP_DigestSign(mdctx, NULL, &siglen, tbs, tbslen)) {
+ EVP_MD_CTX_free(mdctx);
+ goto badarg;
+ }
+ enif_alloc_binary(siglen, &sig_bin);
- if (md != NULL) {
- ERL_VALGRIND_ASSERT_MEM_DEFINED(tbs, EVP_MD_size(md));
+ if (!EVP_DigestSign(mdctx, sig_bin.data, &siglen, tbs, tbslen)) {
+ EVP_MD_CTX_free(mdctx);
+ goto badarg;
+ }
+ EVP_MD_CTX_free(mdctx);
+#else
+ goto badarg;
+#endif
}
- i = EVP_PKEY_sign(ctx, sig_bin.data, &siglen, tbs, tbslen);
+ else
+ {
+ if (EVP_PKEY_sign(ctx, NULL, &siglen, tbs, tbslen) <= 0) goto badarg;
+ enif_alloc_binary(siglen, &sig_bin);
+ if (md != NULL) {
+ ERL_VALGRIND_ASSERT_MEM_DEFINED(tbs, EVP_MD_size(md));
+ }
+ i = EVP_PKEY_sign(ctx, sig_bin.data, &siglen, tbs, tbslen);
+ }
+
EVP_PKEY_CTX_free(ctx);
#else
/*printf("Old interface\r\n");
@@ -4835,8 +4944,11 @@ static ERL_NIF_TERM pkey_verify_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
*/
ctx = EVP_PKEY_CTX_new(pkey, NULL);
if (!ctx) goto badarg;
- if (EVP_PKEY_verify_init(ctx) <= 0) goto badarg;
- if (md != NULL && EVP_PKEY_CTX_set_signature_md(ctx, md) <= 0) goto badarg;
+
+ if (argv[0] != atom_eddsa) {
+ if (EVP_PKEY_verify_init(ctx) <= 0) goto badarg;
+ if (md != NULL && EVP_PKEY_CTX_set_signature_md(ctx, md) <= 0) goto badarg;
+ }
if (argv[0] == atom_rsa) {
if (EVP_PKEY_CTX_set_rsa_padding(ctx, sig_opt.rsa_padding) <= 0) goto badarg;
@@ -4856,10 +4968,28 @@ static ERL_NIF_TERM pkey_verify_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
}
}
- if (md != NULL) {
- ERL_VALGRIND_ASSERT_MEM_DEFINED(tbs, EVP_MD_size(md));
- }
- i = EVP_PKEY_verify(ctx, sig_bin.data, sig_bin.size, tbs, tbslen);
+ if (argv[0] == atom_eddsa) {
+#ifdef HAVE_EDDSA
+ EVP_MD_CTX* mdctx = EVP_MD_CTX_create();
+
+ if (!EVP_DigestVerifyInit(mdctx, NULL, NULL, NULL, pkey)) {
+ if (mdctx) EVP_MD_CTX_destroy(mdctx);
+ goto badarg;
+ }
+
+ i = EVP_DigestVerify(mdctx, sig_bin.data, sig_bin.size, tbs, tbslen);
+ EVP_MD_CTX_destroy(mdctx);
+#else
+ goto badarg;
+#endif
+ }
+ else
+ {
+ if (md != NULL) {
+ ERL_VALGRIND_ASSERT_MEM_DEFINED(tbs, EVP_MD_size(md));
+ }
+ i = EVP_PKEY_verify(ctx, sig_bin.data, sig_bin.size, tbs, tbslen);
+ }
EVP_PKEY_CTX_free(ctx);
#else
@@ -5003,6 +5133,15 @@ static int get_pkey_crypt_options(ErlNifEnv *env, ERL_NIF_TERM algorithm, ERL_NI
return PKEY_OK;
}
+static size_t size_of_RSA(EVP_PKEY *pkey) {
+ size_t tmplen;
+ RSA *rsa = EVP_PKEY_get1_RSA(pkey);
+ if (rsa == NULL) return 0;
+ tmplen = RSA_size(rsa);
+ RSA_free(rsa);
+ return tmplen;
+}
+
static ERL_NIF_TERM pkey_crypt_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM argv[])
{/* (Algorithm, Data, PublKey=[E,N]|[E,N,D]|[E,N,D,P1,P2,E1,E2,C], Options, IsPrivate, IsEncrypt) */
int i;
@@ -5100,9 +5239,8 @@ static ERL_NIF_TERM pkey_crypt_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
#ifdef HAVE_RSA_SSLV23_PADDING
if (crypt_opt.rsa_padding == RSA_SSLV23_PADDING) {
if (is_encrypt) {
- RSA *rsa = EVP_PKEY_get1_RSA(pkey);
- if (rsa == NULL) goto badarg;
- tmplen = RSA_size(rsa);
+ tmplen = size_of_RSA(pkey);
+ if (tmplen == 0) goto badarg;
if (!enif_alloc_binary(tmplen, &tmp_bin)) goto badarg;
if (RSA_padding_add_SSLv23(tmp_bin.data, tmplen, in_bin.data, in_bin.size) <= 0)
goto badarg;
@@ -5122,7 +5260,7 @@ static ERL_NIF_TERM pkey_crypt_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
if (crypt_opt.rsa_mgf1_md != NULL
&& EVP_PKEY_CTX_set_rsa_mgf1_md(ctx, crypt_opt.rsa_mgf1_md) <= 0) goto badarg;
if (crypt_opt.rsa_oaep_label.data != NULL && crypt_opt.rsa_oaep_label.size > 0) {
- unsigned char *label_copy;
+ unsigned char *label_copy = NULL;
label_copy = OPENSSL_malloc(crypt_opt.rsa_oaep_label.size);
if (label_copy == NULL) goto badarg;
memcpy((void *)(label_copy), (const void *)(crypt_opt.rsa_oaep_label.data),
@@ -5234,14 +5372,11 @@ static ERL_NIF_TERM pkey_crypt_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
if ((i > 0) && argv[0] == atom_rsa && !is_encrypt) {
#ifdef HAVE_RSA_SSLV23_PADDING
if (crypt_opt.rsa_padding == RSA_SSLV23_PADDING) {
- RSA *rsa = EVP_PKEY_get1_RSA(pkey);
unsigned char *p;
- if (rsa == NULL) goto badarg;
- tmplen = RSA_size(rsa);
- if (!enif_alloc_binary(tmplen, &tmp_bin)) {
- RSA_free(rsa);
+ tmplen = size_of_RSA(pkey);
+ if (tmplen == 0) goto badarg;
+ if (!enif_alloc_binary(tmplen, &tmp_bin))
goto badarg;
- }
p = out_bin.data;
p++;
i = RSA_padding_check_SSLv23(tmp_bin.data, tmplen, p, out_bin.size - 1, tmplen);
@@ -5252,7 +5387,6 @@ static ERL_NIF_TERM pkey_crypt_nif(ErlNifEnv *env, int argc, const ERL_NIF_TERM
tmp_bin = in_bin;
i = 1;
}
- RSA_free(rsa);
}
#endif
}
@@ -5315,6 +5449,7 @@ static ERL_NIF_TERM privkey_to_pubkey_nif(ErlNifEnv* env, int argc, const ERL_NI
RSA_get0_key(rsa, &n, &e, &d);
result[0] = bin_from_bn(env, e); // Exponent E
result[1] = bin_from_bn(env, n); // Modulus N = p*q
+ RSA_free(rsa);
EVP_PKEY_free(pkey);
return enif_make_list_from_array(env, result, 2);
}
@@ -5329,6 +5464,7 @@ static ERL_NIF_TERM privkey_to_pubkey_nif(ErlNifEnv* env, int argc, const ERL_NI
result[1] = bin_from_bn(env, q);
result[2] = bin_from_bn(env, g);
result[3] = bin_from_bn(env, pub_key);
+ DSA_free(dsa);
EVP_PKEY_free(pkey);
return enif_make_list_from_array(env, result, 4);
}
diff --git a/lib/crypto/c_src/crypto_callback.c b/lib/crypto/c_src/crypto_callback.c
index 23d2bed057..0cc7dd609d 100644
--- a/lib/crypto/c_src/crypto_callback.c
+++ b/lib/crypto/c_src/crypto_callback.c
@@ -179,6 +179,10 @@ DLLEXPORT struct crypto_callbacks* get_crypto_callbacks(int nlocks)
/* This is not really a NIF library, but we use ERL_NIF_INIT in order to
* get access to the erl_nif API (on Windows).
*/
-ERL_NIF_INIT(dummy, (ErlNifFunc*)NULL , NULL, NULL, NULL, NULL)
+static struct {
+ int dummy__;
+ ErlNifFunc funcv[0];
+} empty;
+ERL_NIF_INIT(dummy, empty.funcv, NULL, NULL, NULL, NULL)
#endif
diff --git a/lib/crypto/c_src/otp_test_engine.c b/lib/crypto/c_src/otp_test_engine.c
index 34c825059f..2c8cce094e 100644
--- a/lib/crypto/c_src/otp_test_engine.c
+++ b/lib/crypto/c_src/otp_test_engine.c
@@ -35,7 +35,12 @@
#if OPENSSL_VERSION_NUMBER < PACKED_OPENSSL_VERSION_PLAIN(1,1,0) \
|| defined(LIBRESSL_VERSION_NUMBER)
-#define OLD
+# define OLD
+#endif
+
+#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION_PLAIN(1,1,0) \
+ && !defined(LIBRESSL_VERSION_NUMBER)
+# define FAKE_RSA_IMPL
#endif
#if OPENSSL_VERSION_NUMBER >= PACKED_OPENSSL_VERSION(0,9,8,'o') \
@@ -56,13 +61,41 @@
static const char *test_engine_id = "MD5";
static const char *test_engine_name = "MD5 test engine";
-/* The callback that does the job of fetching keys on demand by the Engine */
-EVP_PKEY* test_key_load(ENGINE *er, const char *id, UI_METHOD *ui_method, void *callback_data);
+#if defined(FAKE_RSA_IMPL)
+/*-------- test of private/public keys and RSA in engine ---------*/
+static RSA_METHOD *test_rsa_method = NULL;
+
+/* Our own "RSA" implementation */
+static int test_rsa_sign(int dtype, const unsigned char *m,
+ unsigned int m_len, unsigned char *sigret,
+ unsigned int *siglen, const RSA *rsa);
+static int test_rsa_verify(int dtype, const unsigned char *m,
+ unsigned int m_len, const unsigned char *sigret,
+ unsigned int siglen, const RSA *rsa);
+static int test_rsa_free(RSA *rsa);
+#endif /* if defined(FAKE_RSA_IMPL) */
+
+/* The callbacks that do the job of fetching keys on demand by the Engine */
+EVP_PKEY* test_privkey_load(ENGINE *eng, const char *id, UI_METHOD *ui_method, void *callback_data);
+EVP_PKEY* test_pubkey_load(ENGINE *eng, const char *id, UI_METHOD *ui_method, void *callback_data);
+EVP_PKEY* test_key_load(ENGINE *er, const char *id, UI_METHOD *ui_method, void *callback_data, int priv);
+
+/*----------------------------------------------------------------*/
static int test_init(ENGINE *e) {
printf("OTP Test Engine Initializatzion!\r\n");
+#if defined(FAKE_RSA_IMPL)
+ if ( !RSA_meth_set_finish(test_rsa_method, test_rsa_free)
+ || !RSA_meth_set_sign(test_rsa_method, test_rsa_sign)
+ || !RSA_meth_set_verify(test_rsa_method, test_rsa_verify)
+ ) {
+ fprintf(stderr, "Setup RSA_METHOD failed\r\n");
+ return 0;
+ }
+#endif /* if defined(FAKE_RSA_IMPL) */
+
/* Load all digest and cipher algorithms. Needed for password protected private keys */
OpenSSL_add_all_ciphers();
OpenSSL_add_all_digests();
@@ -79,6 +112,19 @@ static void add_test_data(unsigned char *md, unsigned int len)
}
}
+#if defined(FAKE_RSA_IMPL)
+static int chk_test_data(const unsigned char *md, unsigned int len)
+{
+ unsigned int i;
+
+ for (i=0; i<len; i++) {
+ if (md[i] != (unsigned char)(i & 0xff))
+ return 0;
+ }
+ return 1;
+}
+#endif /* if defined(FAKE_RSA_IMPL) */
+
/* MD5 part */
#undef data
#ifdef OLD
@@ -184,19 +230,34 @@ static int test_engine_digest_selector(ENGINE *e, const EVP_MD **digest,
return ok;
}
-
static int bind_helper(ENGINE * e, const char *id)
{
- if (!ENGINE_set_id(e, test_engine_id) ||
- !ENGINE_set_name(e, test_engine_name) ||
- !ENGINE_set_init_function(e, test_init) ||
- !ENGINE_set_digests(e, &test_engine_digest_selector) ||
+#if defined(FAKE_RSA_IMPL)
+ test_rsa_method = RSA_meth_new("OTP test RSA method", 0);
+ if (test_rsa_method == NULL) {
+ fprintf(stderr, "RSA_meth_new failed\r\n");
+ return 0;
+ }
+#endif /* if defined(FAKE_RSA_IMPL) */
+
+ if (!ENGINE_set_id(e, test_engine_id)
+ || !ENGINE_set_name(e, test_engine_name)
+ || !ENGINE_set_init_function(e, test_init)
+ || !ENGINE_set_digests(e, &test_engine_digest_selector)
/* For testing of key storage in an Engine: */
- !ENGINE_set_load_privkey_function(e, &test_key_load) ||
- !ENGINE_set_load_pubkey_function(e, &test_key_load)
- )
+ || !ENGINE_set_load_privkey_function(e, &test_privkey_load)
+ || !ENGINE_set_load_pubkey_function(e, &test_pubkey_load)
+ )
return 0;
+#if defined(FAKE_RSA_IMPL)
+ if ( !ENGINE_set_RSA(e, test_rsa_method) ) {
+ RSA_meth_free(test_rsa_method);
+ test_rsa_method = NULL;
+ return 0;
+ }
+#endif /* if defined(FAKE_RSA_IMPL) */
+
return 1;
}
@@ -211,24 +272,29 @@ IMPLEMENT_DYNAMIC_BIND_FN(bind_helper);
*/
int pem_passwd_cb_fun(char *buf, int size, int rwflag, void *password);
-EVP_PKEY* test_key_load(ENGINE *er, const char *id, UI_METHOD *ui_method, void *callback_data)
+EVP_PKEY* test_privkey_load(ENGINE *eng, const char *id, UI_METHOD *ui_method, void *callback_data) {
+ return test_key_load(eng, id, ui_method, callback_data, 1);
+}
+
+EVP_PKEY* test_pubkey_load(ENGINE *eng, const char *id, UI_METHOD *ui_method, void *callback_data) {
+ return test_key_load(eng, id, ui_method, callback_data, 0);
+}
+
+EVP_PKEY* test_key_load(ENGINE *eng, const char *id, UI_METHOD *ui_method, void *callback_data, int priv)
{
EVP_PKEY *pkey = NULL;
FILE *f = fopen(id, "r");
if (!f) {
- fprintf(stderr, "%s:%d fopen(%s) failed\r\n", __FILE__,__LINE__,id);
- return NULL;
+ fprintf(stderr, "%s:%d fopen(%s) failed\r\n", __FILE__,__LINE__,id);
+ return NULL;
}
- /* First try to read as a private key. If that fails, try to read as a public key: */
- pkey = PEM_read_PrivateKey(f, NULL, pem_passwd_cb_fun, callback_data);
- if (!pkey) {
- /* ERR_print_errors_fp (stderr); */
- fclose(f);
- f = fopen(id, "r");
- pkey = PEM_read_PUBKEY(f, NULL, NULL, NULL);
- }
+ pkey =
+ priv
+ ? PEM_read_PrivateKey(f, NULL, pem_passwd_cb_fun, callback_data)
+ : PEM_read_PUBKEY(f, NULL, NULL, NULL);
+
fclose(f);
if (!pkey) {
@@ -278,3 +344,71 @@ int pem_passwd_cb_fun(char *buf, int size, int rwflag, void *password)
}
#endif
+
+#if defined(FAKE_RSA_IMPL)
+/* RSA sign. This returns a fixed string so the test case can test that it was called
+ instead of the cryptolib default RSA sign */
+
+unsigned char fake_flag[] = {255,3,124,180,35,10,180,151,101,247,62,59,80,122,220,
+ 142,24,180,191,34,51,150,112,27,43,142,195,60,245,213,80,179};
+
+int test_rsa_sign(int dtype,
+ /* The digest to sign */
+ const unsigned char *m, unsigned int m_len,
+ /* The allocated buffer to fill with the signature */
+ unsigned char *sigret, unsigned int *siglen,
+ /* The key */
+ const RSA *rsa)
+{
+ int slen;
+ fprintf(stderr, "test_rsa_sign (dtype=%i) called m_len=%u *siglen=%u\r\n", dtype, m_len, *siglen);
+ if (!sigret) {
+ fprintf(stderr, "sigret = NULL\r\n");
+ return -1;
+ }
+
+ /* {int i;
+ fprintf(stderr, "Digest =\r\n");
+ for(i=0; i<m_len; i++)
+ fprintf(stderr, "%i,", m[i]);
+ fprintf(stderr, "\r\n");
+ } */
+
+ if ((sizeof(fake_flag) == m_len)
+ && bcmp(m,fake_flag,m_len) == 0) {
+ printf("To be faked\r\n");
+ /* To be faked */
+ slen = RSA_size(rsa);
+ add_test_data(sigret, slen); /* The signature is 0,1,2...255,0,1... */
+ *siglen = slen; /* Must set this. Why? */
+ return 1; /* 1 = success */
+ }
+ return 0;
+}
+
+int test_rsa_verify(int dtype,
+ /* The digest to verify */
+ const unsigned char *m, unsigned int m_len,
+ /* The signature */
+ const unsigned char *sigret, unsigned int siglen,
+ /* The key */
+ const RSA *rsa)
+{
+ printf("test_rsa_verify (dtype=%i) called m_len=%u siglen=%u\r\n", dtype, m_len, siglen);
+
+ if ((sizeof(fake_flag) == m_len)
+ && bcmp(m,fake_flag,m_len) == 0) {
+ printf("To be faked\r\n");
+ return (siglen == RSA_size(rsa))
+ && chk_test_data(sigret, siglen);
+ }
+ return 0;
+}
+
+static int test_rsa_free(RSA *rsa)
+{
+ printf("test_rsa_free called\r\n");
+ return 1;
+}
+
+#endif /* if defined(FAKE_RSA_IMPL) */
diff --git a/lib/crypto/doc/src/algorithm_details.xml b/lib/crypto/doc/src/algorithm_details.xml
index 4d58d26970..854bfbb4b1 100644
--- a/lib/crypto/doc/src/algorithm_details.xml
+++ b/lib/crypto/doc/src/algorithm_details.xml
@@ -303,6 +303,20 @@
</section>
<section>
+ <title>EdDSA</title>
+ <p>EdDSA is available with OpenSSL 1.1.1 or later if not disabled by configuration.
+ To dynamically check availability, check that the atom <c>eddsa</c> is present in the
+ list with the <c>public_keys</c> tag in the return value of
+ <seealso marker="crypto#supports-0">crypto:supports()</seealso>.
+ </p>
+ <p>Support for the curves ed25519 and ed448 is implemented.
+ The actual supported named curves could be checked by examining the list with the
+ <c>curves</c> tag in the return value of
+ <seealso marker="crypto#supports-0">crypto:supports()</seealso>.
+ </p>
+ </section>
+
+ <section>
<title>Diffie-Hellman</title>
<p>Diffie-Hellman computations are available with OpenSSL versions compatible with Erlang CRYPTO
if not disabled by configuration.
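(The algorithm_details.xml text above describes a runtime availability check via crypto:supports/0. As a rough, non-normative sketch of that check -- not part of this patch, with an invented module name -- it could be written as follows, assuming supports/0 returns a proplist with public_keys and curves entries as specced in crypto.erl:)

    %% Hypothetical helper, only illustrating the check described in the
    %% documentation above; not part of the diff.
    -module(eddsa_support_sketch).
    -export([eddsa_available/0]).

    eddsa_available() ->
        Supports = crypto:supports(),
        PubKeys  = proplists:get_value(public_keys, Supports, []),
        Curves   = proplists:get_value(curves, Supports, []),
        case lists:member(eddsa, PubKeys) of
            true  -> {true, [C || C <- [ed25519, ed448], lists:member(C, Curves)]};
            false -> false
        end.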
diff --git a/lib/crypto/doc/src/crypto.xml b/lib/crypto/doc/src/crypto.xml
index 651b647e1c..b33db0d6e4 100644
--- a/lib/crypto/doc/src/crypto.xml
+++ b/lib/crypto/doc/src/crypto.xml
@@ -268,7 +268,8 @@
<datatype_title>Elliptic Curves</datatype_title>
<datatype>
<name name="ec_named_curve"/>
- <name name="edwards_curve"/>
+ <name name="edwards_curve_dh"/>
+ <name name="edwards_curve_ed"/>
<desc>
<p>Note that some curves are disabled if FIPS is enabled.</p>
</desc>
@@ -348,6 +349,14 @@
</datatype>
<datatype>
+ <name name="eddsa_public"/>
+ <name name="eddsa_private"/>
+ <name name="eddsa_params"/>
+ <desc>
+ </desc>
+ </datatype>
+
+ <datatype>
<name name="srp_public"/>
<name name="srp_private"/>
<desc>
diff --git a/lib/crypto/src/crypto.erl b/lib/crypto/src/crypto.erl
index c2ab88417e..72cb9aabfd 100644
--- a/lib/crypto/src/crypto.erl
+++ b/lib/crypto/src/crypto.erl
@@ -118,7 +118,11 @@
-type ecdsa_public() :: key_integer() .
-type ecdsa_private() :: key_integer() .
--type ecdsa_params() :: ec_named_curve() | edwards_curve() | ec_explicit_curve() .
+-type ecdsa_params() :: ec_named_curve() | ec_explicit_curve() .
+
+-type eddsa_public() :: key_integer() .
+-type eddsa_private() :: key_integer() .
+-type eddsa_params() :: edwards_curve_ed() .
-type srp_public() :: key_integer() .
-type srp_private() :: key_integer() .
@@ -135,7 +139,7 @@
-type ecdh_public() :: key_integer() .
-type ecdh_private() :: key_integer() .
--type ecdh_params() :: ec_named_curve() | edwards_curve() | ec_explicit_curve() .
+-type ecdh_params() :: ec_named_curve() | edwards_curve_dh() | ec_explicit_curve() .
%%% Curves
@@ -247,8 +251,9 @@
| wtls9
.
--type edwards_curve() :: x25519
- | x448 .
+-type edwards_curve_dh() :: x25519 | x448 .
+
+-type edwards_curve_ed() :: ed25519 | ed448 .
%%%
-type block_cipher_with_iv() :: cbc_cipher()
@@ -328,7 +333,7 @@ stop() ->
],
PKs :: [rsa | dss | ecdsa | dh | ecdh | ec_gf2m],
Macs :: [hmac | cmac | poly1305],
- Curves :: [ec_named_curve() | edwards_curve()],
+ Curves :: [ec_named_curve() | edwards_curve_dh() | edwards_curve_ed()],
RSAopts :: [rsa_sign_verify_opt() | rsa_opt()] .
supports()->
{Hashs, PubKeys, Ciphers, Macs, Curves, RsaOpts} = algorithms(),
@@ -777,7 +782,7 @@ rand_seed_nif(_Seed) -> ?nif_stub.
%%% Sign/verify
%%%
%%%================================================================
--type pk_sign_verify_algs() :: rsa | dss | ecdsa .
+-type pk_sign_verify_algs() :: rsa | dss | ecdsa | eddsa .
-type pk_sign_verify_opts() :: [ rsa_sign_verify_opt() ] .
@@ -801,7 +806,8 @@ rand_seed_nif(_Seed) -> ?nif_stub.
Msg :: binary() | {digest,binary()},
Key :: rsa_private()
| dss_private()
- | [ecdsa_private()|ecdsa_params()]
+ | [ecdsa_private() | ecdsa_params()]
+ | [eddsa_private() | eddsa_params()]
| engine_key_ref(),
Signature :: binary() .
@@ -820,6 +826,7 @@ sign(Algorithm, Type, Data, Key) ->
Key :: rsa_private()
| dss_private()
| [ecdsa_private() | ecdsa_params()]
+ | [eddsa_private() | eddsa_params()]
| engine_key_ref(),
Options :: pk_sign_verify_opts(),
Signature :: binary() .
@@ -842,12 +849,14 @@ pkey_sign_nif(_Algorithm, _Type, _Digest, _Key, _Options) -> ?nif_stub.
when Algorithm :: pk_sign_verify_algs(),
DigestType :: rsa_digest_type()
| dss_digest_type()
- | ecdsa_digest_type(),
+ | ecdsa_digest_type()
+ | none,
Msg :: binary() | {digest,binary()},
Signature :: binary(),
- Key :: rsa_private()
- | dss_private()
- | [ecdsa_private() | ecdsa_params()]
+ Key :: rsa_public()
+ | dss_public()
+ | [ecdsa_public() | ecdsa_params()]
+ | [eddsa_public() | eddsa_params()]
| engine_key_ref(),
Result :: boolean().
@@ -865,6 +874,7 @@ verify(Algorithm, Type, Data, Signature, Key) ->
Key :: rsa_public()
| dss_public()
| [ecdsa_public() | ecdsa_params()]
+ | [eddsa_public() | eddsa_params()]
| engine_key_ref(),
Options :: pk_sign_verify_opts(),
Result :: boolean().
@@ -1214,7 +1224,11 @@ engine_load_1(Engine, PreCmds, PostCmds, EngineMethods) ->
throw:Error ->
%% The engine couldn't initialise, release the structural reference
ok = engine_free_nif(Engine),
- throw(Error)
+ throw(Error);
+ error:badarg ->
+ %% For example bad argument list, release the structural reference
+ ok = engine_free_nif(Engine),
+ error(badarg)
end.
engine_load_2(Engine, PostCmds, EngineMethods) ->
@@ -1762,7 +1776,9 @@ ec_key_generate(_Curve, _Key) -> ?nif_stub.
ecdh_compute_key_nif(_Others, _Curve, _My) -> ?nif_stub.
--spec ec_curves() -> [EllipticCurve] when EllipticCurve :: ec_named_curve() | edwards_curve() .
+-spec ec_curves() -> [EllipticCurve] when EllipticCurve :: ec_named_curve()
+ | edwards_curve_dh()
+ | edwards_curve_ed() .
ec_curves() ->
crypto_ec_curves:curves().
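(The spec changes above make the EdDSA key argument a two-element list, [RawKeyBinary, Curve], with Curve being ed25519 or ed448. A minimal sign/verify sketch in that shape is given below; it mirrors what do_sign_verify/1 in crypto_SUITE.erl does for the ed25519 group and uses the RFC 8032 section 7.1 test 1 key pair. The module and helper names are illustrative only, the digest type is passed as undefined exactly as in the suite, and the call only succeeds on a cryptolib with EdDSA support, i.e. OpenSSL 1.1.1 or later:)

    %% Illustrative module, not part of the patch.
    -module(eddsa_sign_sketch).
    -export([demo/0]).

    demo() ->
        Priv = hex("9d61b19deffd5a60ba844af492ec2cc4"
                   "4449c5697b326919703bac031cae7f60"),
        Pub  = hex("d75a980182b10ab7d54bfed3c964073a"
                   "0ee172f3daa62325af021a68f707511a"),
        Msg  = <<>>,
        Sig  = crypto:sign(eddsa, undefined, Msg, [Priv, ed25519]),
        true = crypto:verify(eddsa, undefined, Msg, Sig, [Pub, ed25519]),
        Sig.

    %% Local hex decoder (the suite uses its own hexstr2bin/1 helper).
    hex(Str) ->
        << <<(list_to_integer([C], 16)):4>> || C <- Str >>.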
diff --git a/lib/crypto/test/crypto_SUITE.erl b/lib/crypto/test/crypto_SUITE.erl
index 6b7ed06797..6c6188f775 100644
--- a/lib/crypto/test/crypto_SUITE.erl
+++ b/lib/crypto/test/crypto_SUITE.erl
@@ -1,4 +1,4 @@
-%%
+%
%% %CopyrightBegin%
%%
%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
@@ -59,6 +59,8 @@ groups() ->
{group, rsa},
{group, dss},
{group, ecdsa},
+ {group, ed25519},
+ {group, ed448},
{group, dh},
{group, ecdh},
{group, srp},
@@ -146,6 +148,12 @@ groups() ->
{ecdsa, [], [sign_verify
%% Does not work yet: ,public_encrypt, private_encrypt
]},
+ {ed25519, [], [sign_verify
+ %% Does not work yet: ,public_encrypt, private_encrypt
+ ]},
+ {ed448, [], [sign_verify
+ %% Does not work yet: ,public_encrypt, private_encrypt
+ ]},
{dh, [], [generate_compute,
compute_bug]},
{ecdh, [], [generate_all_supported, compute, generate]},
@@ -573,7 +581,8 @@ generate_all_supported(_Config) ->
ct:log("ERROR: Curve ~p exception ~p:~p~n~p", [C,Cls,Err,Stack]),
{error,{C,{Cls,Err}}}
end
- || C <- crypto:ec_curves()
+ || C <- crypto:ec_curves(),
+ not lists:member(C, [ed25519, ed448])
],
OK = [C || {ok,C} <- Results],
ct:log("Ok (len=~p): ~p", [length(OK), OK]),
@@ -904,6 +913,29 @@ aead_cipher({Type, Key, PlainText, IV, AAD, CipherText, CipherTag, TagLen, Info}
{got, Other1}})
end.
+do_sign_verify({Type, undefined=Hash, Private, Public, Msg, Signature}) ->
+ case crypto:sign(eddsa, Hash, Msg, [Private,Type]) of
+ Signature ->
+ ct:log("OK crypto:sign(eddsa, ~p, Msg, [Private,~p])", [Hash,Type]),
+ case crypto:verify(eddsa, Hash, Msg, Signature, [Public,Type]) of
+ true ->
+ ct:log("OK crypto:verify(eddsa, ~p, Msg, Signature, [Public,~p])", [Hash,Type]),
+ negative_verify(eddsa, Hash, Msg, <<10,20>>, [Public,Type]);
+ false ->
+ ct:log("ERROR crypto:verify(eddsa, ~p, Msg= ~p, Signature= ~p, [Public= ~p,~p])",
+ [Hash,Msg,Signature,Public,Type]),
+ ct:fail({{crypto, verify, [eddsa, Hash, Msg, Signature, [Public,Type]]}})
+ end;
+ ErrorSig ->
+ ct:log("ERROR crypto:sign(~p, ~p, ..., [Private= ~p,~p])", [eddsa,Hash,Private,Type]),
+ ct:log("ERROR crypto:verify(eddsa, ~p, Msg= ~p, [Public= ~p,~p])~n"
+ "ErrorSig = ~p~n"
+ "CorrectSig = ~p~n"
+ ,
+ [Hash,Msg,Public,Type,ErrorSig,Signature]),
+ ct:fail({{crypto, sign, [Type, Hash, Msg, ErrorSig, [Private]]}})
+ end;
+
do_sign_verify({Type, Hash, Public, Private, Msg}) ->
Signature = crypto:sign(Type, Hash, Msg, Private),
case crypto:verify(Type, Hash, Msg, Signature, Public) of
@@ -1463,6 +1495,12 @@ group_config(ecdsa = Type, Config) ->
MsgPubEnc = <<"7896345786348 Asldi">>,
PubPrivEnc = [{ecdsa, Public, Private, MsgPubEnc, []}],
[{sign_verify, SignVerify}, {pub_priv_encrypt, PubPrivEnc} | Config];
+
+group_config(Type, Config) when Type == ed25519 ; Type == ed448 ->
+ TestVectors = eddsa(Type),
+ [{sign_verify,TestVectors} | Config];
+
+
group_config(srp, Config) ->
GenerateCompute = [srp3(), srp6(), srp6a(), srp6a_smaller_prime()],
[{generate_compute, GenerateCompute} | Config];
@@ -2722,6 +2760,392 @@ srp(ClientPrivate, Generator, Prime, Version, Verifier, ServerPublic, ServerPriv
ServerPublic, ServerPrivate, {host, [Verifier, Generator, Prime, Version]},
{host, [Verifier, Prime, Version, Scrambler]},
SessionKey}.
+
+eddsa(ed25519) ->
+ %% https://tools.ietf.org/html/rfc8032#section-7.1
+ %% {ALGORITHM, (SHA)}, SECRET KEY, PUBLIC KEY, MESSAGE, SIGNATURE}
+ [
+ %% TEST 1
+ {ed25519, undefined,
+ hexstr2bin("9d61b19deffd5a60ba844af492ec2cc4"
+ "4449c5697b326919703bac031cae7f60"),
+ hexstr2bin("d75a980182b10ab7d54bfed3c964073a"
+ "0ee172f3daa62325af021a68f707511a"),
+ hexstr2bin(""),
+ hexstr2bin("e5564300c360ac729086e2cc806e828a"
+ "84877f1eb8e5d974d873e06522490155"
+ "5fb8821590a33bacc61e39701cf9b46b"
+ "d25bf5f0595bbe24655141438e7a100b")},
+ %% TEST 2
+ {ed25519, undefined,
+ hexstr2bin("4ccd089b28ff96da9db6c346ec114e0f"
+ "5b8a319f35aba624da8cf6ed4fb8a6fb"),
+ hexstr2bin("3d4017c3e843895a92b70aa74d1b7ebc"
+ "9c982ccf2ec4968cc0cd55f12af4660c"),
+ hexstr2bin("72"),
+ hexstr2bin("92a009a9f0d4cab8720e820b5f642540"
+ "a2b27b5416503f8fb3762223ebdb69da"
+ "085ac1e43e15996e458f3613d0f11d8c"
+ "387b2eaeb4302aeeb00d291612bb0c00")},
+ %% TEST 3
+ {ed25519, undefined,
+ hexstr2bin("c5aa8df43f9f837bedb7442f31dcb7b1"
+ "66d38535076f094b85ce3a2e0b4458f7"),
+ hexstr2bin("fc51cd8e6218a1a38da47ed00230f058"
+ "0816ed13ba3303ac5deb911548908025"),
+ hexstr2bin("af82"),
+ hexstr2bin("6291d657deec24024827e69c3abe01a3"
+ "0ce548a284743a445e3680d7db5ac3ac"
+ "18ff9b538d16f290ae67f760984dc659"
+ "4a7c15e9716ed28dc027beceea1ec40a")},
+ %% TEST 1024
+ {ed25519, undefined,
+ hexstr2bin("f5e5767cf153319517630f226876b86c"
+ "8160cc583bc013744c6bf255f5cc0ee5"),
+ hexstr2bin("278117fc144c72340f67d0f2316e8386"
+ "ceffbf2b2428c9c51fef7c597f1d426e"),
+ hexstr2bin("08b8b2b733424243760fe426a4b54908"
+ "632110a66c2f6591eabd3345e3e4eb98"
+ "fa6e264bf09efe12ee50f8f54e9f77b1"
+ "e355f6c50544e23fb1433ddf73be84d8"
+ "79de7c0046dc4996d9e773f4bc9efe57"
+ "38829adb26c81b37c93a1b270b20329d"
+ "658675fc6ea534e0810a4432826bf58c"
+ "941efb65d57a338bbd2e26640f89ffbc"
+ "1a858efcb8550ee3a5e1998bd177e93a"
+ "7363c344fe6b199ee5d02e82d522c4fe"
+ "ba15452f80288a821a579116ec6dad2b"
+ "3b310da903401aa62100ab5d1a36553e"
+ "06203b33890cc9b832f79ef80560ccb9"
+ "a39ce767967ed628c6ad573cb116dbef"
+ "efd75499da96bd68a8a97b928a8bbc10"
+ "3b6621fcde2beca1231d206be6cd9ec7"
+ "aff6f6c94fcd7204ed3455c68c83f4a4"
+ "1da4af2b74ef5c53f1d8ac70bdcb7ed1"
+ "85ce81bd84359d44254d95629e9855a9"
+ "4a7c1958d1f8ada5d0532ed8a5aa3fb2"
+ "d17ba70eb6248e594e1a2297acbbb39d"
+ "502f1a8c6eb6f1ce22b3de1a1f40cc24"
+ "554119a831a9aad6079cad88425de6bd"
+ "e1a9187ebb6092cf67bf2b13fd65f270"
+ "88d78b7e883c8759d2c4f5c65adb7553"
+ "878ad575f9fad878e80a0c9ba63bcbcc"
+ "2732e69485bbc9c90bfbd62481d9089b"
+ "eccf80cfe2df16a2cf65bd92dd597b07"
+ "07e0917af48bbb75fed413d238f5555a"
+ "7a569d80c3414a8d0859dc65a46128ba"
+ "b27af87a71314f318c782b23ebfe808b"
+ "82b0ce26401d2e22f04d83d1255dc51a"
+ "ddd3b75a2b1ae0784504df543af8969b"
+ "e3ea7082ff7fc9888c144da2af58429e"
+ "c96031dbcad3dad9af0dcbaaaf268cb8"
+ "fcffead94f3c7ca495e056a9b47acdb7"
+ "51fb73e666c6c655ade8297297d07ad1"
+ "ba5e43f1bca32301651339e22904cc8c"
+ "42f58c30c04aafdb038dda0847dd988d"
+ "cda6f3bfd15c4b4c4525004aa06eeff8"
+ "ca61783aacec57fb3d1f92b0fe2fd1a8"
+ "5f6724517b65e614ad6808d6f6ee34df"
+ "f7310fdc82aebfd904b01e1dc54b2927"
+ "094b2db68d6f903b68401adebf5a7e08"
+ "d78ff4ef5d63653a65040cf9bfd4aca7"
+ "984a74d37145986780fc0b16ac451649"
+ "de6188a7dbdf191f64b5fc5e2ab47b57"
+ "f7f7276cd419c17a3ca8e1b939ae49e4"
+ "88acba6b965610b5480109c8b17b80e1"
+ "b7b750dfc7598d5d5011fd2dcc5600a3"
+ "2ef5b52a1ecc820e308aa342721aac09"
+ "43bf6686b64b2579376504ccc493d97e"
+ "6aed3fb0f9cd71a43dd497f01f17c0e2"
+ "cb3797aa2a2f256656168e6c496afc5f"
+ "b93246f6b1116398a346f1a641f3b041"
+ "e989f7914f90cc2c7fff357876e506b5"
+ "0d334ba77c225bc307ba537152f3f161"
+ "0e4eafe595f6d9d90d11faa933a15ef1"
+ "369546868a7f3a45a96768d40fd9d034"
+ "12c091c6315cf4fde7cb68606937380d"
+ "b2eaaa707b4c4185c32eddcdd306705e"
+ "4dc1ffc872eeee475a64dfac86aba41c"
+ "0618983f8741c5ef68d3a101e8a3b8ca"
+ "c60c905c15fc910840b94c00a0b9d0"),
+ hexstr2bin("0aab4c900501b3e24d7cdf4663326a3a"
+ "87df5e4843b2cbdb67cbf6e460fec350"
+ "aa5371b1508f9f4528ecea23c436d94b"
+ "5e8fcd4f681e30a6ac00a9704a188a03")},
+ %% TEST SHA(abc)
+ {ed25519, undefined,
+ hexstr2bin("833fe62409237b9d62ec77587520911e"
+ "9a759cec1d19755b7da901b96dca3d42"),
+ hexstr2bin("ec172b93ad5e563bf4932c70e1245034"
+ "c35467ef2efd4d64ebf819683467e2bf"),
+ hexstr2bin("ddaf35a193617abacc417349ae204131"
+ "12e6fa4e89a97ea20a9eeee64b55d39a"
+ "2192992a274fc1a836ba3c23a3feebbd"
+ "454d4423643ce80e2a9ac94fa54ca49f"),
+ hexstr2bin("dc2a4459e7369633a52b1bf277839a00"
+ "201009a3efbf3ecb69bea2186c26b589"
+ "09351fc9ac90b3ecfdfbc7c66431e030"
+ "3dca179c138ac17ad9bef1177331a704")}
+ ];
+
+eddsa(ed448) ->
+ %% https://tools.ietf.org/html/rfc8032#section-7.4
+ [{ed448, undefined,
+ hexstr2bin("6c82a562cb808d10d632be89c8513ebf"
+ "6c929f34ddfa8c9f63c9960ef6e348a3"
+ "528c8a3fcc2f044e39a3fc5b94492f8f"
+ "032e7549a20098f95b"),
+ hexstr2bin("5fd7449b59b461fd2ce787ec616ad46a"
+ "1da1342485a70e1f8a0ea75d80e96778"
+ "edf124769b46c7061bd6783df1e50f6c"
+ "d1fa1abeafe8256180"),
+ hexstr2bin(""),
+ hexstr2bin("533a37f6bbe457251f023c0d88f976ae"
+ "2dfb504a843e34d2074fd823d41a591f"
+ "2b233f034f628281f2fd7a22ddd47d78"
+ "28c59bd0a21bfd3980ff0d2028d4b18a"
+ "9df63e006c5d1c2d345b925d8dc00b41"
+ "04852db99ac5c7cdda8530a113a0f4db"
+ "b61149f05a7363268c71d95808ff2e65"
+ "2600")},
+ %% 1 octet
+ {ed448, undefined,
+ hexstr2bin("c4eab05d357007c632f3dbb48489924d"
+ "552b08fe0c353a0d4a1f00acda2c463a"
+ "fbea67c5e8d2877c5e3bc397a659949e"
+ "f8021e954e0a12274e"),
+ hexstr2bin("43ba28f430cdff456ae531545f7ecd0a"
+ "c834a55d9358c0372bfa0c6c6798c086"
+ "6aea01eb00742802b8438ea4cb82169c"
+ "235160627b4c3a9480"),
+ hexstr2bin("03"),
+ hexstr2bin("26b8f91727bd62897af15e41eb43c377"
+ "efb9c610d48f2335cb0bd0087810f435"
+ "2541b143c4b981b7e18f62de8ccdf633"
+ "fc1bf037ab7cd779805e0dbcc0aae1cb"
+ "cee1afb2e027df36bc04dcecbf154336"
+ "c19f0af7e0a6472905e799f1953d2a0f"
+ "f3348ab21aa4adafd1d234441cf807c0"
+ "3a00")},
+
+ %% %% 1 octet (with context)
+ %% {ed448, undefined,
+ %% hexstr2bin("c4eab05d357007c632f3dbb48489924d"
+ %% "552b08fe0c353a0d4a1f00acda2c463a"
+ %% "fbea67c5e8d2877c5e3bc397a659949e"
+ %% "f8021e954e0a12274e"),
+ %% hexstr2bin("43ba28f430cdff456ae531545f7ecd0a"
+ %% "c834a55d9358c0372bfa0c6c6798c086"
+ %% "6aea01eb00742802b8438ea4cb82169c"
+ %% "235160627b4c3a9480"),
+ %% hexstr2bin("03"),
+ %% hexstr2bin("666f6f"), % Context
+ %% hexstr2bin("d4f8f6131770dd46f40867d6fd5d5055"
+ %% "de43541f8c5e35abbcd001b32a89f7d2"
+ %% "151f7647f11d8ca2ae279fb842d60721"
+ %% "7fce6e042f6815ea000c85741de5c8da"
+ %% "1144a6a1aba7f96de42505d7a7298524"
+ %% "fda538fccbbb754f578c1cad10d54d0d"
+ %% "5428407e85dcbc98a49155c13764e66c"
+ %% "3c00")},
+
+ %% 11 octets
+ {ed448, undefined,
+ hexstr2bin("cd23d24f714274e744343237b93290f5"
+ "11f6425f98e64459ff203e8985083ffd"
+ "f60500553abc0e05cd02184bdb89c4cc"
+ "d67e187951267eb328"),
+ hexstr2bin("dcea9e78f35a1bf3499a831b10b86c90"
+ "aac01cd84b67a0109b55a36e9328b1e3"
+ "65fce161d71ce7131a543ea4cb5f7e9f"
+ "1d8b00696447001400"),
+ hexstr2bin("0c3e544074ec63b0265e0c"),
+ hexstr2bin("1f0a8888ce25e8d458a21130879b840a"
+ "9089d999aaba039eaf3e3afa090a09d3"
+ "89dba82c4ff2ae8ac5cdfb7c55e94d5d"
+ "961a29fe0109941e00b8dbdeea6d3b05"
+ "1068df7254c0cdc129cbe62db2dc957d"
+ "bb47b51fd3f213fb8698f064774250a5"
+ "028961c9bf8ffd973fe5d5c206492b14"
+ "0e00")},
+ %% 12 octets
+ {ed448, undefined,
+ hexstr2bin("258cdd4ada32ed9c9ff54e63756ae582"
+ "fb8fab2ac721f2c8e676a72768513d93"
+ "9f63dddb55609133f29adf86ec9929dc"
+ "cb52c1c5fd2ff7e21b"),
+ hexstr2bin("3ba16da0c6f2cc1f30187740756f5e79"
+ "8d6bc5fc015d7c63cc9510ee3fd44adc"
+ "24d8e968b6e46e6f94d19b945361726b"
+ "d75e149ef09817f580"),
+ hexstr2bin("64a65f3cdedcdd66811e2915"),
+ hexstr2bin("7eeeab7c4e50fb799b418ee5e3197ff6"
+ "bf15d43a14c34389b59dd1a7b1b85b4a"
+ "e90438aca634bea45e3a2695f1270f07"
+ "fdcdf7c62b8efeaf00b45c2c96ba457e"
+ "b1a8bf075a3db28e5c24f6b923ed4ad7"
+ "47c3c9e03c7079efb87cb110d3a99861"
+ "e72003cbae6d6b8b827e4e6c143064ff"
+ "3c00")},
+ %% 13 octets
+ {ed448, undefined,
+ hexstr2bin("7ef4e84544236752fbb56b8f31a23a10"
+ "e42814f5f55ca037cdcc11c64c9a3b29"
+ "49c1bb60700314611732a6c2fea98eeb"
+ "c0266a11a93970100e"),
+ hexstr2bin("b3da079b0aa493a5772029f0467baebe"
+ "e5a8112d9d3a22532361da294f7bb381"
+ "5c5dc59e176b4d9f381ca0938e13c6c0"
+ "7b174be65dfa578e80"),
+ hexstr2bin("64a65f3cdedcdd66811e2915e7"),
+ hexstr2bin("6a12066f55331b6c22acd5d5bfc5d712"
+ "28fbda80ae8dec26bdd306743c5027cb"
+ "4890810c162c027468675ecf645a8317"
+ "6c0d7323a2ccde2d80efe5a1268e8aca"
+ "1d6fbc194d3f77c44986eb4ab4177919"
+ "ad8bec33eb47bbb5fc6e28196fd1caf5"
+ "6b4e7e0ba5519234d047155ac727a105"
+ "3100")},
+ %% 64 octets
+ {ed448, undefined,
+ hexstr2bin("d65df341ad13e008567688baedda8e9d"
+ "cdc17dc024974ea5b4227b6530e339bf"
+ "f21f99e68ca6968f3cca6dfe0fb9f4fa"
+ "b4fa135d5542ea3f01"),
+ hexstr2bin("df9705f58edbab802c7f8363cfe5560a"
+ "b1c6132c20a9f1dd163483a26f8ac53a"
+ "39d6808bf4a1dfbd261b099bb03b3fb5"
+ "0906cb28bd8a081f00"),
+ hexstr2bin("bd0f6a3747cd561bdddf4640a332461a"
+ "4a30a12a434cd0bf40d766d9c6d458e5"
+ "512204a30c17d1f50b5079631f64eb31"
+ "12182da3005835461113718d1a5ef944"),
+ hexstr2bin("554bc2480860b49eab8532d2a533b7d5"
+ "78ef473eeb58c98bb2d0e1ce488a98b1"
+ "8dfde9b9b90775e67f47d4a1c3482058"
+ "efc9f40d2ca033a0801b63d45b3b722e"
+ "f552bad3b4ccb667da350192b61c508c"
+ "f7b6b5adadc2c8d9a446ef003fb05cba"
+ "5f30e88e36ec2703b349ca229c267083"
+ "3900")},
+ %% 256 octets
+ {ed448, undefined,
+ hexstr2bin("2ec5fe3c17045abdb136a5e6a913e32a"
+ "b75ae68b53d2fc149b77e504132d3756"
+ "9b7e766ba74a19bd6162343a21c8590a"
+ "a9cebca9014c636df5"),
+ hexstr2bin("79756f014dcfe2079f5dd9e718be4171"
+ "e2ef2486a08f25186f6bff43a9936b9b"
+ "fe12402b08ae65798a3d81e22e9ec80e"
+ "7690862ef3d4ed3a00"),
+ hexstr2bin("15777532b0bdd0d1389f636c5f6b9ba7"
+ "34c90af572877e2d272dd078aa1e567c"
+ "fa80e12928bb542330e8409f31745041"
+ "07ecd5efac61ae7504dabe2a602ede89"
+ "e5cca6257a7c77e27a702b3ae39fc769"
+ "fc54f2395ae6a1178cab4738e543072f"
+ "c1c177fe71e92e25bf03e4ecb72f47b6"
+ "4d0465aaea4c7fad372536c8ba516a60"
+ "39c3c2a39f0e4d832be432dfa9a706a6"
+ "e5c7e19f397964ca4258002f7c0541b5"
+ "90316dbc5622b6b2a6fe7a4abffd9610"
+ "5eca76ea7b98816af0748c10df048ce0"
+ "12d901015a51f189f3888145c03650aa"
+ "23ce894c3bd889e030d565071c59f409"
+ "a9981b51878fd6fc110624dcbcde0bf7"
+ "a69ccce38fabdf86f3bef6044819de11"),
+ hexstr2bin("c650ddbb0601c19ca11439e1640dd931"
+ "f43c518ea5bea70d3dcde5f4191fe53f"
+ "00cf966546b72bcc7d58be2b9badef28"
+ "743954e3a44a23f880e8d4f1cfce2d7a"
+ "61452d26da05896f0a50da66a239a8a1"
+ "88b6d825b3305ad77b73fbac0836ecc6"
+ "0987fd08527c1a8e80d5823e65cafe2a"
+ "3d00")},
+ %% 1023 octets
+ {ed448, undefined,
+ hexstr2bin("872d093780f5d3730df7c212664b37b8"
+ "a0f24f56810daa8382cd4fa3f77634ec"
+ "44dc54f1c2ed9bea86fafb7632d8be19"
+ "9ea165f5ad55dd9ce8"),
+ hexstr2bin("a81b2e8a70a5ac94ffdbcc9badfc3feb"
+ "0801f258578bb114ad44ece1ec0e799d"
+ "a08effb81c5d685c0c56f64eecaef8cd"
+ "f11cc38737838cf400"),
+ hexstr2bin("6ddf802e1aae4986935f7f981ba3f035"
+ "1d6273c0a0c22c9c0e8339168e675412"
+ "a3debfaf435ed651558007db4384b650"
+ "fcc07e3b586a27a4f7a00ac8a6fec2cd"
+ "86ae4bf1570c41e6a40c931db27b2faa"
+ "15a8cedd52cff7362c4e6e23daec0fbc"
+ "3a79b6806e316efcc7b68119bf46bc76"
+ "a26067a53f296dafdbdc11c77f7777e9"
+ "72660cf4b6a9b369a6665f02e0cc9b6e"
+ "dfad136b4fabe723d2813db3136cfde9"
+ "b6d044322fee2947952e031b73ab5c60"
+ "3349b307bdc27bc6cb8b8bbd7bd32321"
+ "9b8033a581b59eadebb09b3c4f3d2277"
+ "d4f0343624acc817804728b25ab79717"
+ "2b4c5c21a22f9c7839d64300232eb66e"
+ "53f31c723fa37fe387c7d3e50bdf9813"
+ "a30e5bb12cf4cd930c40cfb4e1fc6225"
+ "92a49588794494d56d24ea4b40c89fc0"
+ "596cc9ebb961c8cb10adde976a5d602b"
+ "1c3f85b9b9a001ed3c6a4d3b1437f520"
+ "96cd1956d042a597d561a596ecd3d173"
+ "5a8d570ea0ec27225a2c4aaff26306d1"
+ "526c1af3ca6d9cf5a2c98f47e1c46db9"
+ "a33234cfd4d81f2c98538a09ebe76998"
+ "d0d8fd25997c7d255c6d66ece6fa56f1"
+ "1144950f027795e653008f4bd7ca2dee"
+ "85d8e90f3dc315130ce2a00375a318c7"
+ "c3d97be2c8ce5b6db41a6254ff264fa6"
+ "155baee3b0773c0f497c573f19bb4f42"
+ "40281f0b1f4f7be857a4e59d416c06b4"
+ "c50fa09e1810ddc6b1467baeac5a3668"
+ "d11b6ecaa901440016f389f80acc4db9"
+ "77025e7f5924388c7e340a732e554440"
+ "e76570f8dd71b7d640b3450d1fd5f041"
+ "0a18f9a3494f707c717b79b4bf75c984"
+ "00b096b21653b5d217cf3565c9597456"
+ "f70703497a078763829bc01bb1cbc8fa"
+ "04eadc9a6e3f6699587a9e75c94e5bab"
+ "0036e0b2e711392cff0047d0d6b05bd2"
+ "a588bc109718954259f1d86678a579a3"
+ "120f19cfb2963f177aeb70f2d4844826"
+ "262e51b80271272068ef5b3856fa8535"
+ "aa2a88b2d41f2a0e2fda7624c2850272"
+ "ac4a2f561f8f2f7a318bfd5caf969614"
+ "9e4ac824ad3460538fdc25421beec2cc"
+ "6818162d06bbed0c40a387192349db67"
+ "a118bada6cd5ab0140ee273204f628aa"
+ "d1c135f770279a651e24d8c14d75a605"
+ "9d76b96a6fd857def5e0b354b27ab937"
+ "a5815d16b5fae407ff18222c6d1ed263"
+ "be68c95f32d908bd895cd76207ae7264"
+ "87567f9a67dad79abec316f683b17f2d"
+ "02bf07e0ac8b5bc6162cf94697b3c27c"
+ "d1fea49b27f23ba2901871962506520c"
+ "392da8b6ad0d99f7013fbc06c2c17a56"
+ "9500c8a7696481c1cd33e9b14e40b82e"
+ "79a5f5db82571ba97bae3ad3e0479515"
+ "bb0e2b0f3bfcd1fd33034efc6245eddd"
+ "7ee2086ddae2600d8ca73e214e8c2b0b"
+ "db2b047c6a464a562ed77b73d2d841c4"
+ "b34973551257713b753632efba348169"
+ "abc90a68f42611a40126d7cb21b58695"
+ "568186f7e569d2ff0f9e745d0487dd2e"
+ "b997cafc5abf9dd102e62ff66cba87"),
+ hexstr2bin("e301345a41a39a4d72fff8df69c98075"
+ "a0cc082b802fc9b2b6bc503f926b65bd"
+ "df7f4c8f1cb49f6396afc8a70abe6d8a"
+ "ef0db478d4c6b2970076c6a0484fe76d"
+ "76b3a97625d79f1ce240e7c576750d29"
+ "5528286f719b413de9ada3e8eb78ed57"
+ "3603ce30d8bb761785dc30dbc320869e"
+ "1a00")}
+ ].
+
ecdh() ->
%% http://csrc.nist.gov/groups/STM/cavp/
Curves = crypto:ec_curves() ++
diff --git a/lib/crypto/test/engine_SUITE.erl b/lib/crypto/test/engine_SUITE.erl
index 0427923941..8a45fc9076 100644
--- a/lib/crypto/test/engine_SUITE.erl
+++ b/lib/crypto/test/engine_SUITE.erl
@@ -51,12 +51,14 @@ all() ->
ctrl_cmd_string,
ctrl_cmd_string_optional,
ensure_load,
- {group, engine_stored_key}
+ {group, engine_stored_key},
+ {group, engine_fakes_rsa}
].
groups() ->
[{engine_stored_key, [],
- [sign_verify_rsa,
+ [
+ sign_verify_rsa,
sign_verify_dsa,
sign_verify_ecdsa,
sign_verify_rsa_pwd,
@@ -71,7 +73,10 @@ groups() ->
get_pub_from_priv_key_rsa_pwd_bad_pwd,
get_pub_from_priv_key_dsa,
get_pub_from_priv_key_ecdsa
- ]}].
+ ]},
+ {engine_fakes_rsa, [], [sign_verify_rsa_fake
+ ]}
+ ].
init_per_suite(Config) ->
@@ -102,7 +107,20 @@ end_per_suite(_Config) ->
%%--------------------------------------------------------------------
init_per_group(engine_stored_key, Config) ->
- case load_storage_engine(Config) of
+ group_load_engine(Config, [engine_method_rsa]);
+init_per_group(engine_fakes_rsa, Config) ->
+ case crypto:info_lib() of
+ [{<<"OpenSSL">>,LibVer,_}] when is_integer(LibVer), LibVer >= 16#10100000 ->
+ group_load_engine(Config, []);
+ _ ->
+ {skip, "Too low OpenSSL cryptolib version"}
+ end;
+init_per_group(_Group, Config0) ->
+ Config0.
+
+
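The version guard in init_per_group/2 above compares crypto:info_lib/0 against OpenSSL's packed version number. A minimal sketch of what 16#10100000 stands for, assuming the usual 0xMNNFFPPS layout of that number:

    <<Major:4, Minor:8, Fix:8, _Patch:8, _Status:4>> = <<16#10100000:32>>,
    {1,1,0} = {Major, Minor, Fix}.   %% i.e. the fake-RSA group needs OpenSSL >= 1.1.0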
+group_load_engine(Config, ExcludeMthds) ->
+ case load_storage_engine(Config, ExcludeMthds) of
{ok, E} ->
KeyDir = key_dir(Config),
[{storage_engine,E}, {storage_dir,KeyDir} | Config];
@@ -115,19 +133,19 @@ init_per_group(engine_stored_key, Config) ->
Other ->
ct:log("Engine load failed: ~p",[Other]),
{fail, "Engine load failed"}
- end;
-init_per_group(_Group, Config0) ->
- Config0.
+ end.
+
+
-end_per_group(engine_stored_key, Config) ->
+
+
+end_per_group(_, Config) ->
case proplists:get_value(storage_engine, Config) of
undefined ->
ok;
E ->
ok = crypto:engine_unload(E)
- end;
-end_per_group(_, _) ->
- ok.
+ end.
%%--------------------------------------------------------------------
init_per_testcase(_Case, Config) ->
@@ -421,6 +439,9 @@ bad_arguments(Config) when is_list(Config) ->
try
try
crypto:engine_load(fail_engine, [], [])
+ of
+ X1 ->
+ ct:fail("1 Got ~p",[X1])
catch
error:badarg ->
ok
@@ -432,6 +453,11 @@ bad_arguments(Config) when is_list(Config) ->
{<<"ID">>, <<"MD5">>},
<<"LOAD">>],
[])
+ of
+ {error,bad_engine_id} ->
+ throw(dynamic_engine_unsupported);
+ X2 ->
+ ct:fail("2 Got ~p",[X2])
catch
error:badarg ->
ok
@@ -442,13 +468,20 @@ bad_arguments(Config) when is_list(Config) ->
{'ID', <<"MD5">>},
<<"LOAD">>],
[])
+ of
+ {error,bad_engine_id} -> % should have happened in the previous try...catch end!
+ throw(dynamic_engine_unsupported);
+ X3 ->
+ ct:fail("3 Got ~p",[X3])
catch
error:badarg ->
ok
end
catch
error:notsup ->
- {skip, "Engine not supported on this SSL version"}
+ {skip, "Engine not supported on this SSL version"};
+ throw:dynamic_engine_unsupported ->
+ {skip, "Dynamic Engine not supported"}
end
end.
@@ -554,11 +587,11 @@ ctrl_cmd_string(Config) when is_list(Config) ->
{ok, E} ->
case crypto:engine_ctrl_cmd_string(E, <<"TEST">>, <<"17">>) of
ok ->
+ ok = crypto:engine_unload(E),
ct:fail(fail_ctrl_cmd_should_fail);
{error,ctrl_cmd_failed} ->
- ok
- end,
- ok = crypto:engine_unload(E);
+ ok = crypto:engine_unload(E)
+ end;
{error, bad_engine_id} ->
{skip, "Dynamic Engine not supported"}
end
@@ -584,11 +617,12 @@ ctrl_cmd_string_optional(Config) when is_list(Config) ->
{ok, E} ->
case crypto:engine_ctrl_cmd_string(E, <<"TEST">>, <<"17">>, true) of
ok ->
- ok;
- _ ->
+ ok = crypto:engine_unload(E);
+ Err ->
+ ct:log("Error: ~p",[Err]),
+ ok = crypto:engine_unload(E),
ct:fail(fail_ctrl_cmd_string)
- end,
- ok = crypto:engine_unload(E);
+ end;
{error, bad_engine_id} ->
{skip, "Dynamic Engine not supported"}
end
@@ -650,6 +684,14 @@ sign_verify_rsa(Config) ->
key_id => key_id(Config, "rsa_public_key.pem")},
sign_verify(rsa, sha, Priv, Pub).
+sign_verify_rsa_fake(Config) ->
+ %% Use fake engine rsa implementation
+ Priv = #{engine => engine_ref(Config),
+ key_id => key_id(Config, "rsa_private_key.pem")},
+ Pub = #{engine => engine_ref(Config),
+ key_id => key_id(Config, "rsa_public_key.pem")},
+ sign_verify_fake(rsa, sha256, Priv, Pub).
+
sign_verify_dsa(Config) ->
Priv = #{engine => engine_ref(Config),
key_id => key_id(Config, "dsa_private_key.pem")},
@@ -809,13 +851,18 @@ get_pub_from_priv_key_ecdsa(Config) ->
%%%================================================================
%%% Help for engine_stored_pub_priv_keys* test cases
%%%
-load_storage_engine(_Config) ->
+load_storage_engine(Config) ->
+ load_storage_engine(Config, []).
+
+load_storage_engine(_Config, ExcludeMthds) ->
case crypto:get_test_engine() of
{ok, Engine} ->
try crypto:engine_load(<<"dynamic">>,
[{<<"SO_PATH">>, Engine},
<<"LOAD">>],
- [])
+ [],
+ crypto:engine_get_all_methods() -- ExcludeMthds
+ )
catch
error:notsup ->
{error, notsup}
@@ -873,10 +920,47 @@ sign_verify(Alg, Sha, KeySign, KeyVerify) ->
true ->
PlainText = <<"Hej på dig">>,
Signature = crypto:sign(Alg, Sha, PlainText, KeySign),
- case crypto:verify(Alg, Sha, PlainText, Signature, KeyVerify) of
- true -> ok;
- _ -> {fail, "Sign-verify error"}
+ case is_fake(Signature) of
+ true ->
+ ct:pal("SIG ~p ~p size ~p~n~p",[Alg,Sha,size(Signature),Signature]),
+ {fail, "Faked RSA impl used!!"};
+ false ->
+ case crypto:verify(Alg, Sha, PlainText, Signature, KeyVerify) of
+ true -> ok;
+ _ -> {fail, "Sign-verify error"}
+ end
+ end;
+ false ->
+ {skip, lists:concat([Alg," is not supported by cryptolib"])}
+ end.
+
+
+%%% Use fake engine rsa implementation
+sign_verify_fake(Alg, Sha, KeySign, KeyVerify) ->
+ case pubkey_alg_supported(Alg) of
+ true ->
+ PlainText = <<"Fake me!">>,
+ Signature = crypto:sign(Alg, Sha, PlainText, KeySign),
+ case is_fake(Signature) of
+ true ->
+ case crypto:verify(Alg, Sha, PlainText, Signature, KeyVerify) of
+ true -> ok;
+ _ -> {fail, "Sign-verify error"}
+ end;
+ false ->
+ ct:pal("SIG ~p ~p size ~p~n~p",[Alg,Sha,size(Signature),Signature]),
+ {fail, "Faked impl not used"}
end;
false ->
{skip, lists:concat([Alg," is not supported by cryptolib"])}
end.
+
+
+is_fake(Sig) -> is_fake(Sig, 0).
+
+is_fake(<<>>, _) -> true;
+is_fake(<<B,Rest/binary>>, B) -> is_fake(Rest, B+1);
+is_fake(_, _) -> false.
+
+
+
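The is_fake/1 helper above recognises the deterministic byte pattern used by the test engine's fake RSA implementation (each byte counts up from 0). A small sketch of what it accepts and rejects:

    true  = is_fake(<<0,1,2,3,4,5>>),   %% the fake pattern: bytes counting up from 0
    true  = is_fake(<<>>),              %% vacuously fake
    false = is_fake(<<0,1,2,9>>).       %% sequence broken at the fourth byte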
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
index 2bcf137299..556f25f96a 100644
--- a/lib/kernel/doc/src/logger.xml
+++ b/lib/kernel/doc/src/logger.xml
@@ -290,7 +290,10 @@ logger:error("error happened because: ~p", [Reason]). % Without macro
<section>
<title>Macros</title>
- <p>The following macros are defined:</p>
+ <p>The following macros are defined in <c>logger.hrl</c>, which
+ is included in a module with the directive</p>
+ <code>
+ -include_lib("kernel/include/logger.hrl").</code>
<list>
<item><c>?LOG_EMERGENCY(StringOrReport[,Metadata])</c></item>
@@ -1214,7 +1217,7 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)).
<item><seealso marker="logger#set_handler_config-2">
<c>logger:set_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_handler_config-2">
- <c>logger:updata_handler_config/2,3</c></seealso></item>
+ <c>logger:update_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_formatter_config-2">
<c>logger:update_formatter_config/2</c></seealso></item>
</list>
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
index 1870d2ab79..458e61cef5 100644
--- a/lib/kernel/doc/src/logger_chapter.xml
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -113,7 +113,10 @@
of functions on the form <c>logger:Level/1,2,3</c>, which are
all shortcuts
for <seealso marker="logger#log-2">
- <c>logger:log(Level,Arg1[,Arg2[,Arg3]])</c></seealso>.</p>
+ <c>logger:log(Level,Arg1[,Arg2[,Arg3]])</c></seealso>.</p>
+ <p>The macros are defined in <c>logger.hrl</c>, which is included
+ in a module with the directive</p>
+ <code>-include_lib("kernel/include/logger.hrl").</code>
<p>The difference between using the macros and the exported
functions is that macros add location (originator) information
to the metadata, and performs lazy evaluation by wrapping the
diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml
index 24772fd6c4..d066e263df 100644
--- a/lib/kernel/doc/src/logger_formatter.xml
+++ b/lib/kernel/doc/src/logger_formatter.xml
@@ -303,7 +303,7 @@ exit_reason: "It crashed"</code>
<item><seealso marker="logger#set_handler_config-2">
<c>logger:set_handler_config/2,3</c></seealso></item>
<item><seealso marker="logger#update_handler_config-2">
- <c>logger:updata_handler_config/2</c></seealso></item>
+ <c>logger:update_handler_config/2</c></seealso></item>
<item><seealso marker="logger#update_formatter_config-2">
<c>logger:update_formatter_config/2</c></seealso></item>
</list>
diff --git a/lib/kernel/doc/src/notes.xml b/lib/kernel/doc/src/notes.xml
index 8188ede6a2..bfbaf6ef3e 100644
--- a/lib/kernel/doc/src/notes.xml
+++ b/lib/kernel/doc/src/notes.xml
@@ -31,6 +31,22 @@
</header>
<p>This document describes the changes made to the Kernel application.</p>
+<section><title>Kernel 6.1.1</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Fix a bug that caused the net_kernel process to crash on a connection
+ attempt from a node with a name identical to the local node.</p>
+ <p>
+ Own Id: OTP-15438 Aux Id: ERL-781 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Kernel 6.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
diff --git a/lib/kernel/src/kernel.appup.src b/lib/kernel/src/kernel.appup.src
index 0c0435e051..e73cea8351 100644
--- a/lib/kernel/src/kernel.appup.src
+++ b/lib/kernel/src/kernel.appup.src
@@ -16,15 +16,40 @@
%% limitations under the License.
%%
%% %CopyrightEnd%
+%%
+%% We allow upgrade from, and downgrade to all previous
+%% versions from the following OTP releases:
+%% - OTP 20
+%% - OTP 21
+%%
+%% We also allow upgrade from, and downgrade to all
+%% versions that have branched off from the previous
+%% versions stated above.
+%%
{"%VSN%",
- %% Up from - max one major revision back
- [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0
- {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+
- {<<"6\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-21.0
- {<<"6\\.1(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-21.1
- %% Down to - max one major revision back
- [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0
- {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+
- {<<"6\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-21.0
- {<<"6\\.1(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-21.1
-}.
+ [{<<"^5\\.3$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.2(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.3(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.0$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.1$">>,[restart_new_emulator]},
+ {<<"^6\\.1\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]}],
+ [{<<"^5\\.3$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.3\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.2(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^5\\.4\\.3(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.0$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]},
+ {<<"^6\\.0\\.1(?:\\.[0-9]+)*$">>,[restart_new_emulator]},
+ {<<"^6\\.1$">>,[restart_new_emulator]},
+ {<<"^6\\.1\\.0(?:\\.[0-9]+)+$">>,[restart_new_emulator]}]}.
diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl
index 6bfe658552..5e9faf332c 100644
--- a/lib/kernel/src/logger_config.erl
+++ b/lib/kernel/src/logger_config.erl
@@ -31,7 +31,9 @@
-include("logger_internal.hrl").
new(Name) ->
- _ = ets:new(Name,[set,protected,named_table,{write_concurrency,true}]),
+ _ = ets:new(Name,[set,protected,named_table,
+ {read_concurrency,true},
+ {write_concurrency,true}]),
ets:whereis(Name).
delete(Tid,Id) ->
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
index 2a81458ec8..41e0d51a9d 100644
--- a/lib/kernel/src/logger_disk_log_h.erl
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -19,59 +19,33 @@
%%
-module(logger_disk_log_h).
--behaviour(gen_server).
-
-include("logger.hrl").
-include("logger_internal.hrl").
-include("logger_h_common.hrl").
%%% API
--export([start_link/3, info/1, filesync/1, reset/1]).
+-export([info/1, filesync/1, reset/1]).
-%% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+%% logger_h_common callbacks
+-export([init/2, check_config/4, reset_state/2,
+ filesync/3, write/4, handle_info/3, terminate/3]).
%% logger callbacks
-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
filter_config/1]).
-%% handler internal
--export([log_handler_info/4]).
-
%%%===================================================================
%%% API
%%%===================================================================
%%%-----------------------------------------------------------------
-%%% Start a disk_log handler process and link to caller.
-%%% This function is called by the kernel supervisor when this
-%%% handler process gets added (as a result of calling add/3).
--spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
- Name :: atom(),
- Config :: logger:handler_config(),
- HandlerState :: map(),
- Pid :: pid(),
- Reason :: term().
-
-start_link(Name, Config, HandlerState) ->
- proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
-
-%%%-----------------------------------------------------------------
%%%
-spec filesync(Name) -> ok | {error,Reason} when
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-filesync(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- disk_log_sync, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
filesync(Name) ->
- {error,{badarg,{filesync,[Name]}}}.
+ logger_h_common:filesync(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -80,15 +54,8 @@ filesync(Name) ->
Info :: term(),
Reason :: handler_busy | {badarg,term()}.
-info(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- info, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
info(Name) ->
- {error,{badarg,{info,[Name]}}}.
+ logger_h_common:info(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -96,16 +63,8 @@ info(Name) ->
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-reset(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- reset, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
reset(Name) ->
- {error,{badarg,{reset,[Name]}}}.
-
+ logger_h_common:reset(?MODULE,Name).
%%%===================================================================
%%% logger callbacks
@@ -113,30 +72,55 @@ reset(Name) ->
%%%-----------------------------------------------------------------
%%% Handler being added
-adding_handler(#{id:=Name}=Config) ->
- case check_config(adding, Config) of
- {ok, #{config:=HConfig}=Config1} ->
- %% create initial handler state by merging defaults with config
- HState = maps:merge(get_init_state(), HConfig),
- case logger_h_common:overload_levels_ok(HState) of
- true ->
- start(Name, Config1, HState);
- false ->
- #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = HState,
- {error,{invalid_levels,{SMQL,DMQL,FQL}}}
- end;
+adding_handler(Config) ->
+ logger_h_common:adding_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ logger_h_common:changing_config(SetOrUpdate, OldConfig, NewConfig).
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Config) ->
+ logger_h_common:removing_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config) ->
+ logger_h_common:log(LogEvent, Config).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(Config) ->
+ logger_h_common:filter_config(Config).
+
+%%%===================================================================
+%%% logger_h_common callbacks
+%%%===================================================================
+init(Name, #{file:=File,type:=Type,max_no_bytes:=MNB,max_no_files:=MNF}) ->
+ case open_disk_log(Name, File, Type, MNB, MNF) of
+ ok ->
+ {ok,#{log_opts => #{file => File,
+ type => Type,
+ max_no_bytes => MNB,
+ max_no_files => MNF},
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}};
Error ->
Error
end.
-%%%-----------------------------------------------------------------
-%%% Updating handler config
-changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
+check_config(Name,set,undefined,HConfig0) ->
+ HConfig=merge_default_logopts(Name,maps:merge(get_default_config(),HConfig0)),
+ check_config(HConfig);
+check_config(_Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
WriteOnce = maps:with([type,file,max_no_files,max_no_bytes],OldHConfig),
- ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig),
- NewHConfig0 = maps:get(config, NewConfig, #{}),
Default =
case SetOrUpdate of
set ->
@@ -146,66 +130,25 @@ changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
OldHConfig
end,
- %% Allow (accidentially) included read-only fields - just overwrite them
- NewHConfig = maps:merge(maps:merge(Default,NewHConfig0),ReadOnly),
+ NewHConfig = maps:merge(Default,NewHConfig0),
- %% But fail if write-once fields are changed
+ %% Fail if write-once fields are changed
case maps:with([type,file,max_no_files,max_no_bytes],NewHConfig) of
WriteOnce ->
- changing_config1(maps:get(handler_pid,OldHConfig),
- OldConfig,
- NewConfig#{config=>NewHConfig});
+ check_config(NewHConfig);
Other ->
{Old,New} = logger_server:diff_maps(WriteOnce,Other),
- {error,{illegal_config_change,#{config=>Old},#{config=>New}}}
+ {error,{illegal_config_change,?MODULE,Old,New}}
end.
-changing_config1(HPid, OldConfig, NewConfig) ->
- case check_config(changing, NewConfig) of
- Result = {ok,NewConfig1} ->
- try gen_server:call(HPid, {change_config,OldConfig,NewConfig1},
- ?DEFAULT_CALL_TIMEOUT) of
- ok -> Result;
- Error -> Error
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
- Error ->
- Error
- end.
-
-check_config(adding, #{id:=Name}=Config) ->
- %% merge handler specific config data
- HConfig1 = maps:get(config, Config, #{}),
- HConfig2 = maps:merge(get_default_config(), HConfig1),
- HConfig3 = merge_default_logopts(Name, HConfig2),
- case check_h_config(maps:to_list(HConfig3)) of
- ok ->
- {ok,Config#{config=>HConfig3}};
- Error ->
- Error
- end;
-check_config(changing, Config) ->
- HConfig = maps:get(config, Config, #{}),
+check_config(HConfig) ->
case check_h_config(maps:to_list(HConfig)) of
- ok -> {ok,Config};
- Error -> Error
+ ok ->
+ {ok,HConfig};
+ {error,{Key,Value}} ->
+ {error,{invalid_config,?MODULE,#{Key=>Value}}}
end.
-merge_default_logopts(Name, HConfig) ->
- Type = maps:get(type, HConfig, wrap),
- {DefaultNoFiles,DefaultNoBytes} =
- case Type of
- halt -> {undefined,infinity};
- _wrap -> {10,1048576}
- end,
- {ok,Dir} = file:get_cwd(),
- Defaults = #{file => filename:join(Dir,Name),
- max_no_files => DefaultNoFiles,
- max_no_bytes => DefaultNoBytes,
- type => Type},
- maps:merge(Defaults, HConfig).
-
check_h_config([{file,File}|Config]) when is_list(File) ->
check_h_config(Config);
check_h_config([{max_no_files,undefined}|Config]) ->
@@ -218,454 +161,59 @@ check_h_config([{max_no_bytes,N}|Config]) when is_integer(N), N>0 ->
check_h_config(Config);
check_h_config([{type,Type}|Config]) when Type==wrap; Type==halt ->
check_h_config(Config);
-check_h_config([Other | Config]) ->
- case logger_h_common:check_common_config(Other) of
- valid ->
- check_h_config(Config);
- invalid ->
- {error,{invalid_config,?MODULE,Other}}
- end;
+check_h_config([Other | _]) ->
+ {error,Other};
check_h_config([]) ->
ok.
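For reference, a handler-specific config map that would pass check_h_config/1 above (values invented; merge_default_logopts/2 below supplies the same wrap-log defaults):

    HConfig = #{file         => "./my_disk_log",   %% must be a string
                type         => wrap,              %% wrap | halt
                max_no_files => 10,                %% positive integer or undefined
                max_no_bytes => 1048576},          %% positive integer (halt logs default to infinity)
    ok = check_h_config(maps:to_list(HConfig)).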
-%%%-----------------------------------------------------------------
-%%% Handler being removed
-removing_handler(#{id:=Name}) ->
- stop(Name).
-
-%%%-----------------------------------------------------------------
-%%% Log a string or report
--spec log(LogEvent, Config) -> ok when
- LogEvent :: logger:log_event(),
- Config :: logger:handler_config().
-
-log(LogEvent, Config = #{id := Name,
- config := #{handler_pid := HPid,
- mode_tab := ModeTab}}) ->
- %% if the handler has crashed, we must drop this event
- %% and hope the handler restarts so we can try again
- true = is_process_alive(HPid),
- Bin = logger_h_common:log_to_binary(LogEvent, Config),
- logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
+get_default_config() ->
+ #{}.
-%%%-----------------------------------------------------------------
-%%% Remove internal fields from configuration
-filter_config(#{config:=HConfig}=Config) ->
- Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}.
+merge_default_logopts(Name, HConfig) ->
+ Type = maps:get(type, HConfig, wrap),
+ {DefaultNoFiles,DefaultNoBytes} =
+ case Type of
+ halt -> {undefined,infinity};
+ _wrap -> {10,1048576}
+ end,
+ {ok,Dir} = file:get_cwd(),
+ Defaults = #{file => filename:join(Dir,Name),
+ max_no_files => DefaultNoFiles,
+ max_no_bytes => DefaultNoBytes,
+ type => Type},
+ maps:merge(Defaults, HConfig).
-%%%===================================================================
-%%% gen_server callbacks
-%%%===================================================================
+filesync(Name,_Mode,State) ->
+ Result = ?disk_log_sync(Name),
+ maybe_notify_error(Name, filesync, Result, prev_sync_result, State).
-init([Name,
- Config = #{config := HConfig = #{file:=File,
- type:=Type,
- max_no_bytes:=MNB,
- max_no_files:=MNF}},
- State = #{dl_sync_int := DLSyncInt}]) ->
-
- RegName = ?name_to_reg_name(?MODULE,Name),
- register(RegName, self()),
- process_flag(trap_exit, true),
- process_flag(message_queue_data, off_heap),
-
- ?init_test_hooks(),
- ?start_observation(Name),
-
- LogOpts = #{file=>File, type=>Type, max_no_bytes=>MNB, max_no_files=>MNF},
- case open_disk_log(Name, File, Type, MNB, MNF) of
- ok ->
- try ets:new(Name, [public]) of
- ModeTab ->
- ?set_mode(ModeTab, async),
- T0 = ?timestamp(),
- State1 =
- ?merge_with_stats(State#{
- id => Name,
- mode_tab => ModeTab,
- mode => async,
- dl_sync => DLSyncInt,
- log_opts => LogOpts,
- last_qlen => 0,
- last_log_ts => T0,
- burst_win_ts => T0,
- burst_msg_count => 0,
- last_op => sync,
- prev_log_result => ok,
- prev_sync_result => ok,
- prev_disk_log_info => undefined}),
- Config1 =
- Config#{config => HConfig#{handler_pid => self(),
- mode_tab => ModeTab}},
- proc_lib:init_ack({ok,self(),Config1}),
- gen_server:cast(self(), repeated_disk_log_sync),
- case logger_h_common:unset_restart_flag(Name, ?MODULE) of
- true ->
- %% inform about restart
- gen_server:cast(self(), {log_handler_info,
- "Handler ~p restarted",
- [Name]});
- false ->
- %% initial start
- ok
- end,
- gen_server:enter_loop(?MODULE, [], State1)
- catch
- _:Error ->
- unregister(RegName),
- logger_h_common:error_notify({open_disk_log,Name,Error}),
- proc_lib:init_ack(Error)
- end;
- Error ->
- unregister(RegName),
- logger_h_common:error_notify({open_disk_log,Name,Error}),
- proc_lib:init_ack(Error)
- end.
+write(Name, Mode, Bin, State) ->
+ Result = ?disk_log_write(Name, Mode, Bin),
+ maybe_notify_error(Name, log, Result, prev_log_result, State).
-%% This is the synchronous log event.
-handle_call({log, Bin}, _From, State) ->
- {Result,State1} = do_log(Bin, call, State),
- %% Result == ok | dropped
- {reply, Result, State1};
-
-handle_call(disk_log_sync, _From, State = #{id := Name}) ->
- State1 = #{prev_sync_result := Result} = disk_log_sync(Name, State),
- {reply, Result, State1};
-
-handle_call({change_config,_OldConfig,NewConfig}, _From,
- State = #{filesync_repeat_interval := FSyncInt0}) ->
- HConfig = maps:get(config, NewConfig, #{}),
- State1 = #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = maps:merge(State, HConfig),
- case logger_h_common:overload_levels_ok(State1) of
- true ->
- _ =
- case maps:get(filesync_repeat_interval, HConfig, undefined) of
- undefined ->
- ok;
- no_repeat ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined));
- FSyncInt0 ->
- ok;
- _FSyncInt1 ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined)),
- _ = gen_server:cast(self(), repeated_disk_log_sync)
- end,
- {reply, ok, State1};
- false ->
- {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State}
- end;
-
-handle_call(info, _From, State) ->
- {reply, State, State};
-
-handle_call(reset, _From, State) ->
- State1 = ?merge_with_stats(State),
- {reply, ok, State1#{last_qlen => 0,
- last_log_ts => ?timestamp(),
- prev_log_result => ok,
- prev_sync_result => ok,
- prev_disk_log_info => undefined}};
-
-handle_call(stop, _From, State) ->
- {stop, {shutdown,stopped}, ok, State}.
-
-
-%% This is the asynchronous log event.
-handle_cast({log, Bin}, State) ->
- {_,State1} = do_log(Bin, cast, State),
- {noreply, State1};
-
-handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) ->
- log_handler_info(Name, Format, Args, State),
- {noreply, State};
-
-%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
-%% clause gets called repeatedly by the handler. In order to
-%% guarantee that a filesync *always* happens after the last log
-%% event, the repeat operation must be active!
-handle_cast(repeated_disk_log_sync,
- State = #{id := Name,
- filesync_repeat_interval := FSyncInt,
- last_op := LastOp}) ->
- State1 =
- if is_integer(FSyncInt) ->
- %% only do filesync if something has been
- %% written since last time we checked
- NewState = if LastOp == sync ->
- State;
- true ->
- disk_log_sync(Name, State)
- end,
- {ok,TRef} =
- timer:apply_after(FSyncInt, gen_server,cast,
- [self(),repeated_disk_log_sync]),
- NewState#{rep_sync_tref => TRef, last_op => sync};
- true ->
- State
- end,
- {noreply,State1}.
+reset_state(_Name, State) ->
+ State#{prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}.
%% The disk log owner must handle status messages from disk_log.
-handle_info({disk_log, _Node, _Log, {wrap,_NoLostItems}}, State) ->
- {noreply, State};
-handle_info({disk_log, _Node, Log, Info = {truncated,_NoLostItems}},
- State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
- error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
- {noreply, State#{prev_disk_log_info => Info}};
-handle_info({disk_log, _Node, Log, Info = {blocked_log,_Items}},
- State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
- error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
- {noreply, State#{prev_disk_log_info => Info}};
-handle_info({disk_log, _Node, Log, full},
- State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
- error_notify_new(full, PrevInfo, {disk_log,Name,Log,full}),
- {noreply, State#{prev_disk_log_info => full}};
-handle_info({disk_log, _Node, Log, Info = {error_status,_Status}},
- State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
- error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
- {noreply, State#{prev_disk_log_info => Info}};
-
-handle_info({'EXIT',_Pid,_Why}, State = #{id := _Name}) ->
- {noreply, State};
-
-handle_info(_, State) ->
- {noreply, State}.
-
-terminate(Reason, State = #{id := Name}) ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
- undefined)),
+handle_info(Name, {disk_log, _Node, Log, Info={truncated,_NoLostItems}}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(Name, {disk_log, _Node, Log, Info = {blocked_log,_Items}}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(Name, {disk_log, _Node, Log, Info = full}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(Name, {disk_log, _Node, Log, Info = {error_status,_Status}}, State) ->
+ maybe_notify_status(Name, Log, Info, prev_disk_log_info, State);
+handle_info(_, _, State) ->
+ State.
+
+terminate(Name, _Reason, _State) ->
_ = close_disk_log(Name, normal),
- ok = logger_h_common:stop_or_restart(Name, Reason, State),
- unregister(?name_to_reg_name(?MODULE, Name)),
ok.
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
-
%%%-----------------------------------------------------------------
%%% Internal functions
-
-%%%-----------------------------------------------------------------
-%%%
-get_default_config() ->
- #{sync_mode_qlen => ?SYNC_MODE_QLEN,
- drop_mode_qlen => ?DROP_MODE_QLEN,
- flush_qlen => ?FLUSH_QLEN,
- burst_limit_enable => ?BURST_LIMIT_ENABLE,
- burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
- burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
- overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
- overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
- overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
- overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
-
-get_init_state() ->
- #{dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
- filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
-
-%%%-----------------------------------------------------------------
-%%% Add a disk_log handler to the logger.
-%%% This starts a dedicated handler process which should always
-%%% exist if the handler is registered with logger (and should not
-%%% exist if the handler is not registered).
-%%%
-%%% Config is the logger:handler_config() map. Handler specific parameters
-%%% should be provided with a sub map associated with a key named
-%%% 'config', e.g:
-%%%
-%%% Config = #{config => #{sync_mode_qlen => 50}
-%%%
-%%% The 'config' sub map will also contain parameters for configuring
-%%% the disk_log:
-%%%
-%%% Config = #{config => #{file => file:filename(),
-%%% max_no_bytes => integer(),
-%%% max_no_files => integer(),
-%%% type => wrap | halt}}.
-%%%
-%%% If type == halt, then max_no_files is ignored.
-%%%
-%%% The disk_log handler process is linked to logger_sup, which is
-%%% part of the kernel application's supervision tree.
-start(Name, Config, HandlerState) ->
- LoggerDLH =
- #{id => Name,
- start => {?MODULE, start_link, [Name,Config,HandlerState]},
- restart => temporary,
- shutdown => 2000,
- type => worker,
- modules => [?MODULE]},
- case supervisor:start_child(logger_sup, LoggerDLH) of
- {ok,Pid,Config1} ->
- ok = logger_handler_watcher:register_handler(Name,Pid),
- {ok,Config1};
- Error ->
- Error
- end.
-
-%%%-----------------------------------------------------------------
-%%% Stop and remove the handler.
-stop(Name) ->
- case whereis(?name_to_reg_name(?MODULE,Name)) of
- undefined ->
- ok;
- Pid ->
- %% We don't want to do supervisor:terminate_child here
- %% since we need to distinguish this explicit stop from a
- %% system termination in order to avoid circular attempts
- %% at removing the handler (implying deadlocks and
- %% timeouts).
- %% And we don't need to do supervisor:delete_child, since
- %% the restart type is temporary, which means that the
- %% child specification is automatically removed from the
- %% supervisor when the process dies.
- _ = gen_server:call(Pid, stop),
- ok
- end.
-
-%%%-----------------------------------------------------------------
-%%% Logging and overload control.
--define(update_dl_sync(C, Interval),
- if C == 0 -> Interval;
- true -> C-1 end).
-
-%% check for overload between every event (and set Mode to async,
-%% sync or drop accordingly), but never flush the whole mailbox
-%% before LogWindowSize events have been handled
-do_log(Bin, CallOrCast, State = #{id:=Name, mode := Mode0}) ->
- T1 = ?timestamp(),
-
- %% check if the handler is getting overloaded, or if it's
- %% recovering from overload (the check must be done for each
- %% event to react quickly to large bursts of events and
- %% to ensure that the handler can never end up in drop mode
- %% with an empty mailbox, which would stop operation)
- {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
-
- if (Mode1 == drop) andalso (Mode0 =/= drop) ->
- log_handler_info(Name, "Handler ~p switched to drop mode",
- [Name], State);
- (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
- log_handler_info(Name, "Handler ~p switched to ~w mode",
- [Name,Mode1], State);
- true ->
- ok
- end,
-
- %% kill the handler if it can't keep up with the load
- logger_h_common:kill_if_choked(Name, QLen, Mem, ?MODULE, State),
-
- if Mode1 == flush ->
- flush(Name, QLen, T1, State1);
- true ->
- write(Name, Mode1, T1, Bin, CallOrCast, State1)
- end.
-
-%% this function is called by do_log/3 after an overload check
-%% has been performed, where QLen > FlushQLen
-flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
- %% flush messages in the mailbox (a limited number in
- %% order to not cause long delays)
- NewFlushed = logger_h_common:flush_log_events(?FLUSH_MAX_N),
-
- %% write info in log about flushed messages
- log_handler_info(Name, "Handler ~p flushed ~w log events",
- [Name,NewFlushed], State),
-
- %% because of the receive loop when flushing messages, the
- %% handler will be scheduled out often and the mailbox could
- %% grow very large, so we'd better check the queue again here
- {_,_QLen1} = process_info(self(), message_queue_len),
- ?observe(Name,{max_qlen,_QLen1}),
-
- %% Add 1 for the current log event
- ?observe(Name,{flushed,NewFlushed+1}),
-
- State1 = ?update_max_time(?diff_time(T1,_T0),State),
- {dropped,?update_other(flushed,FLUSHED,NewFlushed,
- State1#{mode => ?set_mode(ModeTab,async),
- last_qlen => 0,
- last_log_ts => T1})}.
-
-%% this function is called to write to disk_log
-write(Name, Mode, T1, Bin, _CallOrCast,
- State = #{mode_tab := ModeTab,
- dl_sync := DLSync,
- dl_sync_int := DLSyncInt,
- last_qlen := LastQLen,
- last_log_ts := T0}) ->
- %% check if we need to limit the number of writes
- %% during a burst of log events
- {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
-
- %% only send a synhrounous event to the disk_log process
- %% every DLSyncInt time, to give the handler time between
- %% writes so it can keep up with incoming messages
- {Status,LastQLen1,State1} =
- if DoWrite, DLSync == 0 ->
- ?observe(Name,{_CallOrCast,1}),
- NewState = disk_log_write(Name, Bin, State),
- {ok, element(2,process_info(self(),message_queue_len)),
- NewState};
- DoWrite ->
- ?observe(Name,{_CallOrCast,1}),
- NewState = disk_log_write(Name, Bin, State),
- {ok, LastQLen, NewState};
- not DoWrite ->
- ?observe(Name,{flushed,1}),
- {dropped, LastQLen, State}
- end,
-
- %% Check if the time since the previous log event is long enough -
- %% and the queue length small enough - to assume the mailbox has
- %% been emptied, and if so, do filesync operation and reset mode to
- %% async. Note that this is the best we can do to detect an idle
- %% handler without setting a timer after each log call/cast. If the
- %% time between two consecutive log events is fast and no new
- %% event comes in after the last one, idle state won't be detected!
- Time = ?diff_time(T1,T0),
- {Mode1,BurstMsgCount1,State2} =
- if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
- (Time > ?IDLE_DETECT_TIME_USEC) ->
- {?change_mode(ModeTab,Mode,async), 0, disk_log_sync(Name,State1)};
- true ->
- {Mode, BurstMsgCount,State1}
- end,
-
- State3 =
- ?update_calls_or_casts(_CallOrCast,1,State2),
- State4 =
- ?update_max_time(Time,
- State3#{mode => Mode1,
- last_qlen := LastQLen1,
- last_log_ts => T1,
- burst_win_ts => BurstWinT,
- burst_msg_count => BurstMsgCount1,
- dl_sync => ?update_dl_sync(DLSync,DLSyncInt)}),
- {Status,State4}.
-
-
-log_handler_info(Name, Format, Args, State) ->
- Config =
- case logger:get_handler_config(Name) of
- {ok,Conf} -> Conf;
- _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
- end,
- Meta = #{time=>erlang:system_time(microsecond)},
- Bin = logger_h_common:log_to_binary(#{level => notice,
- msg => {Format,Args},
- meta => Meta}, Config),
- _ = disk_log_write(Name, Bin, State),
- ok.
-
-
open_disk_log(Name, File, Type, MaxNoBytes, MaxNoFiles) ->
case filelib:ensure_dir(File) of
ok ->
@@ -698,43 +246,26 @@ close_disk_log(Name, _) ->
_ = disk_log:lclose(Name),
ok.
-disk_log_write(Name, Bin, State) ->
- case ?disk_log_blog(Name, Bin) of
- ok ->
- State#{prev_log_result => ok, last_op => write};
- LogError ->
- _ = case maps:get(prev_log_result, State) of
- LogError ->
- %% don't report same error twice
- ok;
- _ ->
- LogOpts = maps:get(log_opts, State),
- logger_h_common:error_notify({Name,log,
- LogOpts,
- LogError})
- end,
- State#{prev_log_result => LogError}
- end.
-
-disk_log_sync(Name, State) ->
- case ?disk_log_sync(Name) of
- ok ->
- State#{prev_sync_result => ok, last_op => sync};
- SyncError ->
- _ = case maps:get(prev_sync_result, State) of
- SyncError ->
- %% don't report same error twice
- ok;
- _ ->
- LogOpts = maps:get(log_opts, State),
- logger_h_common:error_notify({Name,filesync,
- LogOpts,
- SyncError})
- end,
- State#{prev_sync_result => SyncError}
- end.
+disk_log_write(Name, sync, Bin) ->
+ disk_log:blog(Name, Bin);
+disk_log_write(Name, async, Bin) ->
+ disk_log:balog(Name, Bin).
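The Mode argument is what logger_h_common passes to the write/4 callback; a sketch (handler name assumed, and the disk_log must already be open) of the two resulting disk_log calls:

    ok = disk_log_write(my_handler, sync,  <<"urgent line\n">>),   %% disk_log:blog/2 waits for the write
    ok = disk_log_write(my_handler, async, <<"other line\n">>).    %% disk_log:balog/2 returns immediately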
+
+%%%-----------------------------------------------------------------
+%%% Print error messages, but don't repeat the same message
+maybe_notify_error(Name, Op, Result, Key, #{log_opts:=LogOpts}=State) ->
+ {Result,error_notify_new({Name, Op, LogOpts, Result}, Result, Key, State)}.
-error_notify_new(Info,Info, _Term) ->
+maybe_notify_status(Name, Log, Info, Key, State) ->
+ error_notify_new({disk_log, Name, Log, Info}, Info, Key, State).
+
+error_notify_new(Term, What, Key, State) ->
+ error_notify_new(What, maps:get(Key,State), Term),
+ State#{Key => What}.
+
+error_notify_new(ok,_Prev,_Term) ->
+ ok;
+error_notify_new(Same,Same,_Term) ->
ok;
-error_notify_new(_Info0,_Info1, Term) ->
+error_notify_new(_New,_Prev,Term) ->
logger_h_common:error_notify(Term).
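A sketch (handler name and error invented) of the deduplication above: the first filesync failure is reported via logger_h_common:error_notify/1, while an identical follow-up failure is only remembered:

    State0 = #{log_opts => #{}, prev_sync_result => ok},
    {Err, State1} = maybe_notify_error(h1, filesync, {error,enospc}, prev_sync_result, State0),
    %% Err = {error,enospc}; error_notify/1 was called and State1 remembers the error
    {Err, State1} = maybe_notify_error(h1, filesync, {error,enospc}, prev_sync_result, State1).
    %% same error again -> error_notify_new(Same, Same, _) matches and nothing is reported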
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
index 94c640cb92..74a2d158fc 100644
--- a/lib/kernel/src/logger_h_common.erl
+++ b/lib/kernel/src/logger_h_common.erl
@@ -18,26 +18,469 @@
%% %CopyrightEnd%
%%
-module(logger_h_common).
+-behaviour(gen_server).
-include("logger_h_common.hrl").
-include("logger_internal.hrl").
--export([log_to_binary/2,
- check_common_config/1,
- call_cast_or_drop/4,
- check_load/1,
- limit_burst/1,
- kill_if_choked/5,
- flush_log_events/0,
- flush_log_events/1,
- handler_exit/2,
- set_restart_flag/2,
- unset_restart_flag/2,
- cancel_timer/1,
- stop_or_restart/3,
- overload_levels_ok/1,
- error_notify/1,
- info_notify/1]).
+%% API
+-export([start_link/1, info/2, filesync/2, reset/2]).
+
+%% gen_server and proc_lib callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
+ filter_config/1]).
+
+%% Library functions for handlers
+-export([error_notify/1]).
+
+%%%-----------------------------------------------------------------
+-define(CONFIG_KEYS,[sync_mode_qlen,
+ drop_mode_qlen,
+ flush_qlen,
+ burst_limit_enable,
+ burst_limit_max_count,
+ burst_limit_window_time,
+ overload_kill_enable,
+ overload_kill_qlen,
+ overload_kill_mem_size,
+ overload_kill_restart_after,
+ filesync_repeat_interval]).
+-define(READ_ONLY_KEYS,[handler_pid,mode_tab]).
+
+%%%-----------------------------------------------------------------
+%%% API
+
+%% This function is called by the logger_sup supervisor
+start_link(Args) ->
+ proc_lib:start_link(?MODULE,init,[Args]).
+
+filesync(Module, Name) ->
+ call(Module, Name, filesync).
+
+info(Module, Name) ->
+ call(Module, Name, info).
+
+reset(Module, Name) ->
+ call(Module, Name, reset).
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(#{id:=Name,module:=Module}=Config) ->
+ HConfig0 = maps:get(config, Config, #{}),
+ HandlerConfig0 = maps:without(?CONFIG_KEYS,HConfig0),
+ case Module:check_config(Name,set,undefined,HandlerConfig0) of
+ {ok,HandlerConfig} ->
+ ModifiedCommon = maps:with(?CONFIG_KEYS,HandlerConfig),
+ CommonConfig0 = maps:with(?CONFIG_KEYS,HConfig0),
+ CommonConfig = maps:merge(
+ maps:merge(get_default_config(), CommonConfig0),
+ ModifiedCommon),
+ case check_config(CommonConfig) of
+ ok ->
+ HConfig = maps:merge(CommonConfig,HandlerConfig),
+ start(Config#{config => HConfig});
+ {error,Faulty} ->
+ {error,{invalid_config,Module,Faulty}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(#{id:=Name, module:=Module}) ->
+ case whereis(?name_to_reg_name(Module,Name)) of
+ undefined ->
+ ok;
+ Pid ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ %% And we don't need to do supervisor:delete_child, since
+ %% the restart type is temporary, which means that the
+ %% child specification is automatically removed from the
+ %% supervisor when the process dies.
+ _ = gen_server:call(Pid, stop),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(SetOrUpdate,
+ #{id:=Name,config:=OldHConfig,module:=Module},
+ NewConfig0) ->
+ NewHConfig0 = maps:get(config, NewConfig0, #{}),
+ OldHandlerConfig = maps:without(?CONFIG_KEYS++?READ_ONLY_KEYS,OldHConfig),
+ NewHandlerConfig0 = maps:without(?CONFIG_KEYS++?READ_ONLY_KEYS,NewHConfig0),
+ case Module:check_config(Name, SetOrUpdate,
+ OldHandlerConfig,NewHandlerConfig0) of
+ {ok, NewHandlerConfig} ->
+ ModifiedCommon = maps:with(?CONFIG_KEYS,NewHandlerConfig),
+ NewCommonConfig0 = maps:with(?CONFIG_KEYS,NewHConfig0),
+ CommonDefault =
+ case SetOrUpdate of
+ set ->
+ get_default_config();
+ update ->
+ maps:with(?CONFIG_KEYS,OldHConfig)
+ end,
+ NewCommonConfig = maps:merge(
+ maps:merge(CommonDefault,NewCommonConfig0),
+ ModifiedCommon),
+ case check_config(NewCommonConfig) of
+ ok ->
+ ReadOnly = maps:with(?READ_ONLY_KEYS,OldHConfig),
+ NewHConfig = maps:merge(
+ maps:merge(NewCommonConfig,NewHandlerConfig),
+ ReadOnly),
+ NewConfig = NewConfig0#{config=>NewHConfig},
+ HPid = maps:get(handler_pid,OldHConfig),
+ case call(HPid, {change_config,NewConfig}) of
+ ok -> {ok,NewConfig};
+ Error -> Error
+ end;
+ {error,Faulty} ->
+ {error,{invalid_config,Module,Faulty}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config = #{id := Name,
+ config := #{handler_pid := HPid,
+ mode_tab := ModeTab}}) ->
+ %% if the handler has crashed, we must drop this event
+ %% and hope the handler restarts so we can try again
+ true = is_process_alive(HPid),
+ Bin = log_to_binary(LogEvent, Config),
+ call_cast_or_drop(Name, HPid, ModeTab, Bin).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+filter_config(#{config:=HConfig}=Config) ->
+ Config#{config=>maps:without(?READ_ONLY_KEYS,HConfig)}.
+
+%%%-----------------------------------------------------------------
+%%% Start the handler process
+%%%
+%%% The process must always exist if the handler is registered with
+%%% logger (and must not exist if the handler is not registered).
+%%%
+%%% The handler process is linked to logger_sup, which is part of the
+%%% kernel application's supervision tree.
+start(#{id := Name} = Config0) ->
+ ChildSpec =
+ #{id => Name,
+ start => {?MODULE, start_link, [Config0]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, ChildSpec) of
+ {ok,Pid,Config} ->
+ ok = logger_handler_watcher:register_handler(Name,Pid),
+ {ok,Config};
+ Error ->
+ Error
+ end.
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init(#{id := Name, module := Module,
+ formatter := Formatter, config := HConfig0} = Config0) ->
+ RegName = ?name_to_reg_name(Module,Name),
+ register(RegName, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ case Module:init(Name, HConfig0) of
+ {ok,HState} ->
+ try ets:new(Name, [public]) of
+ ModeTab ->
+ ?set_mode(ModeTab, async),
+ T0 = ?timestamp(),
+ HConfig = HConfig0#{handler_pid => self(),
+ mode_tab => ModeTab},
+ Config = Config0#{config => HConfig},
+ proc_lib:init_ack({ok,self(),Config}),
+ %% Storing common config in state to avoid copying
+ %% (sending) the config data for each log message
+ CommonConfig = maps:with(?CONFIG_KEYS,HConfig),
+ State =
+ ?merge_with_stats(
+ CommonConfig#{id => Name,
+ module => Module,
+ mode_tab => ModeTab,
+ mode => async,
+ ctrl_sync_count =>
+ ?CONTROLLER_SYNC_INTERVAL,
+ last_qlen => 0,
+ last_log_ts => T0,
+ last_op => sync,
+ burst_win_ts => T0,
+ burst_msg_count => 0,
+ formatter => Formatter,
+ handler_state => HState}),
+ State1 = set_repeated_filesync(State),
+ unset_restart_flag(State1),
+ gen_server:enter_loop(?MODULE, [], State1)
+ catch
+ _:Error ->
+ unregister(RegName),
+ error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end;
+ Error ->
+ unregister(RegName),
+ error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+%% This is the synchronous log event.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply,Result, State1};
+
+handle_call(filesync, _From, State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState}) ->
+ {Result,HandlerState1} = Module:filesync(Name,sync,HandlerState),
+ {reply, Result, State#{handler_state=>HandlerState1, last_op=>sync}};
+
+handle_call({change_config, #{formatter:=Formatter, config:=NewHConfig}}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0}) ->
+ %% In the future, if handler_state must be updated due to config
+ %% change, then we need to add a callback to Module here.
+ CommonConfig = maps:with(?CONFIG_KEYS,NewHConfig),
+ State1 = maps:merge(State, CommonConfig),
+ State2 =
+ case maps:get(filesync_repeat_interval, NewHConfig) of
+ FSyncInt0 ->
+ State1;
+ _FSyncInt1 ->
+ set_repeated_filesync(cancel_repeated_filesync(State1))
+ end,
+ {reply, ok, State2#{formatter:=Formatter}};
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From,
+ #{id:=Name,module:=Module,handler_state:=HandlerState}=State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp(),
+ handler_state => Module:reset_state(Name,HandlerState)}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+%% This is the asynchronous log event.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% event, the repeat operation must be active!
+handle_cast(repeated_filesync,State = #{filesync_repeat_interval := no_repeat}) ->
+ %% This clause handles a race condition which may occur when
+ %% config changes filesync_repeat_interval from an integer value
+ %% to no_repeat.
+ {noreply,State};
+handle_cast(repeated_filesync,
+ State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState,
+ last_op := LastOp}) ->
+ State1 =
+ if LastOp == sync ->
+ State;
+ true ->
+ {_,HS} = Module:filesync(Name, async, HandlerState),
+ State#{handler_state => HS, last_op => sync}
+ end,
+ {noreply,set_repeated_filesync(State1)}.
+
+handle_info(Info, #{id := Name, module := Module,
+ handler_state := HandlerState} = State) ->
+ {noreply,State#{handler_state => Module:handle_info(Name,Info,HandlerState)}}.
+
+terminate(Reason, State = #{id := Name,
+ module := Module,
+ handler_state := HandlerState}) ->
+ _ = cancel_repeated_filesync(State),
+ _ = Module:terminate(Name, Reason, HandlerState),
+ ok = stop_or_restart(Name, Reason, State),
+ unregister(?name_to_reg_name(Module, Name)),
+ ok.
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+call(Module, Name, Op) when is_atom(Name) ->
+ call(?name_to_reg_name(Module,Name), Op);
+call(_, Name, Op) ->
+ {error,{badarg,{Op,[Name]}}}.
+
+call(Server, Msg) ->
+ try
+ gen_server:call(Server, Msg, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end.
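A usage sketch (handler module and id assumed): the API functions above all go through call/2, so a handler that cannot answer within ?DEFAULT_CALL_TIMEOUT surfaces as handler_busy instead of an exit in the caller:

    case logger_h_common:filesync(logger_std_h, default) of
        ok                   -> ok;
        {error,handler_busy} -> retry_later;
        {error,{badarg,_}}   -> not_an_atom_name
    end.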
+
+%% check for overload between every event (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% before LogWindowSize events have been handled
+do_log(Bin, CallOrCast, State = #{id:=Name, mode:=Mode0}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% event to react quickly to large bursts of events and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = check_load(State),
+
+ if (Mode1 == drop) andalso (Mode0 =/= drop) ->
+ log_handler_info(Name, "Handler ~p switched to drop mode",
+ [Name], State);
+ (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
+ log_handler_info(Name, "Handler ~p switched to ~w mode",
+ [Name,Mode1], State);
+ true ->
+ ok
+ end,
+
+ %% kill the handler if it can't keep up with the load
+ kill_if_choked(Name, QLen, Mem, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
+
+%% this function is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ NewFlushed = flush_log_events(?FLUSH_MAX_N),
+
+ %% write info in log about flushed messages
+ log_handler_info(Name, "Handler ~p flushed ~w log events",
+ [Name,NewFlushed], State),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log event
+ ?observe(Name,{flushed,NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ State2 = ?update_max_qlen(_QLen1,State1),
+ {dropped,?update_other(flushed,FLUSHED,NewFlushed,
+ State2#{mode => ?set_mode(ModeTab,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this function is called to write to file
+write(Name, Mode, T1, Bin, _CallOrCast,
+ State = #{module := Module,
+ handler_state := HandlerState,
+ mode_tab := ModeTab,
+ ctrl_sync_count := CtrlSync,
+ last_qlen := LastQLen,
+ last_log_ts := T0}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log events
+ {DoWrite,State1} = limit_burst(State),
+
+ %% only log synchronously every ?CONTROLLER_SYNC_INTERVAL'th time, to
+ %% give the handler time between writes so it can keep up with
+ %% incoming messages
+ {Result,LastQLen1,HandlerState1} =
+ if DoWrite, CtrlSync == 0 ->
+ ?observe(Name,{_CallOrCast,1}),
+ {_,HS1} = Module:write(Name, sync, Bin, HandlerState),
+ {ok,element(2, process_info(self(), message_queue_len)),HS1};
+ DoWrite ->
+ ?observe(Name,{_CallOrCast,1}),
+ {_,HS1} = Module:write(Name, async, Bin, HandlerState),
+ {ok,LastQLen,HS1};
+ not DoWrite ->
+ ?observe(Name,{flushed,1}),
+ {dropped,LastQLen,HandlerState}
+ end,
+
+ %% Check if the time since the previous log event is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+ %% been emptied, and if so, do filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+ %% time between two consecutive log events is fast and no new
+ %% event comes in after the last one, idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ State2 =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ {_,HS2} = Module:filesync(Name,async,HandlerState),
+ State1#{mode => ?change_mode(ModeTab, Mode, async),
+ burst_msg_count => 0,
+ handler_state => HS2};
+ true ->
+ State1#{mode => Mode, handler_state => HandlerState1}
+ end,
+ State3 = ?update_calls_or_casts(_CallOrCast,1,State2),
+ State4 = ?update_max_qlen(LastQLen1,State3),
+ State5 =
+ ?update_max_time(Time,
+ State4#{last_qlen := LastQLen1,
+ last_log_ts => T1,
+ last_op => write,
+ ctrl_sync_count =>
+ if CtrlSync==0 -> ?CONTROLLER_SYNC_INTERVAL;
+ true -> CtrlSync-1
+ end}),
+ {Result,State5}.
+
+log_handler_info(Name, Format, Args, #{module:=Module,
+ formatter:=Formatter,
+ handler_state:=HandlerState}) ->
+ Config = #{formatter=>Formatter},
+ Meta = #{time=>erlang:system_time(microsecond)},
+ Bin = log_to_binary(#{level => notice,
+ msg => {Format,Args},
+ meta => Meta}, Config),
+ _ = Module:write(Name, async, Bin, HandlerState),
+ ok.
%%%-----------------------------------------------------------------
%%% Convert log data on any form to binary
@@ -94,46 +537,65 @@ string_to_binary(String) ->
throw(Error)
end.
-
%%%-----------------------------------------------------------------
%%% Check that the configuration term is valid
-check_common_config({mode_tab,_Tid}) ->
- valid;
-check_common_config({handler_pid,Pid}) when is_pid(Pid) ->
- valid;
-
-check_common_config({sync_mode_qlen,N}) when is_integer(N) ->
- valid;
-check_common_config({drop_mode_qlen,N}) when is_integer(N) ->
- valid;
-check_common_config({flush_qlen,N}) when is_integer(N) ->
- valid;
-
-check_common_config({burst_limit_enable,Bool}) when Bool == true;
- Bool == false ->
- valid;
-check_common_config({burst_limit_max_count,N}) when is_integer(N) ->
- valid;
-check_common_config({burst_limit_window_time,N}) when is_integer(N) ->
- valid;
-
-check_common_config({overload_kill_enable,Bool}) when Bool == true;
- Bool == false ->
- valid;
-check_common_config({overload_kill_qlen,N}) when is_integer(N) ->
- valid;
-check_common_config({overload_kill_mem_size,N}) when is_integer(N) ->
- valid;
-check_common_config({overload_kill_restart_after,NorA}) when is_integer(NorA);
- NorA == infinity ->
- valid;
-
-check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA);
- NorA == no_repeat ->
- valid;
-check_common_config(_) ->
- invalid.
+check_config(Config) when is_map(Config) ->
+ case check_common_config(maps:to_list(Config)) of
+ ok ->
+ case overload_levels_ok(Config) of
+ true ->
+ ok;
+ false ->
+ Faulty = maps:with([sync_mode_qlen,
+ drop_mode_qlen,
+ flush_qlen],Config),
+ {error,{invalid_levels,Faulty}}
+ end;
+ Error ->
+ Error
+ end.
+
+check_common_config([{sync_mode_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{drop_mode_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{flush_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{burst_limit_enable,Bool}|Config]) when is_boolean(Bool) ->
+ check_common_config(Config);
+check_common_config([{burst_limit_max_count,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{burst_limit_window_time,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_enable,Bool}|Config]) when is_boolean(Bool) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_qlen,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_mem_size,N}|Config]) when is_integer(N) ->
+ check_common_config(Config);
+check_common_config([{overload_kill_restart_after,NorA}|Config])
+ when is_integer(NorA); NorA == infinity ->
+ check_common_config(Config);
+check_common_config([{filesync_repeat_interval,NorA}|Config])
+ when is_integer(NorA); NorA == no_repeat ->
+ check_common_config(Config);
+check_common_config([{Key,Value}|_]) ->
+ {error,#{Key=>Value}};
+check_common_config([]) ->
+ ok.
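A sketch (option values invented) of how check_config/1 above sees the result: valid options pass through, and the first unknown key is returned as a one-entry map that ends up in the invalid_config error:

    ok = check_common_config([{sync_mode_qlen,10}, {flush_qlen,1000}]),
    {error, Faulty} = check_common_config([{sync_mode_qlen,10}, {color,red}]),
    Faulty = #{color => red}.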
+get_default_config() ->
+ #{sync_mode_qlen => ?SYNC_MODE_QLEN,
+ drop_mode_qlen => ?DROP_MODE_QLEN,
+ flush_qlen => ?FLUSH_QLEN,
+ burst_limit_enable => ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
%%%-----------------------------------------------------------------
%%% Overload Protection
@@ -146,15 +608,13 @@ call_cast_or_drop(_Name, HandlerPid, ModeTab, Bin) ->
async ->
gen_server:cast(HandlerPid, {log,Bin});
sync ->
- try gen_server:call(HandlerPid, {log,Bin}, ?DEFAULT_CALL_TIMEOUT) of
- %% if return value from call == dropped, the
- %% message has been flushed by handler and should
- %% therefore not be counted as dropped in stats
- ok -> ok;
- dropped -> ok
- catch
- _:{timeout,_} ->
- ?observe(_Name,{dropped,1})
+ case call(HandlerPid, {log,Bin}) of
+ ok ->
+ ok;
+ _Other ->
+ %% dropped or {error,handler_busy}
+ ?observe(_Name,{dropped,1}),
+ ok
end;
drop ->
?observe(_Name,{dropped,1})
@@ -165,10 +625,8 @@ call_cast_or_drop(_Name, HandlerPid, ModeTab, Bin) ->
end,
ok.
-handler_exit(_Name, Reason) ->
- exit(Reason).
-
-set_restart_flag(Name, Module) ->
+set_restart_flag(#{id := Name, module := Module} = State) ->
+ log_handler_info(Name, "Handler ~p overloaded and stopping", [Name], State),
Flag = list_to_atom(lists:concat([Module,"_",Name,"_restarting"])),
spawn(fun() ->
register(Flag, self()),
@@ -176,14 +634,14 @@ set_restart_flag(Name, Module) ->
end),
ok.
-unset_restart_flag(Name, Module) ->
+unset_restart_flag(#{id := Name, module := Module} = State) ->
Flag = list_to_atom(lists:concat([Module,"_",Name,"_restarting"])),
case whereis(Flag) of
undefined ->
- false;
+ ok;
Pid ->
exit(Pid, kill),
- true
+ log_handler_info(Name, "Handler ~p restarted", [Name], State)
end.
check_load(State = #{id:=_Name, mode_tab := ModeTab, mode := Mode,
@@ -221,46 +679,41 @@ check_load(State = #{id:=_Name, mode_tab := ModeTab, mode := Mode,
?update_other(flushes,FLUSHES,_NewFlushes,
State1#{last_qlen => QLen})}.
-limit_burst(#{burst_limit_enable := false}) ->
- {true,0,0};
+limit_burst(#{burst_limit_enable := false}=State) ->
+ {true,State};
limit_burst(#{burst_win_ts := BurstWinT0,
burst_msg_count := BurstMsgCount,
burst_limit_window_time := BurstLimitWinTime,
- burst_limit_max_count := BurstLimitMaxCnt}) ->
+ burst_limit_max_count := BurstLimitMaxCnt} = State) ->
if (BurstMsgCount >= BurstLimitMaxCnt) ->
%% the limit for allowed messages has been reached
BurstWinT1 = ?timestamp(),
case ?diff_time(BurstWinT1,BurstWinT0) of
BurstCheckTime when BurstCheckTime < (BurstLimitWinTime*1000) ->
%% we're still within the burst time frame
- {false,BurstWinT0,BurstMsgCount};
+ {false,?update_other(burst_drops,BURSTS,1,State)};
_BurstCheckTime ->
%% burst time frame passed, reset counters
- {true,BurstWinT1,0}
+ {true,State#{burst_win_ts => BurstWinT1,
+ burst_msg_count => 0}}
end;
true ->
%% the limit for allowed messages not yet reached
- {true,BurstWinT0,BurstMsgCount+1}
+ {true,State#{burst_win_ts => BurstWinT0,
+ burst_msg_count => BurstMsgCount+1}}
end.
-kill_if_choked(Name, QLen, Mem, HandlerMod,
- State = #{overload_kill_enable := KillIfOL,
- overload_kill_qlen := OLKillQLen,
- overload_kill_mem_size := OLKillMem}) ->
+kill_if_choked(Name, QLen, Mem, State = #{overload_kill_enable := KillIfOL,
+ overload_kill_qlen := OLKillQLen,
+ overload_kill_mem_size := OLKillMem}) ->
if KillIfOL andalso
((QLen > OLKillQLen) orelse (Mem > OLKillMem)) ->
- HandlerMod:log_handler_info(Name,
- "Handler ~p overloaded and stopping",
- [Name], State),
- set_restart_flag(Name, HandlerMod),
- handler_exit(Name, {shutdown,{overloaded,Name,QLen,Mem}});
+ set_restart_flag(State),
+ exit({shutdown,{overloaded,Name,QLen,Mem}});
true ->
ok
end.
-flush_log_events() ->
- flush_log_events(-1).
-
flush_log_events(Limit) ->
process_flag(priority, high),
Flushed = flush_log_events(0, Limit),
@@ -283,16 +736,29 @@ flush_log_events(N, Limit) ->
0 -> N
end.
-cancel_timer(TRef) when is_atom(TRef) -> ok;
-cancel_timer(TRef) -> timer:cancel(TRef).
+set_repeated_filesync(#{filesync_repeat_interval:=FSyncInt} = State)
+ when is_integer(FSyncInt) ->
+ {ok,TRef} = timer:apply_after(FSyncInt, gen_server, cast,
+ [self(),repeated_filesync]),
+ State#{rep_sync_tref=>TRef};
+set_repeated_filesync(State) ->
+ State.
+cancel_repeated_filesync(State) ->
+ case maps:take(rep_sync_tref,State) of
+ {TRef,State1} ->
+ _ = timer:cancel(TRef),
+ State1;
+ error ->
+ State
+ end.
stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
#{overload_kill_restart_after := RestartAfter}) ->
%% If we're terminating because of an overload situation (see
- %% logger_h_common:kill_if_choked/4), we need to remove the handler
- %% and set a restart timer. A separate process must perform this
- %% in order to avoid deadlock.
+ %% kill_if_choked/4), we need to remove the handler and set a
+ %% restart timer. A separate process must perform this in order to
+ %% avoid deadlock.
HandlerPid = self(),
ConfigResult = logger:get_handler_config(Name),
RemoveAndRestart =
@@ -334,6 +800,3 @@ overload_levels_ok(HandlerConfig) ->
error_notify(Term) ->
?internal_log(error, Term).
-
-info_notify(Term) ->
- ?internal_log(info, Term).
diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl
index e0a7b6e3ca..261b0a6246 100644
--- a/lib/kernel/src/logger_h_common.hrl
+++ b/lib/kernel/src/logger_h_common.hrl
@@ -137,7 +137,7 @@
ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
- ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_write,ok}),
ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
-define(set_internal_log(MOD_FUNC),
@@ -150,7 +150,7 @@
ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
- ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_write,ok}),
ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
-define(internal_log(TYPE, TERM),
@@ -171,11 +171,11 @@
[{_,ERROR}] -> ERROR
catch _:_ -> file:datasync(DEVICE) end).
- -define(disk_log_blog(LOG, DATA),
- try ets:lookup(?TEST_HOOKS_TAB, disk_log_blog) of
- [{_,ok}] -> disk_log:blog(LOG, DATA);
+ -define(disk_log_write(LOG, MODE, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_write) of
+ [{_,ok}] -> disk_log_write(LOG, MODE, DATA);
[{_,ERROR}] -> ERROR
- catch _:_ -> disk_log:blog(LOG, DATA) end).
+ catch _:_ -> disk_log_write(LOG, MODE, DATA) end).
-define(disk_log_sync(LOG),
try ets:lookup(?TEST_HOOKS_TAB, disk_log_sync) of
@@ -194,7 +194,7 @@
-define(internal_log(TYPE, TERM), logger:internal_log(TYPE, TERM)).
-define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)).
-define(file_datasync(DEVICE), file:datasync(DEVICE)).
- -define(disk_log_blog(LOG, DATA), disk_log:blog(LOG, DATA)).
+ -define(disk_log_write(LOG, MODE, DATA), disk_log_write(LOG, MODE, DATA)).
-define(disk_log_sync(LOG), disk_log:sync(LOG)).
-define(DEFAULT_CALL_TIMEOUT, 10000).
-endif.
@@ -210,7 +210,7 @@
-ifdef(SAVE_STATS).
-define(merge_with_stats(STATE),
STATE#{flushes => 0, flushed => 0, drops => 0,
- casts => 0, calls => 0,
+ burst_drops => 0, casts => 0, calls => 0,
max_qlen => 0, max_time => 0}).
-define(update_max_qlen(QLEN, STATE),
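
The renamed disk_log_write test hook above follows the same pattern as the other hooks in this header: in instrumented test builds the macro consults the ?TEST_HOOKS_TAB ETS table, returns an installed error result as-is, and performs the real call when the installed result is ok or when no hooks table exists. A rough sketch of that idea as a plain function (names are illustrative, not the actual macro):

%% Return the injected result from the hooks table if an error is set,
%% otherwise perform the real write.
maybe_hooked_write(HooksTab, Fd, Data) ->
    try ets:lookup(HooksTab, file_write) of
        [{_,ok}]    -> file:write(Fd, Data);
        [{_,Error}] -> Error
    catch
        _:_ -> file:write(Fd, Data)
    end.

This is what lets the write_failure and sync_failure test cases further down inject {error,no_such_log} and similar results without touching the file system.
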
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index 42e0f5caf4..ebe741e331 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -19,8 +19,6 @@
%%
-module(logger_std_h).
--behaviour(gen_server).
-
-include("logger.hrl").
-include("logger_internal.hrl").
-include("logger_h_common.hrl").
@@ -28,52 +26,28 @@
-include_lib("kernel/include/file.hrl").
%% API
--export([start_link/3, info/1, filesync/1, reset/1]).
+-export([info/1, filesync/1, reset/1]).
-%% gen_server and proc_lib callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
+%% logger_h_common callbacks
+-export([init/2, check_config/4, reset_state/2,
+ filesync/3, write/4, handle_info/3, terminate/3]).
%% logger callbacks
-export([log/2, adding_handler/1, removing_handler/1, changing_config/3,
filter_config/1]).
-%% handler internal
--export([log_handler_info/4]).
-
%%%===================================================================
%%% API
%%%===================================================================
%%%-----------------------------------------------------------------
-%%% Start a standard handler process and link to caller.
-%%% This function is called by the kernel supervisor when this
-%%% handler process gets added
--spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
- Name :: atom(),
- Config :: logger:handler_config(),
- HandlerState :: map(),
- Pid :: pid(),
- Reason :: term().
-
-start_link(Name, Config, HandlerState) ->
- proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
-
-%%%-----------------------------------------------------------------
%%%
-spec filesync(Name) -> ok | {error,Reason} when
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-filesync(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- filesync, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
filesync(Name) ->
- {error,{badarg,{filesync,[Name]}}}.
+ logger_h_common:filesync(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -82,15 +56,8 @@ filesync(Name) ->
Info :: term(),
Reason :: handler_busy | {badarg,term()}.
-info(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- info, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
info(Name) ->
- {error,{badarg,{info,[Name]}}}.
+ logger_h_common:info(?MODULE,Name).
%%%-----------------------------------------------------------------
%%%
@@ -98,47 +65,75 @@ info(Name) ->
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-reset(Name) when is_atom(Name) ->
- try
- gen_server:call(?name_to_reg_name(?MODULE,Name),
- reset, ?DEFAULT_CALL_TIMEOUT)
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
reset(Name) ->
- {error,{badarg,{reset,[Name]}}}.
-
+ logger_h_common:reset(?MODULE,Name).
%%%===================================================================
-%%% logger callbacks
+%%% logger callbacks - just forward to logger_h_common
%%%===================================================================
%%%-----------------------------------------------------------------
%%% Handler being added
-adding_handler(#{id:=Name}=Config) ->
- case check_config(adding, Config) of
- {ok, #{config:=HConfig}=Config1} ->
- %% create initial handler state by merging defaults with config
- HState = maps:merge(get_init_state(), HConfig),
- case logger_h_common:overload_levels_ok(HState) of
- true ->
- start(Name, Config1, HState);
- false ->
- #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = HState,
- {error,{invalid_levels,{SMQL,DMQL,FQL}}}
- end;
+-spec adding_handler(Config) -> {ok,Config} | {error,Reason} when
+ Config :: logger:handler_config(),
+ Reason :: term().
+
+adding_handler(Config) ->
+ logger_h_common:adding_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+-spec changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ {ok,Config} | {error,Reason} when
+ SetOrUpdate :: set | update,
+ OldConfig :: logger:handler_config(),
+ NewConfig :: logger:handler_config(),
+ Config :: logger:handler_config(),
+ Reason :: term().
+
+changing_config(SetOrUpdate, OldConfig, NewConfig) ->
+ logger_h_common:changing_config(SetOrUpdate, OldConfig, NewConfig).
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+-spec removing_handler(Config) -> ok when
+ Config :: logger:handler_config().
+
+removing_handler(Config) ->
+ logger_h_common:removing_handler(Config).
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(LogEvent, Config) -> ok when
+ LogEvent :: logger:log_event(),
+ Config :: logger:handler_config().
+
+log(LogEvent, Config) ->
+ logger_h_common:log(LogEvent, Config).
+
+%%%-----------------------------------------------------------------
+%%% Remove internal fields from configuration
+-spec filter_config(Config) -> Config when
+ Config :: logger:handler_config().
+
+filter_config(Config) ->
+ logger_h_common:filter_config(Config).
+
+%%%===================================================================
+%%% logger_h_common callbacks
+%%%===================================================================
+init(Name, #{type := Type}) ->
+ case open_log_file(Name, Type) of
+ {ok,FileCtrlPid} ->
+ {ok,#{type=>Type,file_ctrl_pid=>FileCtrlPid}};
Error ->
Error
end.
-%%%-----------------------------------------------------------------
-%%% Updating handler config
-changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
+check_config(_Name,set,undefined,NewHConfig) ->
+ check_config(maps:merge(get_default_config(),NewHConfig));
+check_config(_Name,SetOrUpdate,OldHConfig,NewHConfig0) ->
WriteOnce = maps:with([type],OldHConfig),
- ReadOnly = maps:with([handler_pid,mode_tab],OldHConfig),
- NewHConfig0 = maps:get(config, NewConfig, #{}),
Default =
case SetOrUpdate of
set ->
@@ -148,48 +143,24 @@ changing_config(SetOrUpdate,OldConfig=#{config:=OldHConfig},NewConfig) ->
OldHConfig
end,
- %% Allow (accidentially) included read-only fields - just overwrite them
- NewHConfig = maps:merge(maps:merge(Default, NewHConfig0),ReadOnly),
+ NewHConfig = maps:merge(Default, NewHConfig0),
- %% But fail if write-once fields are changed
+ %% Fail if write-once fields are changed
case maps:with([type],NewHConfig) of
WriteOnce ->
- changing_config1(maps:get(handler_pid,OldHConfig),
- OldConfig,
- NewConfig#{config=>NewHConfig});
+ check_config(NewHConfig);
Other ->
- {error,{illegal_config_change,#{config=>WriteOnce},#{config=>Other}}}
+ {error,{illegal_config_change,?MODULE,WriteOnce,Other}}
end.
-changing_config1(HPid, OldConfig, NewConfig) ->
- case check_config(changing, NewConfig) of
- Result = {ok,NewConfig1} ->
- try gen_server:call(HPid, {change_config,OldConfig,NewConfig1},
- ?DEFAULT_CALL_TIMEOUT) of
- ok -> Result;
- HError -> HError
- catch
- _:{timeout,_} -> {error,handler_busy}
- end;
- Error ->
- Error
- end.
-
-check_config(adding, Config) ->
- %% Merge in defaults on handler level
- HConfig0 = maps:get(config, Config, #{}),
- HConfig = maps:merge(get_default_config(),HConfig0),
+check_config(#{type:=Type}=HConfig) ->
case check_h_config(maps:to_list(HConfig)) of
+ ok when is_atom(Type) ->
+ {ok,HConfig#{filesync_repeat_interval=>no_repeat}};
ok ->
- {ok,Config#{config=>HConfig}};
- Error ->
- Error
- end;
-check_config(changing, Config) ->
- HConfig = maps:get(config, Config, #{}),
- case check_h_config(maps:to_list(HConfig)) of
- ok -> {ok,Config};
- Error -> Error
+ {ok,HConfig};
+ {error,{Key,Value}} ->
+ {error,{invalid_config,?MODULE,#{Key=>Value}}}
end.
check_h_config([{type,Type} | Config]) when Type == standard_io;
@@ -200,219 +171,40 @@ check_h_config([{type,{file,File}} | Config]) when is_list(File) ->
check_h_config([{type,{file,File,Modes}} | Config]) when is_list(File),
is_list(Modes) ->
check_h_config(Config);
-check_h_config([Other | Config]) ->
- case logger_h_common:check_common_config(Other) of
- valid ->
- check_h_config(Config);
- invalid ->
- {error,{invalid_config,?MODULE,Other}}
- end;
+check_h_config([Other | _]) ->
+ {error,Other};
check_h_config([]) ->
ok.
-
-%%%-----------------------------------------------------------------
-%%% Handler being removed
-removing_handler(#{id:=Name}) ->
- stop(Name).
-
-%%%-----------------------------------------------------------------
-%%% Log a string or report
--spec log(LogEvent, Config) -> ok when
- LogEvent :: logger:log_event(),
- Config :: logger:handler_config().
-
-log(LogEvent, Config = #{id := Name,
- config := #{handler_pid := HPid,
- mode_tab := ModeTab}}) ->
- %% if the handler has crashed, we must drop this event
- %% and hope the handler restarts so we can try again
- true = is_process_alive(HPid),
- Bin = logger_h_common:log_to_binary(LogEvent, Config),
- logger_h_common:call_cast_or_drop(Name, HPid, ModeTab, Bin).
-
-%%%-----------------------------------------------------------------
-%%% Remove internal fields from configuration
-filter_config(#{config:=HConfig}=Config) ->
- Config#{config=>maps:without([handler_pid,mode_tab],HConfig)}.
-
-%%%===================================================================
-%%% gen_server callbacks
-%%%===================================================================
-
-init([Name, Config = #{config := HConfig},
- State0 = #{type := Type, file_ctrl_sync_int := FileCtrlSyncInt}]) ->
- RegName = ?name_to_reg_name(?MODULE,Name),
- register(RegName, self()),
- process_flag(trap_exit, true),
- process_flag(message_queue_data, off_heap),
-
- ?init_test_hooks(),
- ?start_observation(Name),
-
- case do_init(Name, Type) of
- {ok,InitState} ->
- try ets:new(Name, [public]) of
- ModeTab ->
- ?set_mode(ModeTab, async),
- State = maps:merge(State0, InitState),
- T0 = ?timestamp(),
- State1 =
- ?merge_with_stats(State#{
- mode_tab => ModeTab,
- mode => async,
- file_ctrl_sync => FileCtrlSyncInt,
- last_qlen => 0,
- last_log_ts => T0,
- last_op => sync,
- burst_win_ts => T0,
- burst_msg_count => 0}),
- Config1 =
- Config#{config => HConfig#{handler_pid => self(),
- mode_tab => ModeTab}},
- proc_lib:init_ack({ok,self(),Config1}),
- gen_server:cast(self(), repeated_filesync),
- gen_server:enter_loop(?MODULE, [], State1)
- catch
- _:Error ->
- unregister(RegName),
- logger_h_common:error_notify({init_handler,Name,Error}),
- proc_lib:init_ack(Error)
- end;
- Error ->
- unregister(RegName),
- logger_h_common:error_notify({init_handler,Name,Error}),
- proc_lib:init_ack(Error)
- end.
-
-do_init(Name, Type) ->
- case open_log_file(Name, Type) of
- {ok,FileCtrlPid} ->
- case logger_h_common:unset_restart_flag(Name, ?MODULE) of
- true ->
- %% inform about restart
- gen_server:cast(self(), {log_handler_info,
- "Handler ~p restarted",
- [Name]});
- false ->
- %% initial start
- ok
- end,
- {ok,#{id=>Name,type=>Type,file_ctrl_pid=>FileCtrlPid}};
- Error ->
- Error
- end.
-
-%% This is the synchronous log event.
-handle_call({log, Bin}, _From, State) ->
- {Result,State1} = do_log(Bin, call, State),
- %% Result == ok | dropped
- {reply,Result, State1};
-
-handle_call(filesync, _From, State = #{type := Type,
- file_ctrl_pid := FileCtrlPid}) ->
- if is_atom(Type) ->
- {reply, ok, State};
- true ->
- {reply, file_ctrl_filesync_sync(FileCtrlPid), State#{last_op=>sync}}
- end;
-
-handle_call({change_config,_OldConfig,NewConfig}, _From,
- State = #{filesync_repeat_interval := FSyncInt0}) ->
- HConfig = maps:get(config, NewConfig, #{}),
- State1 = maps:merge(State, HConfig),
- case logger_h_common:overload_levels_ok(State1) of
- true ->
- _ =
- case maps:get(filesync_repeat_interval, HConfig, undefined) of
- undefined ->
- ok;
- no_repeat ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined));
- FSyncInt0 ->
- ok;
- _FSyncInt1 ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
- State,
- undefined)),
- gen_server:cast(self(), repeated_filesync)
- end,
- {reply, ok, State1};
- false ->
- #{sync_mode_qlen := SMQL,
- drop_mode_qlen := DMQL,
- flush_qlen := FQL} = State1,
- {reply, {error,{invalid_levels,{SMQL,DMQL,FQL}}}, State}
- end;
-
-handle_call(info, _From, State) ->
- {reply, State, State};
-
-handle_call(reset, _From, State) ->
- State1 = ?merge_with_stats(State),
- {reply, ok, State1#{last_qlen => 0,
- last_log_ts => ?timestamp()}};
-
-handle_call(stop, _From, State) ->
- {stop, {shutdown,stopped}, ok, State}.
-
-%% This is the asynchronous log event.
-handle_cast({log, Bin}, State) ->
- {_,State1} = do_log(Bin, cast, State),
- {noreply, State1};
-
-handle_cast({log_handler_info, Format, Args}, State = #{id:=Name}) ->
- log_handler_info(Name, Format, Args, State),
- {noreply, State};
-
-%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
-%% clause gets called repeatedly by the handler. In order to
-%% guarantee that a filesync *always* happens after the last log
-%% event, the repeat operation must be active!
-handle_cast(repeated_filesync,
- State = #{type := Type,
- file_ctrl_pid := FileCtrlPid,
- filesync_repeat_interval := FSyncInt,
- last_op := LastOp}) ->
- State1 =
- if not is_atom(Type), is_integer(FSyncInt) ->
- %% only do filesync if something has been
- %% written since last time we checked
- if LastOp == sync ->
- ok;
- true ->
- file_ctrl_filesync_async(FileCtrlPid)
- end,
- {ok,TRef} =
- timer:apply_after(FSyncInt, gen_server,cast,
- [self(),repeated_filesync]),
- State#{rep_sync_tref => TRef, last_op => sync};
- true ->
- State
- end,
- {noreply,State1}.
-
-handle_info({'EXIT',Pid,Why}, State = #{id := Name, type := FileInfo}) ->
- case maps:get(file_ctrl_pid, State, undefined) of
- Pid ->
- %% file error, terminate handler
- logger_h_common:handler_exit(Name,
- {error,{write_failed,FileInfo,Why}});
- _Other ->
- %% ignore EXIT
- ok
- end,
- {noreply, State};
-
-handle_info(_Info, State) ->
- {noreply, State}.
-
-terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid,
- type:=_FileInfo}) ->
- _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
- undefined)),
+get_default_config() ->
+ #{type => standard_io}.
+
+filesync(_Name, _Mode, #{type := Type}=State) when is_atom(Type) ->
+ {ok,State};
+filesync(_Name, async, #{file_ctrl_pid := FileCtrlPid} = State) ->
+ ok = file_ctrl_filesync_async(FileCtrlPid),
+ {ok,State};
+filesync(_Name, sync, #{file_ctrl_pid := FileCtrlPid} = State) ->
+ Result = file_ctrl_filesync_sync(FileCtrlPid),
+ {Result,State}.
+
+write(_Name, async, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
+ ok = file_write_async(FileCtrlPid, Bin),
+ {ok,State};
+write(_Name, sync, Bin, #{file_ctrl_pid:=FileCtrlPid} = State) ->
+ Result = file_write_sync(FileCtrlPid, Bin),
+ {Result,State}.
+
+reset_state(_Name, State) ->
+ State.
+
+handle_info(_Name, {'EXIT',Pid,Why}, #{type := FileInfo, file_ctrl_pid := Pid}) ->
+ %% file_ctrl_pid died, file error, terminate handler
+ exit({error,{write_failed,FileInfo,Why}});
+handle_info(_, _, State) ->
+ State.
+
+terminate(_Name, _Reason, #{file_ctrl_pid:=FWPid}) ->
case is_process_alive(FWPid) of
true ->
unlink(FWPid),
@@ -423,17 +215,12 @@ terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid,
ok
after
?DEFAULT_CALL_TIMEOUT ->
- exit(FWPid, kill)
+ exit(FWPid, kill),
+ ok
end;
false ->
ok
- end,
- ok = logger_h_common:stop_or_restart(Name, Reason, State),
- unregister(?name_to_reg_name(?MODULE, Name)),
- ok.
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
+ end.
%%%===================================================================
%%% Internal functions
@@ -441,203 +228,6 @@ code_change(_OldVsn, State, _Extra) ->
%%%-----------------------------------------------------------------
%%%
-get_default_config() ->
- #{type => standard_io,
- sync_mode_qlen => ?SYNC_MODE_QLEN,
- drop_mode_qlen => ?DROP_MODE_QLEN,
- flush_qlen => ?FLUSH_QLEN,
- burst_limit_enable => ?BURST_LIMIT_ENABLE,
- burst_limit_max_count => ?BURST_LIMIT_MAX_COUNT,
- burst_limit_window_time => ?BURST_LIMIT_WINDOW_TIME,
- overload_kill_enable => ?OVERLOAD_KILL_ENABLE,
- overload_kill_qlen => ?OVERLOAD_KILL_QLEN,
- overload_kill_mem_size => ?OVERLOAD_KILL_MEM_SIZE,
- overload_kill_restart_after => ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
-
-get_init_state() ->
- #{file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
- filesync_ok_qlen => ?FILESYNC_OK_QLEN}.
-
-%%%-----------------------------------------------------------------
-%%% Add a standard handler to the logger.
-%%% This starts a dedicated handler process which should always
-%%% exist if the handler is registered with logger (and should not
-%%% exist if the handler is not registered).
-%%%
-%%% Handler specific config should be provided with a sub map associated
-%%% with a key named 'config', e.g:
-%%%
-%%% Config = #{config => #{sync_mode_qlen => 50}
-%%%
-%%% The standard handler process is linked to logger_sup, which is
-%%% part of the kernel application's supervision tree.
-start(Name, Config, HandlerState) ->
- LoggerStdH =
- #{id => Name,
- start => {?MODULE, start_link, [Name,Config,HandlerState]},
- restart => temporary,
- shutdown => 2000,
- type => worker,
- modules => [?MODULE]},
- case supervisor:start_child(logger_sup, LoggerStdH) of
- {ok,Pid,Config1} ->
- ok = logger_handler_watcher:register_handler(Name,Pid),
- {ok,Config1};
- Error ->
- Error
- end.
-
-%%%-----------------------------------------------------------------
-%%% Stop and remove the handler.
-stop(Name) ->
- case whereis(?name_to_reg_name(?MODULE,Name)) of
- undefined ->
- ok;
- Pid ->
- %% We don't want to do supervisor:terminate_child here
- %% since we need to distinguish this explicit stop from a
- %% system termination in order to avoid circular attempts
- %% at removing the handler (implying deadlocks and
- %% timeouts).
- %% And we don't need to do supervisor:delete_child, since
- %% the restart type is temporary, which means that the
- %% child specification is automatically removed from the
- %% supervisor when the process dies.
- _ = gen_server:call(Pid, stop),
- ok
- end.
-
-%%%-----------------------------------------------------------------
-%%% Logging and overload control.
--define(update_file_ctrl_sync(C, Interval),
- if C == 0 -> Interval;
- true -> C-1 end).
-
-%% check for overload between every event (and set Mode to async,
-%% sync or drop accordingly), but never flush the whole mailbox
-%% before LogWindowSize events have been handled
-do_log(Bin, CallOrCast, State = #{id:=Name, mode:=Mode0}) ->
- T1 = ?timestamp(),
-
- %% check if the handler is getting overloaded, or if it's
- %% recovering from overload (the check must be done for each
- %% event to react quickly to large bursts of events and
- %% to ensure that the handler can never end up in drop mode
- %% with an empty mailbox, which would stop operation)
- {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
-
- if (Mode1 == drop) andalso (Mode0 =/= drop) ->
- log_handler_info(Name, "Handler ~p switched to drop mode",
- [Name], State);
- (Mode0 == drop) andalso ((Mode1 == async) orelse (Mode1 == sync)) ->
- log_handler_info(Name, "Handler ~p switched to ~w mode",
- [Name,Mode1], State);
- true ->
- ok
- end,
-
- %% kill the handler if it can't keep up with the load
- logger_h_common:kill_if_choked(Name, QLen, Mem, ?MODULE, State),
-
- if Mode1 == flush ->
- flush(Name, QLen, T1, State1);
- true ->
- write(Name, Mode1, T1, Bin, CallOrCast, State1)
- end.
-
-%% this clause is called by do_log/3 after an overload check
-%% has been performed, where QLen > FlushQLen
-flush(Name, _QLen0, T1, State=#{last_log_ts := _T0, mode_tab := ModeTab}) ->
- %% flush messages in the mailbox (a limited number in
- %% order to not cause long delays)
- NewFlushed = logger_h_common:flush_log_events(?FLUSH_MAX_N),
-
- %% write info in log about flushed messages
- log_handler_info(Name, "Handler ~p flushed ~w log events",
- [Name,NewFlushed], State),
-
- %% because of the receive loop when flushing messages, the
- %% handler will be scheduled out often and the mailbox could
- %% grow very large, so we'd better check the queue again here
- {_,_QLen1} = process_info(self(), message_queue_len),
- ?observe(Name,{max_qlen,_QLen1}),
-
- %% Add 1 for the current log event
- ?observe(Name,{flushed,NewFlushed+1}),
-
- State1 = ?update_max_time(?diff_time(T1,_T0),State),
- {dropped,?update_other(flushed,FLUSHED,NewFlushed,
- State1#{mode => ?set_mode(ModeTab,async),
- last_qlen => 0,
- last_log_ts => T1})}.
-
-%% this clause is called to write to file
-write(_Name, Mode, T1, Bin, _CallOrCast,
- State = #{mode_tab := ModeTab,
- file_ctrl_pid := FileCtrlPid,
- file_ctrl_sync := FileCtrlSync,
- last_qlen := LastQLen,
- last_log_ts := T0,
- file_ctrl_sync_int := FileCtrlSyncInt}) ->
- %% check if we need to limit the number of writes
- %% during a burst of log events
- {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
-
- %% only send a synhrounous event to the file controller process
- %% every FileCtrlSyncInt time, to give the handler time between
- %% file writes so it can keep up with incoming messages
- {Result,LastQLen1} =
- if DoWrite, FileCtrlSync == 0 ->
- ?observe(_Name,{_CallOrCast,1}),
- file_write_sync(FileCtrlPid, Bin, false),
- {ok,element(2, process_info(self(), message_queue_len))};
- DoWrite ->
- ?observe(_Name,{_CallOrCast,1}),
- file_write_async(FileCtrlPid, Bin),
- {ok,LastQLen};
- not DoWrite ->
- ?observe(_Name,{flushed,1}),
- {dropped,LastQLen}
- end,
-
- %% Check if the time since the previous log event is long enough -
- %% and the queue length small enough - to assume the mailbox has
- %% been emptied, and if so, do filesync operation and reset mode to
- %% async. Note that this is the best we can do to detect an idle
- %% handler without setting a timer after each log call/cast. If the
- %% time between two consecutive log events is fast and no new
- %% event comes in after the last one, idle state won't be detected!
- Time = ?diff_time(T1,T0),
- {Mode1,BurstMsgCount1} =
- if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
- (Time > ?IDLE_DETECT_TIME_USEC) ->
- %% do filesync if necessary
- case maps:get(type, State) of
- Std when is_atom(Std) ->
- ok;
- _File ->
- file_ctrl_filesync_async(FileCtrlPid)
- end,
- {?change_mode(ModeTab, Mode, async),0};
- true ->
- {Mode,BurstMsgCount}
- end,
- State1 =
- ?update_calls_or_casts(_CallOrCast,1,State),
- State2 =
- ?update_max_time(Time,
- State1#{mode => Mode1,
- last_qlen := LastQLen1,
- last_log_ts => T1,
- last_op => write,
- burst_win_ts => BurstWinT,
- burst_msg_count => BurstMsgCount1,
- file_ctrl_sync =>
- ?update_file_ctrl_sync(FileCtrlSync,
- FileCtrlSyncInt)}),
- {Result,State2}.
-
open_log_file(HandlerName, FileInfo) ->
case file_ctrl_start(HandlerName, FileInfo) of
OK = {ok,_FileCtrlPid} -> OK;
@@ -669,19 +259,6 @@ close_log_file(Fd) ->
_ = file:close(Fd).
-log_handler_info(Name, Format, Args, #{file_ctrl_pid := FileCtrlPid}) ->
- Config =
- case logger:get_handler_config(Name) of
- {ok,Conf} -> Conf;
- _ -> #{formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}
- end,
- Meta = #{time=>erlang:system_time(microsecond)},
- Bin = logger_h_common:log_to_binary(#{level => notice,
- msg => {Format,Args},
- meta => Meta}, Config),
- _ = file_write_async(FileCtrlPid, Bin),
- ok.
-
%%%-----------------------------------------------------------------
%%% File control process
@@ -708,24 +285,19 @@ file_write_async(Pid, Bin) ->
Pid ! {log,Bin},
ok.
-file_write_sync(Pid, Bin, FileSync) ->
- case file_ctrl_call(Pid, {log,self(),Bin,FileSync}) of
- {error,Reason} ->
- {error,{write_failed,Bin,Reason}};
- Result ->
- Result
- end.
+file_write_sync(Pid, Bin) ->
+ file_ctrl_call(Pid, {log,Bin}).
file_ctrl_filesync_async(Pid) ->
Pid ! filesync,
ok.
file_ctrl_filesync_sync(Pid) ->
- file_ctrl_call(Pid, {filesync,self()}).
+ file_ctrl_call(Pid, filesync).
file_ctrl_call(Pid, Msg) ->
MRef = monitor(process, Pid),
- Pid ! {Msg,MRef},
+ Pid ! {Msg,{self(),MRef}},
receive
{MRef,Result} ->
demonitor(MRef, [flush]),
@@ -743,74 +315,40 @@ file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) ->
case do_open_log_file(FileInfo) of
{ok,Fd} ->
Starter ! {self(),ok},
- file_ctrl_loop(Fd, file, FileName, false, ok, ok, HandlerName);
+ file_ctrl_loop(Fd, FileName, false, ok, ok, HandlerName);
{error,Reason} ->
Starter ! {self(),{error,{open_failed,FileName,Reason}}}
end;
file_ctrl_init(HandlerName, StdDev, Starter) ->
Starter ! {self(),ok},
- file_ctrl_loop(StdDev, standard_io, StdDev, false, ok, ok, HandlerName).
+ file_ctrl_loop(StdDev, StdDev, false, ok, ok, HandlerName).
-file_ctrl_loop(Fd, Type, DevName, Synced,
+file_ctrl_loop(Fd, DevName, Synced,
PrevWriteResult, PrevSyncResult, HandlerName) ->
receive
%% asynchronous event
{log,Bin} ->
- Result = if Type == file ->
- write_to_dev(Fd, Bin, DevName,
- PrevWriteResult, HandlerName);
- true ->
- io:put_chars(Fd, Bin)
- end,
- file_ctrl_loop(Fd, Type, DevName, false,
+ Result = write_to_dev(Fd, Bin, DevName, PrevWriteResult, HandlerName),
+ file_ctrl_loop(Fd, DevName, false,
Result, PrevSyncResult, HandlerName);
%% synchronous event
- {{log,From,Bin,FileSync},MRef} ->
- if Type == file ->
- %% check that file hasn't been deleted
- CheckFile =
- fun() -> {ok,_} = file:read_file_info(DevName) end,
- spawn_link(CheckFile),
- WResult = write_to_dev(Fd, Bin, DevName,
- PrevWriteResult, HandlerName),
- {Synced1,SResult} =
- if not FileSync ->
- {false,PrevSyncResult};
- true ->
- case sync_dev(Fd, DevName,
- PrevSyncResult, HandlerName) of
- ok -> {true,ok};
- Error -> {false,Error}
- end
- end,
- From ! {MRef,ok},
- file_ctrl_loop(Fd, Type, DevName, Synced1,
- WResult, SResult, HandlerName);
- true ->
- _ = io:put_chars(Fd, Bin),
- From ! {MRef,ok},
- file_ctrl_loop(Fd, Type, DevName, false,
- ok, PrevSyncResult, HandlerName)
- end;
+ {{log,Bin},{From,MRef}} ->
+ check_exist(Fd, DevName),
+ Result = write_to_dev(Fd, Bin, DevName, PrevWriteResult, HandlerName),
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, DevName, false,
+ Result, PrevSyncResult, HandlerName);
- filesync when not Synced ->
- Result = sync_dev(Fd, DevName, PrevSyncResult, HandlerName),
- file_ctrl_loop(Fd, Type, DevName, true,
+ filesync ->
+ Result = sync_dev(Fd, DevName, Synced, PrevSyncResult, HandlerName),
+ file_ctrl_loop(Fd, DevName, true,
PrevWriteResult, Result, HandlerName);
- filesync ->
- file_ctrl_loop(Fd, Type, DevName, true,
- PrevWriteResult, PrevSyncResult, HandlerName);
-
- {{filesync,From},MRef} ->
- Result = if not Synced ->
- sync_dev(Fd, DevName, PrevSyncResult, HandlerName);
- true ->
- ok
- end,
+ {filesync,{From,MRef}} ->
+ Result = sync_dev(Fd, DevName, Synced, PrevSyncResult, HandlerName),
From ! {MRef,ok},
- file_ctrl_loop(Fd, Type, DevName, true,
+ file_ctrl_loop(Fd, DevName, true,
PrevWriteResult, Result, HandlerName);
stop ->
@@ -818,26 +356,30 @@ file_ctrl_loop(Fd, Type, DevName, Synced,
stopped
end.
+check_exist(DevName, DevName) when is_atom(DevName) ->
+ ok;
+check_exist(_Fd, FileName) ->
+ _ = spawn_link(fun() -> {ok,_} = file:read_file_info(FileName) end),
+ ok.
+
+write_to_dev(DevName, Bin, _DevName, _PrevWriteResult, _HandlerName)
+ when is_atom(DevName) ->
+ io:put_chars(DevName, Bin);
write_to_dev(Fd, Bin, FileName, PrevWriteResult, HandlerName) ->
- case ?file_write(Fd, Bin) of
- ok ->
- ok;
- PrevWriteResult ->
- %% don't report same error twice
- PrevWriteResult;
- Error ->
- logger_h_common:error_notify({HandlerName,write,FileName,Error}),
- Error
- end.
+ Result = ?file_write(Fd, Bin),
+ maybe_notify_error(write,Result,PrevWriteResult,FileName,HandlerName).
-sync_dev(Fd, DevName, PrevSyncResult, HandlerName) ->
- case ?file_datasync(Fd) of
- ok ->
- ok;
- PrevSyncResult ->
- %% don't report same error twice
- PrevSyncResult;
- Error ->
- logger_h_common:error_notify({HandlerName,filesync,DevName,Error}),
- Error
- end.
+sync_dev(_Fd, _FileName, true, PrevSyncResult, _HandlerName) ->
+ PrevSyncResult;
+sync_dev(Fd, FileName, false, PrevSyncResult, HandlerName) ->
+ Result = ?file_datasync(Fd),
+ maybe_notify_error(filesync,Result,PrevSyncResult,FileName,HandlerName).
+
+maybe_notify_error(_Op, ok, _PrevResult, _FileName, _HandlerName) ->
+ ok;
+maybe_notify_error(_Op, PrevResult, PrevResult, _FileName, _HandlerName) ->
+ %% don't report same error twice
+ PrevResult;
+maybe_notify_error(Op, Error, _PrevResult, FileName, HandlerName) ->
+ logger_h_common:error_notify({HandlerName,Op,FileName,Error}),
+ Error.
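
Two of the new helpers above are worth spelling out. file_ctrl_call/2 now uses a plain monitor-based request/reply: the caller monitors the file controller, tags the request with the monitor reference, and waits for a reply carrying that reference. A self-contained sketch of the pattern (simplified; the real function may also apply a receive timeout):

call_file_ctrl(Pid, Msg) ->
    MRef = monitor(process, Pid),
    Pid ! {Msg, {self(), MRef}},
    receive
        {MRef, Result} ->
            demonitor(MRef, [flush]),
            Result;
        {'DOWN', MRef, _Type, _Object, Reason} ->
            {error, Reason}
    end.

maybe_notify_error/5, in turn, collapses the duplicated "don't report the same error twice" logic from write_to_dev and sync_dev into one place: ok is silent, a repeat of the previous error is returned without logging, and only a new error reaches logger_h_common:error_notify/1.
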
diff --git a/lib/kernel/src/net_kernel.erl b/lib/kernel/src/net_kernel.erl
index 3cf11fd7b1..a9dc77837e 100644
--- a/lib/kernel/src/net_kernel.erl
+++ b/lib/kernel/src/net_kernel.erl
@@ -808,7 +808,8 @@ handle_info({AcceptPid, {accept_pending,MyNode,Node,Address,Type}}, State) ->
_:_ ->
error_logger:error_msg("~n** Cannot get connection id for node ~w~n",
[Node]),
- AcceptPid ! {self(),{accept_pending,nok_pending}}
+ AcceptPid ! {self(),{accept_pending,nok_pending}},
+ {noreply, State}
end
end;
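
The one-line net_kernel change above makes this error branch yield a valid gen_server result: the value of Pid ! Msg is the message term itself, so an explicit {noreply, State} is needed after the send. A minimal illustration of the rule (a sketch, not the surrounding net_kernel code):

handle_info({ping, From}, State) ->
    From ! pong,
    {noreply, State};   %% each handle_info clause must end in a valid gen_server return
handle_info(_Other, State) ->
    {noreply, State}.
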
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
index 905c2c52c5..87b8250781 100644
--- a/lib/kernel/test/logger_disk_log_h_SUITE.erl
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -332,16 +332,18 @@ errors(Config) ->
%%! Check how bad log_opts are handled!
{error,{illegal_config_change,
- #{config:=#{type:=wrap}},
- #{config:=#{type:=halt}}}} =
+ logger_disk_log_h,
+ #{type:=wrap},
+ #{type:=halt}}} =
logger:update_handler_config(Name1,
config,
#{type=>halt,
file=>LogFile1}),
{error,{illegal_config_change,
- #{config:=#{file:=LogFile1}},
- #{config:=#{file:="newfilename"}}}} =
+ logger_disk_log_h,
+ #{file:=LogFile1},
+ #{file:="newfilename"}}} =
logger:update_handler_config(Name1,
config,
#{file=>"newfilename"}),
@@ -411,20 +413,25 @@ formatter_fail(cleanup,_Config) ->
ok.
config_fail(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_disk_log_h,{bad,bad}}}} =
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,#{bad:=bad}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{bad => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{handler_not_added,{invalid_levels,{_,1,_}}}} =
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,
+ {invalid_levels,#{drop_mode_qlen:=1}}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{drop_mode_qlen=>1}}),
- {error,{handler_not_added,{invalid_levels,{43,42,_}}}} =
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,
+ {invalid_levels,#{sync_mode_qlen:=43,
+ drop_mode_qlen:=42}}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{sync_mode_qlen=>43,
drop_mode_qlen=>42}}),
- {error,{handler_not_added,{invalid_levels,{_,43,42}}}} =
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,
+ {invalid_levels,#{drop_mode_qlen:=43,
+ flush_qlen:=42}}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{drop_mode_qlen=>43,
flush_qlen=>42}}),
@@ -433,20 +440,17 @@ config_fail(_Config) ->
#{filter_default=>log,
formatter=>{?MODULE,self()}}),
%% can't change the disk log options for a log already in use
- {error,{illegal_config_change,_,_}} =
+ {error,{illegal_config_change,logger_disk_log_h,_,_}} =
logger:update_handler_config(?MODULE,config,
#{max_no_files=>2}),
- %% can't change name of an existing handler
- {error,{illegal_config_change,_,_}} =
- logger:update_handler_config(?MODULE,id,bad),
%% incorrect values of OP params
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
- {error,{invalid_levels,_}} =
+ {error,{invalid_config,logger_disk_log_h,{invalid_levels,_}}} =
logger:update_handler_config(?MODULE,config,
HConfig#{sync_mode_qlen=>100,
flush_qlen=>99}),
%% invalid name of config parameter
- {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} =
+ {error,{invalid_config,logger_disk_log_h,#{filesync_rep_int:=2000}}} =
logger:update_handler_config(?MODULE, config,
HConfig#{filesync_rep_int => 2000}),
ok.
@@ -487,10 +491,11 @@ reconfig(Config) ->
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
- log_opts := #{type := ?DISK_LOG_TYPE,
- max_no_files := ?DISK_LOG_MAX_NO_FILES,
- max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
- file := DiskLogFile}} =
+ handler_state :=
+ #{log_opts := #{type := ?DISK_LOG_TYPE,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ file := DiskLogFile}}} =
logger_disk_log_h:info(?MODULE),
{ok,#{config :=
#{sync_mode_qlen := ?SYNC_MODE_QLEN,
@@ -572,10 +577,11 @@ reconfig(Config) ->
max_no_files => 1,
max_no_bytes => 1024,
file => File}}),
- #{log_opts := #{type := halt,
- max_no_files := 1,
- max_no_bytes := 1024,
- file := File}} =
+ #{handler_state :=
+ #{log_opts := #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}}} =
logger_disk_log_h:info(?MODULE),
{ok,#{config :=
#{type := halt,
@@ -596,13 +602,13 @@ reconfig(Config) ->
%% You are not allowed to actively set the write once fields
%% (type, max_no_files, max_no_bytes, file) in runtime.
- {error, {illegal_config_change,_,_}} =
+ {error, {illegal_config_change,_,_,_}} =
logger:set_handler_config(?MODULE,config,#{type=>wrap}),
- {error, {illegal_config_change,_,_}} =
+ {error, {illegal_config_change,_,_,_}} =
logger:set_handler_config(?MODULE,config,#{max_no_files=>2}),
- {error, {illegal_config_change,_,_}} =
+ {error, {illegal_config_change,_,_,_}} =
logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}),
- {error, {illegal_config_change,_,_}} =
+ {error, {illegal_config_change,_,_,_}} =
logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}),
{ok,C7} = logger:get_handler_config(?MODULE),
ct:log("C7: ~p",[C7]),
@@ -630,16 +636,16 @@ sync(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,nl}}),
- start_tracer([{disk_log,blog,2},
- {logger_disk_log_h,disk_log_sync,2}],
- [{disk_log,blog,<<"first\n">>},
- {logger_disk_log_h,disk_log_sync}]),
+ start_tracer([{logger_disk_log_h,disk_log_write,3},
+ {disk_log,sync,1}],
+ [{logger_disk_log_h,disk_log_write,<<"first\n">>},
+ {disk_log,sync}]),
logger:notice("first", ?domain),
%% wait for automatic disk_log_sync
check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
- %% check that if there's no repeated disk_log_sync active,
+ %% check that if there's no repeated filesync active,
%% a disk_log_sync is still performed when handler goes idle
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
@@ -652,12 +658,12 @@ sync(Config) ->
%% triggered by the idle timeout between "fourth" and "fifth".
timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
- start_tracer([{disk_log,blog,2},
- {logger_disk_log_h,disk_log_sync,2}],
- [{disk_log,blog,<<"second\n">>},
- {logger_disk_log_h,disk_log_sync},
- {disk_log,blog,<<"third\n">>},
- {logger_disk_log_h,disk_log_sync}]),
+ start_tracer([{logger_disk_log_h,disk_log_write,3},
+ {disk_log,sync,1}],
+ [{logger_disk_log_h,disk_log_write,<<"second\n">>},
+ {disk_log,sync},
+ {logger_disk_log_h,disk_log_write,<<"third\n">>},
+ {disk_log,sync}]),
logger:notice("second", ?domain),
timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
@@ -667,13 +673,13 @@ sync(Config) ->
try_read_file(Log, {ok,<<"first\nsecond\nthird\n">>}, 1000),
- %% switch repeated disk_log_sync on and verify that the looping works
+ %% switch repeated filesync on and verify that the looping works
SyncInt = 1000,
WaitT = 4500,
- OneSync = {logger_disk_log_h,handle_cast,repeated_disk_log_sync},
- %% receive 1 initial repeated_disk_log_sync, then 1 per sec
- start_tracer([{logger_disk_log_h,handle_cast,2}],
- [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{logger_h_common,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
HConfig2 = HConfig#{filesync_repeat_interval => SyncInt},
ok = logger:update_handler_config(?MODULE, config, HConfig2),
@@ -716,7 +722,7 @@ disk_log_wrap(Config) ->
end,
{ok,_} = dbg:tracer(process, {TraceFun, Tester}),
{ok,_} = dbg:p(whereis(h_proc_name()), [c]),
- {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)],
ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]),
@@ -734,7 +740,7 @@ disk_log_wrap(Config) ->
timer:sleep(1000),
dbg:stop_clear(),
Received = lists:flatmap(fun({trace,_M,handle_info,
- [{disk_log,_Node,_Name,What},_]}) ->
+ [_,{disk_log,_Node,_Name,What},_]}) ->
[{trace,What}];
({log,_}) ->
[]
@@ -770,7 +776,7 @@ disk_log_full(Config) ->
end,
{ok,_} = dbg:tracer(process, {TraceFun, Tester}),
{ok,_} = dbg:p(whereis(h_proc_name()), [c]),
- {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
NoOfChars = 5,
Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)],
@@ -780,14 +786,18 @@ disk_log_full(Config) ->
timer:sleep(2000),
dbg:stop_clear(),
Received = lists:flatmap(fun({trace,_M,handle_info,
- [{disk_log,_Node,_Name,What},_]}) ->
+ [_,{disk_log,_Node,_Name,What},_]}) ->
[{trace,What}];
({log,_}) ->
[]
end, test_server:messages_get()),
ct:pal("Trace =~n~p", [Received]),
- [{trace,full},
- {trace,{error_status,{error,{full,_}}}}] = Received,
+
+ %% The tail here could be an error_status notification, if the
+ %% last write was synchronous, but in most cases it will not be
+ [{trace,full}|_] = Received,
+ %% [{trace,full},
+ %% {trace,{error_status,{error,{full,_}}}}] = Received,
ok.
disk_log_full(cleanup, _Config) ->
dbg:stop_clear(),
@@ -819,14 +829,14 @@ disk_log_events(Config) ->
end,
{ok,_} = dbg:tracer(process, {TraceFun, Tester}),
{ok,_} = dbg:p(whereis(h_proc_name()), [c]),
- {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
[whereis(h_proc_name()) ! E || E <- Events],
%% wait for trace messages
timer:sleep(2000),
dbg:stop_clear(),
Received = lists:map(fun({trace,_M,handle_info,
- [Got,_]}) -> Got
+ [_,Got,_]}) -> Got
end, test_server:messages_get()),
ct:pal("Trace =~n~p", [Received]),
NoOfEvents = length(Events),
@@ -849,13 +859,15 @@ write_failure(Config) ->
false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]),
HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
- ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]),
+ ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts,
+ maps:get(handler_state,HState))]),
+ %% ?check and ?check_no_log in this test only check for internal log events
ok = log_on_remote_node(Node, "Logged1"),
rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
- ?check_no_log,
+ ?check_no_log, % no internal log when write ok
SyncRepInt = case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
true -> 5500;
@@ -864,24 +876,26 @@ write_failure(Config) ->
try_read_file(Log, {ok,<<"Logged1\n">>}, SyncRepInt),
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,{error,no_such_log}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
+ %% this should have caused an internal log
?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}),
-
+
ok = log_on_remote_node(Node, "No second error printout"),
- ?check_no_log,
+ ?check_no_log, % but don't log same error twice
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,
{error,{full,?STANDARD_HANDLER}}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
+ %% this was a different error, so it should be logged
?check({error,{?STANDARD_HANDLER,log,LogOpts,
{error,{full,?STANDARD_HANDLER}}}}),
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]),
ok = log_on_remote_node(Node, "Logged2"),
rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
- ?check_no_log,
+ ?check_no_log, % no internal log when write ok
try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, SyncRepInt),
ok.
write_failure(cleanup, _Config) ->
@@ -901,7 +915,7 @@ sync_failure(Config) ->
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
- LogOpts = maps:get(log_opts, HState),
+ LogOpts = maps:get(log_opts, maps:get(handler_state,HState)),
SyncInt = 500,
ok = rpc:call(Node, logger, update_handler_config,
@@ -1606,10 +1620,10 @@ tpl([{M,F,A}|Trace]) ->
tpl([]) ->
ok.
-tracer({trace,_,call,{logger_disk_log_h,handle_cast,[Op|_]},Caller},
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]},Caller},
{Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op},Caller);
-tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]},Caller}, {Pid,[{Mod,Func,Data}|Expected]}) ->
+tracer({trace,_,call,{Mod=logger_disk_log_h,Func=disk_log_write,[_,_,Data]},Caller}, {Pid,[{Mod,Func,Data}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Data},Caller);
tracer({trace,_,call,{Mod,Func,_},Caller}, {Pid,[{Mod,Func}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func},Caller);
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index b6a09f4980..a1159f280c 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -140,7 +140,7 @@ all() ->
].
add_remove_instance_tty(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_std_h,{type,tty}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{type:=tty}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{type => tty},
filter_default=>log,
@@ -234,7 +234,7 @@ errors(Config) ->
{error,
{handler_not_added,
- {invalid_config,logger_std_h,{type,faulty_type}}}} =
+ {invalid_config,logger_std_h,#{type:=faulty_type}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{type => faulty_type}}),
@@ -308,25 +308,30 @@ formatter_fail(cleanup,_Config) ->
logger:remove_handler(?MODULE).
config_fail(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_std_h,{bad,bad}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{bad:=bad}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{bad => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
{error,{handler_not_added,{invalid_config,logger_std_h,
- {restart_type,bad}}}} =
+ #{restart_type:=bad}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{restart_type => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{handler_not_added,{invalid_levels,{_,1,_}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ {invalid_levels,#{drop_mode_qlen:=1}}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{drop_mode_qlen=>1}}),
- {error,{handler_not_added,{invalid_levels,{43,42,_}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ {invalid_levels,#{sync_mode_qlen:=43,
+ drop_mode_qlen:=42}}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{sync_mode_qlen=>43,
drop_mode_qlen=>42}}),
- {error,{handler_not_added,{invalid_levels,{_,43,42}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ {invalid_levels,#{drop_mode_qlen:=43,
+ flush_qlen:=42}}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{drop_mode_qlen=>43,
flush_qlen=>42}}),
@@ -334,15 +339,15 @@ config_fail(_Config) ->
ok = logger:add_handler(?MODULE,logger_std_h,
#{filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{illegal_config_change,#{config:=#{type:=_}},#{config:=#{type:=_}}}} =
+ {error,{illegal_config_change,logger_std_h,#{type:=_},#{type:=_}}} =
logger:set_handler_config(?MODULE,config,
#{type=>{file,"file"}}),
- {error,{invalid_levels,_}} =
+ {error,{invalid_config,logger_std_h,{invalid_levels,_}}} =
logger:set_handler_config(?MODULE,config,
#{sync_mode_qlen=>100,
flush_qlen=>99}),
- {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} =
+ {error,{invalid_config,logger_std_h,#{filesync_rep_int:=2000}}} =
logger:set_handler_config(?MODULE, config,
#{filesync_rep_int => 2000}),
@@ -468,8 +473,8 @@ reconfig(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
#{id := ?MODULE,
- type := standard_io,
- file_ctrl_pid := FileCtrlPid,
+ handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
sync_mode_qlen := ?SYNC_MODE_QLEN,
drop_mode_qlen := ?DROP_MODE_QLEN,
flush_qlen := ?FLUSH_QLEN,
@@ -480,7 +485,7 @@ reconfig(Config) ->
overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = DefaultInfo =
+ filesync_repeat_interval := no_repeat} = DefaultInfo =
logger_std_h:info(?MODULE),
{ok,
@@ -496,7 +501,7 @@ reconfig(Config) ->
overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
+ filesync_repeat_interval := no_repeat} =
DefaultHConf}}
= logger:get_handler_config(?MODULE),
@@ -511,10 +516,10 @@ reconfig(Config) ->
overload_kill_qlen => 100000,
overload_kill_mem_size => 10000000,
overload_kill_restart_after => infinity,
- filesync_repeat_interval => no_repeat}),
+ filesync_repeat_interval => 5000}),
#{id := ?MODULE,
- type := standard_io,
- file_ctrl_pid := FileCtrlPid,
+ handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
sync_mode_qlen := 1,
drop_mode_qlen := 2,
flush_qlen := 3,
@@ -584,7 +589,7 @@ reconfig(Config) ->
%% You are not allowed to actively set 'type' in runtime, since
%% this is a write once field.
- {error, {illegal_config_change,_,_}} =
+ {error, {illegal_config_change,logger_std_h,_,_}} =
logger:set_handler_config(?MODULE,config,#{type=>standard_io}),
{ok,#{config:=C6}} = logger:get_handler_config(?MODULE),
ct:log("C6: ~p",[C6]),
@@ -620,7 +625,7 @@ file_opts(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
- #{type := OkType} = logger_std_h:info(?MODULE),
+ #{handler_state := #{type := OkType}} = logger_std_h:info(?MODULE),
logger:notice(M1=?msg,?domain),
?check(M1),
B1 = ?bin(M1),
@@ -643,10 +648,8 @@ sync(Config) ->
%% check repeated filesync happens
start_tracer([{logger_std_h, write_to_dev, 5},
- {logger_std_h, sync_dev, 4},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"first\n">>},
- {logger_std_h, sync_dev},
{file,datasync}]),
logger:notice("first", ?domain),
@@ -655,10 +658,8 @@ sync(Config) ->
%% check that explicit filesync is only done once
start_tracer([{logger_std_h, write_to_dev, 5},
- {logger_std_h, sync_dev, 4},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"second\n">>},
- {logger_std_h, sync_dev},
{file,datasync},
{no_more,500}
]),
@@ -679,13 +680,10 @@ sync(Config) ->
%% triggered by the idle timeout between "third" and "fourth".
timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
start_tracer([{logger_std_h, write_to_dev, 5},
- {logger_std_h, sync_dev, 4},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"third\n">>},
- {logger_std_h, sync_dev},
{file,datasync},
{logger_std_h, write_to_dev, <<"fourth\n">>},
- {logger_std_h, sync_dev},
{file,datasync}]),
logger:notice("third", ?domain),
%% wait for automatic filesync
@@ -697,10 +695,10 @@ sync(Config) ->
%% switch repeated filesync on and verify that the looping works
SyncInt = 1000,
WaitT = 4500,
- OneSync = {logger_std_h,handle_cast,repeated_filesync},
- %% receive 1 initial repeated_filesync, then 1 per sec
- start_tracer([{logger_std_h,handle_cast,2}],
- [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{logger_h_common,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
ok = logger:update_handler_config(?MODULE, config,
#{filesync_repeat_interval => SyncInt}),
@@ -1533,7 +1531,7 @@ start_op_trace() ->
{ok,_} = dbg:p(self(), [c]),
MS1 = dbg:fun2ms(fun([_]) -> return_trace() end),
- {ok,_} = dbg:tp(logger_h_common, check_load, 1, MS1),
+ {ok,_} = dbg:tpl(logger_h_common, check_load, 1, MS1),
{ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []),
@@ -1607,7 +1605,9 @@ analyse(Msgs) ->
start_tracer(Trace,Expected) ->
Pid = self(),
- FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)),
+ FileCtrlPid = maps:get(file_ctrl_pid,
+ maps:get(handler_state,
+ logger_std_h:info(?MODULE))),
dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
dbg:p(whereis(h_proc_name()),[c]),
dbg:p(FileCtrlPid,[c]),
@@ -1628,7 +1628,7 @@ tpl([{M,F,A}|Trace]) ->
tpl([]) ->
ok.
-tracer({trace,_,call,{logger_std_h,handle_cast,[Op|_]}},
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]}},
{Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}},
diff --git a/lib/kernel/vsn.mk b/lib/kernel/vsn.mk
index df95174c9f..35e6a16a49 100644
--- a/lib/kernel/vsn.mk
+++ b/lib/kernel/vsn.mk
@@ -1 +1 @@
-KERNEL_VSN = 6.1
+KERNEL_VSN = 6.1.1
diff --git a/lib/observer/src/crashdump_viewer.erl b/lib/observer/src/crashdump_viewer.erl
index 0627c15b1c..97bb344cbf 100644
--- a/lib/observer/src/crashdump_viewer.erl
+++ b/lib/observer/src/crashdump_viewer.erl
@@ -589,9 +589,9 @@ truncated_here(Tag) ->
case get(truncated) of
true ->
case get(last_tag) of
- Tag -> % Tag == {TagType,Id}
+ {Tag,_Pos} -> % Tag == {TagType,Id}
true;
- {Tag,_Id} ->
+ {{Tag,_Id},_Pos} ->
true;
_LastTag ->
truncated_earlier(Tag)
@@ -846,8 +846,8 @@ do_read_file(File) ->
case check_dump_version(Id) of
{ok,DumpVsn} ->
reset_tables(),
- insert_index(Tag,Id,N1+1),
- put_last_tag(Tag,""),
+ insert_index(Tag,Id,Pos=N1+1),
+ put_last_tag(Tag,"",Pos),
DecodeOpts = get_decode_opts(DumpVsn),
indexify(Fd,DecodeOpts,Rest,N1),
end_progress(),
@@ -915,23 +915,11 @@ indexify(Fd,DecodeOpts,Bin,N) ->
_ ->
insert_index(Tag,Id,NewPos)
end,
- case put_last_tag(Tag,Id) of
- {?proc_heap,LastId} ->
- [{_,LastPos}] = lookup_index(?proc_heap,LastId),
+ case put_last_tag(Tag,Id,NewPos) of
+ {{?proc_heap,LastId},LastPos} ->
ets:insert(cdv_heap_file_chars,{LastId,N+Start+1-LastPos});
- {?literals,[]} ->
- case get(truncated_reason) of
- undefined ->
- [{_,LastPos}] = lookup_index(?literals,[]),
- ets:insert(cdv_heap_file_chars,
- {literals,N+Start+1-LastPos});
- _ ->
- %% Literals are truncated. Make sure we never
- %% attempt to read in the literals. (Heaps that
- %% references literals will show markers for
- %% incomplete heaps, but will otherwise work.)
- delete_index(?literals, [])
- end;
+ {{?literals,[]},LastPos} ->
+ ets:insert(cdv_heap_file_chars,{literals,N+Start+1-LastPos});
_ -> ok
end,
indexify(Fd,DecodeOpts,Rest,N1);
@@ -973,10 +961,18 @@ tag(Fd,<<>>,N,Gat,Di,Now) ->
check_if_truncated() ->
case get(last_tag) of
- {?ende,_} ->
+ {{?ende,_},_} ->
put(truncated,false),
put(truncated_proc,false);
- TruncatedTag ->
+ {{?literals,[]},_} ->
+ put(truncated,true),
+ put(truncated_proc,false),
+ %% Literals are truncated. Make sure we never
+ %% attempt to read in the literals. (Heaps that
+ %% references literals will show markers for
+ %% incomplete heaps, but will otherwise work.)
+ delete_index(?literals, []);
+ {TruncatedTag,_} ->
put(truncated,true),
find_truncated_proc(TruncatedTag)
end.
@@ -984,7 +980,6 @@ check_if_truncated() ->
find_truncated_proc({Tag,_Id}) when Tag==?atoms;
Tag==?binary;
Tag==?instr_data;
- Tag==?literals;
Tag==?memory_status;
Tag==?memory_map ->
put(truncated_proc,false);
@@ -2870,12 +2865,12 @@ parse_heap_term("Yc"++Line0, Addr, DecodeOpts, D0) -> %Reference-counted binary.
SymbolicBin = {'#CDVBin',Start},
Term = cdvbin(Offset, Sz, SymbolicBin),
D1 = gb_trees:insert(Addr, Term, D0),
- D = gb_trees:insert(Binp, SymbolicBin, D1),
+ D = gb_trees:enter(Binp, SymbolicBin, D1),
{Term,Line,D};
[] ->
Term = '#CDVNonexistingBinary',
D1 = gb_trees:insert(Addr, Term, D0),
- D = gb_trees:insert(Binp, Term, D1),
+ D = gb_trees:enter(Binp, Term, D1),
{Term,Line,D}
end;
parse_heap_term("Ys"++Line0, Addr, DecodeOpts, D0) -> %Sub binary.
@@ -2887,12 +2882,17 @@ parse_heap_term("Ys"++Line0, Addr, DecodeOpts, D0) -> %Sub binary.
{Term,Line,D};
parse_heap_term("Mf"++Line0, Addr, DecodeOpts, D0) -> %Flatmap.
{Size,":"++Line1} = get_hex(Line0),
- {Keys,":"++Line2,D1} = parse_term(Line1, DecodeOpts, D0),
- {Values,Line,D2} = parse_tuple(Size, Line2, Addr,DecodeOpts, D1, []),
- Pairs = zip_tuples(tuple_size(Keys), Keys, Values, []),
- Map = maps:from_list(Pairs),
- D = gb_trees:update(Addr, Map, D2),
- {Map,Line,D};
+ case parse_term(Line1, DecodeOpts, D0) of
+ {Keys,":"++Line2,D1} when is_tuple(Keys) ->
+ {Values,Line,D2} = parse_tuple(Size, Line2, Addr,DecodeOpts, D1, []),
+ Pairs = zip_tuples(tuple_size(Keys), Keys, Values, []),
+ Map = maps:from_list(Pairs),
+ D = gb_trees:update(Addr, Map, D2),
+ {Map,Line,D};
+ {Incomplete,_Line,D1} ->
+ D = gb_trees:insert(Addr, Incomplete, D1),
+ {Incomplete,"",D}
+ end;
parse_heap_term("Mh"++Line0, Addr, DecodeOpts, D0) -> %Head node in a hashmap.
{MapSize,":"++Line1} = get_hex(Line0),
{N,":"++Line2} = get_hex(Line1),
@@ -3051,7 +3051,7 @@ do_deref_ptr(Lookup, Line, DecodeOpts, D0) ->
{Term,Line,D0};
none ->
put(incomplete_heap, true),
- {['#CDVIncompleteHeap'],Line,D0};
+ {'#CDVIncompleteHeap',Line,D0};
{line,Addr,NewLine} ->
D = parse_line(Addr, NewLine, DecodeOpts, D0),
do_deref_ptr(Lookup, Line, DecodeOpts, D)
@@ -3275,13 +3275,13 @@ tag_to_atom(UnknownTag) ->
%%%-----------------------------------------------------------------
%%% Store last tag for use when truncated, and reason if aborted
-put_last_tag(?abort,Reason) ->
- %% Don't overwrite the real last tag, and make sure to return
- %% the previous last tag.
- put(truncated_reason,Reason),
- get(last_tag);
-put_last_tag(Tag,Id) ->
- put(last_tag,{Tag,Id}).
+put_last_tag(?abort,Reason,_Pos) ->
+ %% Don't overwrite the real last tag, and don't return it either,
+ %% since that would make the caller of this function believe that
+ %% the tag was complete.
+ put(truncated_reason,Reason);
+put_last_tag(Tag,Id,Pos) ->
+ put(last_tag,{{Tag,Id},Pos}).
%%%-----------------------------------------------------------------
%%% Fetch next chunk from crashdump file
diff --git a/lib/observer/test/crashdump_helper.erl b/lib/observer/test/crashdump_helper.erl
index 576d112154..d5d3649525 100644
--- a/lib/observer/test/crashdump_helper.erl
+++ b/lib/observer/test/crashdump_helper.erl
@@ -64,6 +64,7 @@ n1_proc(Creator,_N2,Pid2,Port2,_L) ->
put(ref,Ref),
put(pid,Pid),
put(bin,Bin),
+ put(proc_bins,create_proc_bins()),
put(bins,create_binaries()),
put(sub_bin,SubBin),
put(sub_bins,create_sub_binaries(get(bins))),
@@ -119,6 +120,23 @@ create_sub_binary(Bin, Start, LenSub) ->
<<_:Start/bytes,Sub:Len/bytes,_/bytes>> = Bin,
Sub.
+create_proc_bins() ->
+ Parent = self(),
+ Pid =
+ spawn(
+ fun() ->
+ %% Just reverse the list here, so this binary is not
+ %% confused with the one created in n1_proc/5 above,
+ %% which is used for testing truncation (see
+ %% crashdump_viewer_SUITE:truncate_dump_binary/1)
+ Bin = list_to_binary(lists:reverse(lists:seq(1, 255))),
+ <<A:65/bytes,B:65/bytes,C/bytes>> = Bin,
+ Parent ! {self(),{A,B,C}}
+ end),
+ receive
+ {Pid,ProcBins} -> ProcBins
+ end.
+
%%%
%%% Test dumping of maps. Dumping of maps only from OTP 20.2.
%%%
diff --git a/lib/observer/test/crashdump_viewer_SUITE.erl b/lib/observer/test/crashdump_viewer_SUITE.erl
index ed53c46a0d..8c5e618f4a 100644
--- a/lib/observer/test/crashdump_viewer_SUITE.erl
+++ b/lib/observer/test/crashdump_viewer_SUITE.erl
@@ -400,6 +400,13 @@ special(File,Procs) ->
crashdump_viewer:expand_binary({SOffset,SSize,SPos}),
io:format(" expand binary ok",[]),
+ ProcBins = proplists:get_value(proc_bins,Dict),
+ {['#CDVBin',0,65,ProcBin],
+ ['#CDVBin',65,65,ProcBin],
+ ['#CDVBin',130,125,ProcBin]} = ProcBins,
+ io:format(" ProcBins ok",[]),
+
+
Binaries = crashdump_helper:create_binaries(),
verify_binaries(Binaries, proplists:get_value(bins,Dict)),
io:format(" binaries ok",[]),
diff --git a/lib/public_key/asn1/OTP-PKIX.asn1 b/lib/public_key/asn1/OTP-PKIX.asn1
index 10a83555af..9bcd99fba3 100644
--- a/lib/public_key/asn1/OTP-PKIX.asn1
+++ b/lib/public_key/asn1/OTP-PKIX.asn1
@@ -326,8 +326,13 @@ PublicKeyAlgorithm ::= SEQUENCE {
OPTIONAL }
SupportedSignatureAlgorithms SIGNATURE-ALGORITHM-CLASS ::= {
- dsa-with-sha1 | dsaWithSHA1 | md2-with-rsa-encryption |
- md5-with-rsa-encryption | sha1-with-rsa-encryption | sha-1with-rsa-encryption |
+ dsa-with-sha1 | dsaWithSHA1 |
+ dsa-with-sha224 |
+ dsa-with-sha256 |
+ md2-with-rsa-encryption |
+ md5-with-rsa-encryption |
+ sha1-with-rsa-encryption |
+ sha-1with-rsa-encryption |
sha224-with-rsa-encryption |
sha256-with-rsa-encryption |
sha384-with-rsa-encryption |
@@ -368,6 +373,14 @@ SupportedPublicKeyAlgorithms PUBLIC-KEY-ALGORITHM-CLASS ::= {
ID id-dsaWithSHA1
TYPE DSAParams }
+ dsa-with-sha224 SIGNATURE-ALGORITHM-CLASS ::= {
+ ID id-dsa-with-sha224
+ TYPE DSAParams }
+
+ dsa-with-sha256 SIGNATURE-ALGORITHM-CLASS ::= {
+ ID id-dsa-with-sha256
+ TYPE DSAParams }
+
id-dsa-with-sha224 OBJECT IDENTIFIER ::= {
joint-iso-ccitt(2) country(16) us(840) organization(1) gov(101)
csor(3) algorithms(4) id-dsa-with-sha2(3) 1 }
diff --git a/lib/public_key/doc/src/notes.xml b/lib/public_key/doc/src/notes.xml
index 62b4b4ca1b..7ed60ed3ca 100644
--- a/lib/public_key/doc/src/notes.xml
+++ b/lib/public_key/doc/src/notes.xml
@@ -35,6 +35,22 @@
<file>notes.xml</file>
</header>
+<section><title>Public_Key 1.6.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Add DSA SHA2 OIDs in public_key's ASN.1 spec and
+ public_key:pkix_sign_types/1</p>
+ <p>
+ Own Id: OTP-15367</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Public_Key 1.6.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
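A minimal usage sketch of what the note above enables (illustrative, not part of this change set; the OID tuples are the NIST CSOR values shown in the OTP-PKIX.asn1 hunk below, and the exact return tuples are an expectation based on the existing DSA/SHA1 clause):

    %% pkix_sign_types/1 is expected to resolve the new DSA-with-SHA2 OIDs
    %% to {DigestType, SignatureType}:
    {sha224, dsa} = public_key:pkix_sign_types({2,16,840,1,101,3,4,3,1}),
    {sha256, dsa} = public_key:pkix_sign_types({2,16,840,1,101,3,4,3,2}).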
diff --git a/lib/public_key/doc/src/public_key.xml b/lib/public_key/doc/src/public_key.xml
index a4d7e4a734..ee3877ddd0 100644
--- a/lib/public_key/doc/src/public_key.xml
+++ b/lib/public_key/doc/src/public_key.xml
@@ -109,6 +109,13 @@
</datatype>
<datatype>
+ <name name="ed_public_key"/>
+ <desc>
+ <warning><p>This format of the EdDSA curves is temporary and may change without prior notice!</p></warning>
+ </desc>
+ </datatype>
+
+ <datatype>
<name name="private_key"/>
<name name="rsa_private_key"/>
<name name="dsa_private_key"/>
@@ -118,6 +125,14 @@
</datatype>
<datatype>
+ <name name="ed_private_key"/>
+ <desc>
+ <warning><p>This format of the EdDSA curves is temporary and may change without prior notice!</p></warning>
+ </desc>
+ </datatype>
+
+
+ <datatype>
<name name="key_params"/>
<desc>
</desc>
diff --git a/lib/public_key/src/pubkey_pem.erl b/lib/public_key/src/pubkey_pem.erl
index b92790554f..d7e5bc3ad8 100644
--- a/lib/public_key/src/pubkey_pem.erl
+++ b/lib/public_key/src/pubkey_pem.erl
@@ -222,7 +222,9 @@ pem_start('CertificateList') ->
pem_start('EcpkParameters') ->
<<"-----BEGIN EC PARAMETERS-----">>;
pem_start('ECPrivateKey') ->
- <<"-----BEGIN EC PRIVATE KEY-----">>.
+ <<"-----BEGIN EC PRIVATE KEY-----">>;
+pem_start({no_asn1, new_openssh}) -> %% Temporarily in the prototype of this format
+ <<"-----BEGIN OPENSSH PRIVATE KEY-----">>.
pem_end(<<"-----BEGIN CERTIFICATE-----">>) ->
<<"-----END CERTIFICATE-----">>;
@@ -250,6 +252,8 @@ pem_end(<<"-----BEGIN EC PARAMETERS-----">>) ->
<<"-----END EC PARAMETERS-----">>;
pem_end(<<"-----BEGIN EC PRIVATE KEY-----">>) ->
<<"-----END EC PRIVATE KEY-----">>;
+pem_end(<<"-----BEGIN OPENSSH PRIVATE KEY-----">>) ->
+ <<"-----END OPENSSH PRIVATE KEY-----">>;
pem_end(_) ->
undefined.
@@ -278,7 +282,10 @@ asn1_type(<<"-----BEGIN X509 CRL-----">>) ->
asn1_type(<<"-----BEGIN EC PARAMETERS-----">>) ->
'EcpkParameters';
asn1_type(<<"-----BEGIN EC PRIVATE KEY-----">>) ->
- 'ECPrivateKey'.
+ 'ECPrivateKey';
+asn1_type(<<"-----BEGIN OPENSSH PRIVATE KEY-----">>) ->
+ {no_asn1, new_openssh}. %% Temporarily in the prototype of this format
+
pem_decrypt() ->
<<"Proc-Type: 4,ENCRYPTED">>.
diff --git a/lib/public_key/src/pubkey_ssh.erl b/lib/public_key/src/pubkey_ssh.erl
index 02c061efc9..d0ef4abfb1 100644
--- a/lib/public_key/src/pubkey_ssh.erl
+++ b/lib/public_key/src/pubkey_ssh.erl
@@ -25,7 +25,8 @@
-export([decode/2, encode/2,
dh_gex_group/4,
- dh_gex_group_sizes/0
+ dh_gex_group_sizes/0,
+pad/2, new_openssh_encode/1, new_openssh_decode/1 % For test and experiments
]).
-define(UINT32(X), X:32/unsigned-big-integer).
@@ -67,6 +68,8 @@ decode(Bin, rfc4716_public_key) ->
rfc4716_decode(Bin);
decode(Bin, ssh2_pubkey) ->
ssh2_pubkey_decode(Bin);
+decode(Bin, new_openssh) ->
+ new_openssh_decode(Bin);
decode(Bin, Type) ->
openssh_decode(Bin, Type).
@@ -177,6 +180,70 @@ join_entry([Line | Lines], Entry) ->
rfc4716_pubkey_decode(BinKey) -> ssh2_pubkey_decode(BinKey).
+%% From https://github.com/openssh/openssh-portable/blob/master/PROTOCOL.key
+new_openssh_decode(<<"openssh-key-v1",0,
+ ?DEC_BIN(CipherName, _L1),
+ ?DEC_BIN(KdfName, _L2),
+ ?DEC_BIN(KdfOptions, _L3),
+ ?UINT32(N), % number of keys
+ ?DEC_BIN(PublicKey, _L4),
+ ?DEC_BIN(Encrypted, _L5),
+ _Rest/binary
+ >>) ->
+ %%io:format("CipherName = ~p~nKdfName = ~p~nKdfOptions = ~p~nPublicKey = ~p~nN = ~p~nEncrypted = ~p~nRest = ~p~n", [CipherName, KdfName, KdfOptions, PublicKey, N, Encrypted, _Rest]),
+ new_openssh_decode(CipherName, KdfName, KdfOptions, PublicKey, N, Encrypted).
+
+new_openssh_decode(<<"none">>, <<"none">>, <<"">>, _PublicKey, 1,
+ <<?UINT32(CheckInt),
+ ?UINT32(CheckInt),
+ ?DEC_BIN(Type, _Lt),
+ ?DEC_BIN(PubKey, _Lpu),
+ ?DEC_BIN(PrivPubKey, _Lpripub),
+ ?DEC_BIN(_Comment, _C1),
+ _Pad/binary>>) ->
+ case {Type,PrivPubKey} of
+ {<<"ssh-ed25519">>,
+ <<PrivKey:32/binary, PubKey:32/binary>>} ->
+ {ed_pri, ed25519, PubKey, PrivKey};
+
+ {<<"ssh-ed448">>,
+ <<PrivKey:57/binary, PubKey/binary>>} -> % "Intelligent" guess from
+ % https://tools.ietf.org/html/draft-ietf-curdle-ssh-ed25519-ed448
+ {ed_pri, ed448, PubKey, PrivKey}
+ end.
+
+
+new_openssh_encode({ed_pri,_,PubKey,PrivKey}=Key) ->
+ Type = key_type(Key),
+ CheckInt = 17*256+17, %crypto:strong_rand_bytes(4),
+ Comment = <<>>,
+ PublicKey = <<?STRING(Type),?STRING(PubKey)>>,
+ CipherName = <<"none">>,
+ KdfName = <<"none">>,
+ KdfOptions = <<>>,
+ BlockSize = 8, % Crypto dependent
+ NumKeys = 1,
+ Encrypted0 = <<?UINT32(CheckInt),
+ ?UINT32(CheckInt),
+ ?STRING(Type),
+ ?STRING(PubKey),
+ ?STRING(<<PrivKey/binary,PubKey/binary>>),
+ ?STRING(Comment)
+ >>,
+ Pad = pad(size(Encrypted0), BlockSize),
+ Encrypted = <<Encrypted0/binary, Pad/binary>>,
+ <<"openssh-key-v1",0,
+ ?STRING(CipherName),
+ ?STRING(KdfName),
+ ?STRING(KdfOptions),
+ ?UINT32(NumKeys),
+ ?STRING(PublicKey),
+ ?STRING(Encrypted)>>.
+
+pad(N, BlockSize) when N>BlockSize -> pad(N rem BlockSize, BlockSize);
+pad(N, BlockSize) -> list_to_binary(lists:seq(1,BlockSize-N)).
+
+
openssh_decode(Bin, FileType) ->
Lines = binary:split(Bin, <<"\n">>, [global]),
do_openssh_decode(FileType, Lines, []).
@@ -235,6 +302,8 @@ do_openssh_decode(openssh_public_key = FileType, [Line | Lines], Acc) ->
<<"ssh-rsa">> -> true;
<<"ssh-dss">> -> true;
<<"ecdsa-sha2-",Curve/binary>> -> is_ssh_curvename(Curve);
+ <<"ssh-ed25519">> -> true;
+ <<"ssh-ed448">> -> true;
_ -> false
end,
@@ -247,7 +316,9 @@ do_openssh_decode(openssh_public_key = FileType, [Line | Lines], Acc) ->
Comment = string:strip(string_decode(iolist_to_binary(Comment0)), right, $\n),
do_openssh_decode(FileType, Lines,
[{openssh_pubkey_decode(KeyType, Base64Enc),
- [{comment, Comment}]} | Acc])
+ [{comment, Comment}]} | Acc]);
+ _ when KnownKeyType==false ->
+ do_openssh_decode(FileType, Lines, Acc)
end.
@@ -386,6 +457,10 @@ line_end(Comment) ->
key_type(#'RSAPublicKey'{}) -> <<"ssh-rsa">>;
key_type({_, #'Dss-Parms'{}}) -> <<"ssh-dss">>;
+key_type({ed_pub,ed25519,_}) -> <<"ssh-ed25519">>;
+key_type({ed_pub,ed448,_}) -> <<"ssh-ed448">>;
+key_type({ed_pri,ed25519,_,_}) -> <<"ssh-ed25519">>;
+key_type({ed_pri,ed448,_,_}) -> <<"ssh-ed448">>;
key_type({#'ECPoint'{}, {namedCurve,Curve}}) -> <<"ecdsa-sha2-", (public_key:oid2ssh_curvename(Curve))/binary>>.
comma_list_encode([Option], []) ->
@@ -404,7 +479,12 @@ ssh2_pubkey_encode({Y, #'Dss-Parms'{p = P, q = Q, g = G}}) ->
<<?STRING(<<"ssh-dss">>), ?Empint(P), ?Empint(Q), ?Empint(G), ?Empint(Y)>>;
ssh2_pubkey_encode(Key={#'ECPoint'{point = Q}, {namedCurve,OID}}) ->
Curve = public_key:oid2ssh_curvename(OID),
- <<?STRING(key_type(Key)), ?Estring(Curve), ?Estring(Q)>>.
+ <<?STRING(key_type(Key)), ?Estring(Curve), ?Estring(Q)>>;
+ssh2_pubkey_encode({ed_pub, ed25519, Key}) ->
+ <<?STRING(<<"ssh-ed25519">>), ?Estring(Key)>>;
+ssh2_pubkey_encode({ed_pub, ed448, Key}) ->
+ <<?STRING(<<"ssh-ed448">>), ?Estring(Key)>>.
+
ssh2_pubkey_decode(<<?DEC_BIN(Type,_TL), Bin/binary>>) ->
@@ -430,12 +510,23 @@ ssh2_pubkey_decode(<<"ssh-dss">>,
ssh2_pubkey_decode(<<"ecdsa-sha2-",Id/binary>>,
<<?DEC_BIN(Id, _IL),
?DEC_BIN(Q, _QL)>>) ->
- {#'ECPoint'{point = Q}, {namedCurve,public_key:ssh_curvename2oid(Id)}}.
+ {#'ECPoint'{point = Q}, {namedCurve,public_key:ssh_curvename2oid(Id)}};
+
+ssh2_pubkey_decode(<<"ssh-ed25519">>,
+ <<?DEC_BIN(Key, _L)>>) ->
+ {ed_pub, ed25519, Key};
+
+ssh2_pubkey_decode(<<"ssh-ed448">>,
+ <<?DEC_BIN(Key, _L)>>) ->
+ {ed_pub, ed448, Key}.
+
is_key_field(<<"ssh-dss">>) -> true;
is_key_field(<<"ssh-rsa">>) -> true;
+is_key_field(<<"ssh-ed25519">>) -> true;
+is_key_field(<<"ssh-ed448">>) -> true;
is_key_field(<<"ecdsa-sha2-",Id/binary>>) -> is_ssh_curvename(Id);
is_key_field(_) -> false.
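A hedged round-trip sketch of the experimental codec exported above for test and experiments (the key bytes are random placeholders, not a real Ed25519 key pair):

    Pub  = crypto:strong_rand_bytes(32),   % stand-in for an Ed25519 public key
    Priv = crypto:strong_rand_bytes(32),   % stand-in for an Ed25519 private key
    Blob = pubkey_ssh:new_openssh_encode({ed_pri, ed25519, Pub, Priv}),
    {ed_pri, ed25519, Pub, Priv} = pubkey_ssh:new_openssh_decode(Blob).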
diff --git a/lib/public_key/src/public_key.erl b/lib/public_key/src/public_key.erl
index dca1e0766e..75d40d2e8a 100644
--- a/lib/public_key/src/public_key.erl
+++ b/lib/public_key/src/public_key.erl
@@ -68,8 +68,8 @@
pki_asn1_type/0, asn1_type/0, ssh_file/0, der_encoded/0,
key_params/0, digest_type/0]).
--type public_key() :: rsa_public_key() | dsa_public_key() | ec_public_key().
--type private_key() :: rsa_private_key() | dsa_private_key() | ec_private_key().
+-type public_key() :: rsa_public_key() | dsa_public_key() | ec_public_key() | ed_public_key() .
+-type private_key() :: rsa_private_key() | dsa_private_key() | ec_private_key() | ed_private_key() .
-type rsa_public_key() :: #'RSAPublicKey'{}.
-type rsa_private_key() :: #'RSAPrivateKey'{}.
@@ -79,6 +79,9 @@
-type ecpk_parameters_api() :: ecpk_parameters() | #'ECParameters'{} | {namedCurve, Name::crypto:ec_named_curve()}.
-type ec_public_key() :: {#'ECPoint'{}, ecpk_parameters_api()}.
-type ec_private_key() :: #'ECPrivateKey'{}.
+-type ed_public_key() :: {ed_pub, ed25519|ed448, Key::binary()}.
+-type ed_private_key() :: {ed_pri, ed25519|ed448, Pub::binary(), Priv::binary()}.
+
-type key_params() :: #'DHParameter'{} | {namedCurve, oid()} | #'ECParameters'{} |
{rsa, Size::integer(), PubExp::integer()}.
-type der_encoded() :: binary().
@@ -166,6 +169,8 @@ pem_entry_decode({'SubjectPublicKeyInfo', Der, _}) ->
ECCParams = der_decode('EcpkParameters', Params),
{#'ECPoint'{point = Key0}, ECCParams}
end;
+pem_entry_decode({{no_asn1,new_openssh}, Special, not_encrypted}) ->
+ ssh_decode(Special, new_openssh);
pem_entry_decode({Asn1Type, Der, not_encrypted}) when is_atom(Asn1Type),
is_binary(Der) ->
der_decode(Asn1Type, Der).
@@ -1070,8 +1075,9 @@ pkix_verify_hostname_match_fun(https) ->
-spec ssh_decode(SshBin, Type) ->
Decoded
when SshBin :: binary(),
- Type :: ssh2_pubkey | OtherType,
+ Type :: ssh2_pubkey | OtherType | InternalType,
OtherType :: public_key | ssh_file(),
+ InternalType :: new_openssh,
Decoded :: Decoded_ssh2_pubkey
| Decoded_OtherType,
Decoded_ssh2_pubkey :: public_key(),
@@ -1090,7 +1096,8 @@ ssh_decode(SshBin, Type) when is_binary(SshBin),
Type == openssh_public_key;
Type == auth_keys;
Type == known_hosts;
- Type == ssh2_pubkey ->
+ Type == ssh2_pubkey;
+ Type == new_openssh ->
pubkey_ssh:decode(SshBin, Type).
%%--------------------------------------------------------------------
@@ -1233,6 +1240,8 @@ format_sign_key(#'DSAPrivateKey'{p = P, q = Q, g = G, x = X}) ->
{dss, [P, Q, G, X]};
format_sign_key(#'ECPrivateKey'{privateKey = PrivKey, parameters = Param}) ->
{ecdsa, [PrivKey, ec_curve_spec(Param)]};
+format_sign_key({ed_pri, Curve, _Pub, Priv}) ->
+ {eddsa, [Priv,Curve]};
format_sign_key(_) ->
badarg.
@@ -1242,6 +1251,8 @@ format_verify_key({#'ECPoint'{point = Point}, Param}) ->
{ecdsa, [Point, ec_curve_spec(Param)]};
format_verify_key({Key, #'Dss-Parms'{p = P, q = Q, g = G}}) ->
{dss, [P, Q, G, Key]};
+format_verify_key({ed_pub, Curve, Key}) ->
+ {eddsa, [Key,Curve]};
%% Convert private keys to public keys
format_verify_key(#'RSAPrivateKey'{modulus = Mod, publicExponent = Exp}) ->
format_verify_key(#'RSAPublicKey'{modulus = Mod, publicExponent = Exp});
diff --git a/lib/public_key/vsn.mk b/lib/public_key/vsn.mk
index 4e52028c36..96eaf4f962 100644
--- a/lib/public_key/vsn.mk
+++ b/lib/public_key/vsn.mk
@@ -1 +1 @@
-PUBLIC_KEY_VSN = 1.6.2
+PUBLIC_KEY_VSN = 1.6.3
diff --git a/lib/reltool/src/reltool_utils.erl b/lib/reltool/src/reltool_utils.erl
index 060a0912f9..2afa386cb3 100644
--- a/lib/reltool/src/reltool_utils.erl
+++ b/lib/reltool/src/reltool_utils.erl
@@ -47,6 +47,9 @@
call/2, cast/2, reply/3]).
+%% For testing
+-export([erl_libs/2]).
+
-include_lib("kernel/include/file.hrl").
-include_lib("wx/include/wx.hrl").
-include("reltool.hrl").
@@ -55,7 +58,15 @@ root_dir() ->
code:root_dir().
erl_libs() ->
- string:lexemes(os:getenv("ERL_LIBS", ""), ":;").
+ erl_libs(os:getenv("ERL_LIBS", ""), os:type()).
+
+erl_libs(ErlLibs, OsType) when is_list(ErlLibs) ->
+ Sep =
+ case OsType of
+ {win32, _} -> ";";
+ _ -> ":"
+ end,
+ string:lexemes(ErlLibs, Sep).
lib_dirs(Dir) ->
case erl_prim_loader:list_dir(Dir) of
diff --git a/lib/reltool/test/reltool_server_SUITE.erl b/lib/reltool/test/reltool_server_SUITE.erl
index 4e1937d479..9ee61b595d 100644
--- a/lib/reltool/test/reltool_server_SUITE.erl
+++ b/lib/reltool/test/reltool_server_SUITE.erl
@@ -142,7 +142,8 @@ all() ->
use_selected_vsn,
use_selected_vsn_relative_path,
non_standard_vsn_id,
- undefined_regexp].
+ undefined_regexp,
+ windows_erl_libs].
groups() ->
[].
@@ -2546,10 +2547,21 @@ undefined_regexp(_Config) ->
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Checks that reltool_utils can correctly read Windows ERL_LIBS
+
+windows_erl_libs(_Config) ->
+ WinErlLibs =
+ "C:\\Program Files\\Erlang Libs;C:\\Program Files\\More Erlang Libs",
+ Ret = reltool_utils:erl_libs(WinErlLibs, {win32, nt}),
+ ?m(["C:\\Program Files\\Erlang Libs","C:\\Program Files\\More Erlang Libs"],
+ Ret),
+ ok.
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% Library functions
erl_libs() ->
- string:lexemes(os:getenv("ERL_LIBS", ""), ":;").
+ reltool_utils:erl_libs().
datadir(Config) ->
%% Removes the trailing slash...
diff --git a/lib/sasl/src/sasl.app.src b/lib/sasl/src/sasl.app.src
index 688aff16f1..5d45af0b50 100644
--- a/lib/sasl/src/sasl.app.src
+++ b/lib/sasl/src/sasl.app.src
@@ -43,5 +43,5 @@
{env, []},
{mod, {sasl, []}},
{runtime_dependencies, ["tools-2.6.14","stdlib-3.4","kernel-5.3",
- "erts-9.0"]}]}.
+ "erts-@OTP-13468@"]}]}.
diff --git a/lib/sasl/src/systools_make.erl b/lib/sasl/src/systools_make.erl
index 983df58ef7..f085246924 100644
--- a/lib/sasl/src/systools_make.erl
+++ b/lib/sasl/src/systools_make.erl
@@ -1562,7 +1562,7 @@ mandatory_modules() ->
preloaded() ->
%% Sorted
- [erl_prim_loader,erl_tracer,erlang,
+ [atomics, counters, erl_prim_loader,erl_tracer,erlang,
erts_code_purger,erts_dirty_process_signal_handler,
erts_internal,erts_literal_area_collector,
init,otp_ring0,persistent_term,prim_buffer,prim_eval,prim_file,
diff --git a/lib/ssh/doc/src/notes.xml b/lib/ssh/doc/src/notes.xml
index 7e77c6a457..42bdf667f8 100644
--- a/lib/ssh/doc/src/notes.xml
+++ b/lib/ssh/doc/src/notes.xml
@@ -230,6 +230,22 @@
</section>
</section>
+<section><title>Ssh 4.6.9.2</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Incompatibility with newer OpenSSH fixed. Previously,
+ versions 7.8 and later could cause Erlang SSH to exit.</p>
+ <p>
+ Own Id: OTP-15413</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>Ssh 4.6.9.1</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
@@ -3869,4 +3885,3 @@
</section>
</chapter>
-
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index b75b4a33c2..8435fced11 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -183,22 +183,6 @@
</datatype>
<datatype>
- <name name="pref_public_key_algs_client_option"/>
- <desc>
- <p>List of user (client) public key algorithms to try to use.</p>
- <p>The default value is the <c>public_key</c> entry in the list returned by
- <seealso marker="#default_algorithms/0">ssh:default_algorithms/0</seealso>.
- </p>
- <p>If there is no public key of a specified type available, the corresponding entry is ignored.
- Note that the available set is dependent on the underlying cryptolib and current user's public keys.
- </p>
- <p>See also the option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
- for specifying the path to the user's keys.
- </p>
- </desc>
- </datatype>
-
- <datatype>
<name name="host_accepting_client_options"/>
<name name="accept_hosts"/>
<name name="fp_digest_alg"/>
@@ -397,9 +381,20 @@
<datatype>
<name name="exec_daemon_option"/>
+ <name name="exec_spec"/>
+ <desc/>
+ </datatype>
+ <datatype>
+ <name name="exec_fun"/>
+ <desc/>
+ </datatype>
+ <datatype>
<name name="'exec_fun/1'"/>
<name name="'exec_fun/2'"/>
<name name="'exec_fun/3'"/>
+ <desc/>
+ </datatype>
+ <datatype>
<name name="exec_result"/>
<desc>
<p>This option changes how the daemon execute exec-requests from clients. The term in the return value
@@ -783,6 +778,22 @@
</datatype>
<datatype>
+ <name name="pref_public_key_algs_common_option"/>
+ <desc>
+ <p>List of user (client) public key algorithms to try to use.</p>
+ <p>The default value is the <c>public_key</c> entry in the list returned by
+ <seealso marker="#default_algorithms/0">ssh:default_algorithms/0</seealso>.
+ </p>
+ <p>If there is no public key of a specified type available, the corresponding entry is ignored.
+ Note that the available set is dependent on the underlying cryptolib and current user's public keys.
+ </p>
+ <p>See also the option <seealso marker="ssh_file#type-user_dir_common_option"><c>user_dir</c></seealso>
+ for specifying the path to the user's keys.
+ </p>
+ </desc>
+ </datatype>
+
+ <datatype>
<name name="disconnectfun_common_option"/>
<desc>
<p>Provides a fun to implement your own logging when the peer disconnects.</p>
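A hedged usage sketch for the option documented above, now a common option available to both client and daemon (host name and path are illustrative):

    {ok, Conn} = ssh:connect("host.example", 22,
                             [{pref_public_key_algs, ['ssh-ed25519', 'rsa-sha2-256']},
                              {user_dir, "/home/user/.ssh"},
                              {silently_accept_hosts, true}]).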
diff --git a/lib/ssh/doc/src/ssh_app.xml b/lib/ssh/doc/src/ssh_app.xml
index eb804e67dc..0c22a50c3f 100644
--- a/lib/ssh/doc/src/ssh_app.xml
+++ b/lib/ssh/doc/src/ssh_app.xml
@@ -175,6 +175,8 @@
<item>ecdsa-sha2-nistp384</item>
<item>ecdsa-sha2-nistp521</item>
<item>ecdsa-sha2-nistp256</item>
+ <item>ssh-ed25519</item>
+ <item>ssh-ed448</item>
<item>ssh-rsa</item>
<item>rsa-sha2-256</item>
<item>rsa-sha2-512</item>
@@ -378,7 +380,11 @@
<item>
<url href="https://tools.ietf.org/html/draft-ietf-curdle-ssh-curves">Secure Shell (SSH) Key Exchange Method using Curve25519 and Curve448 (work in progress)</url>
</item>
-
+
+ <item>
+ <url href="https://tools.ietf.org/html/draft-ietf-curdle-ssh-ed25519-ed448">Ed25519 and Ed448 public key algorithms for the Secure Shell (SSH) protocol (work in progress)</url>
+ </item>
+
</list>
</section>
diff --git a/lib/ssh/doc/src/ssh_file.xml b/lib/ssh/doc/src/ssh_file.xml
index ae6ba2e1d9..6681d9c306 100644
--- a/lib/ssh/doc/src/ssh_file.xml
+++ b/lib/ssh/doc/src/ssh_file.xml
@@ -93,6 +93,8 @@
<item><marker id="SYSDIR-ssh_host_dsa_key"/><c>ssh_host_dsa_key</c> - private dss host key (optional)</item>
<item><marker id="SYSDIR-ssh_host_rsa_key"/><c>ssh_host_rsa_key</c> - private rsa host key (optional)</item>
<item><marker id="SYSDIR-ssh_host_ecdsa_key"/><c>ssh_host_ecdsa_key</c> - private ecdsa host key (optional)</item>
+ <item><marker id="SYSDIR-ssh_host_ed25519_key"/><c>ssh_host_ed25519_key</c> - private eddsa host key for curve 25519 (optional)</item>
+ <item><marker id="SYSDIR-ssh_host_ed448_key"/><c>ssh_host_ed448_key</c> - private eddsa host key for curve 448 (optional)</item>
</list>
<p>At least one host key must be defined. The default value of SYSDIR is <marker id="#/etc/ssh"/><c>/etc/ssh</c>.
</p>
@@ -115,6 +117,8 @@
<item><marker id="USERDIR-id_dsa"/><c>id_dsa</c> - private dss user key (optional)</item>
<item><marker id="USERDIR-id_rsa"/><c>id_rsa</c> - private rsa user key (optional)</item>
<item><marker id="USERDIR-id_ecdsa"/><c>id_ecdsa</c> - private ecdsa user key (optional)</item>
+ <item><marker id="USERDIR-id_ed25519"/><c>id_ed25519</c> - private eddsa user key for curve 25519 (optional)</item>
+ <item><marker id="USERDIR-id_ed448"/><c>id_ed448</c> - private eddsa user key for curve 448 (optional)</item>
</list>
<p>The default value of USERDIR is <c>/home/</c><seealso marker="#LOCALUSER"><c>LOCALUSER</c></seealso><c>/.ssh</c>.
</p>
@@ -157,6 +161,7 @@
<p>If the user's DSA, RSA or ECDSA key is protected by a passphrase, it can be
supplied with those options.
</p>
+ <p>Note that EdDSA passphrases (Curves 25519 and 448) are not implemented.</p>
</desc>
</datatype>
@@ -183,6 +188,8 @@
<item><seealso marker="#SYSDIR-ssh_host_rsa_key"><c>SYSDIR/ssh_host_rsa_key</c></seealso></item>
<item><seealso marker="#SYSDIR-ssh_host_dsa_key"><c>SYSDIR/ssh_host_dsa_key</c></seealso></item>
<item><seealso marker="#SYSDIR-ssh_host_ecdsa_key"><c>SYSDIR/ssh_host_ecdsa_key</c></seealso></item>
+ <item><seealso marker="#SYSDIR-ssh_host_ed25519_key"><c>SYSDIR/ssh_host_ed25519_key</c></seealso></item>
+ <item><seealso marker="#SYSDIR-ssh_host_ed448_key"><c>SYSDIR/ssh_host_ed448_key</c>c></seealso></item>
</list>
</desc>
</func>
@@ -261,11 +268,14 @@
<item><seealso marker="#type-pubkey_passphrase_client_options">rsa_pass_phrase</seealso></item>
<item><seealso marker="#type-pubkey_passphrase_client_options">ecdsa_pass_phrase</seealso></item>
</list>
+ <p>Note that EdDSA passphrases (Curves 25519 and 448) are not implemented.</p>
<p><strong>Files</strong></p>
<list>
<item><seealso marker="#USERDIR-id_dsa"><c>USERDIR/id_dsa</c></seealso></item>
<item><seealso marker="#USERDIR-id_rsa"><c>USERDIR/id_rsa</c></seealso></item>
<item><seealso marker="#USERDIR-id_ecdsa"><c>USERDIR/id_ecdsa</c></seealso></item>
+ <item><seealso marker="#USERDIR-id_ed25519"><c>USERDIR/id_ed25519</c></seealso></item>
+ <item><seealso marker="#USERDIR-id_ed448"><c>USERDIR/id_ed448</c></seealso></item>
</list>
</desc>
</func>
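A hedged daemon sketch using the new host key file names listed above (port and directories are illustrative):

    {ok, _Daemon} = ssh:daemon(2222, [{system_dir, "/etc/ssh"},      % may contain ssh_host_ed25519_key
                                      {user_dir,   "/home/user/.ssh"}]).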
diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl
index 3ac74c4925..923e9309f4 100644
--- a/lib/ssh/src/ssh.hrl
+++ b/lib/ssh/src/ssh.hrl
@@ -129,6 +129,8 @@
-type pubkey_alg() :: 'ecdsa-sha2-nistp256' |
'ecdsa-sha2-nistp384' |
'ecdsa-sha2-nistp521' |
+ 'ssh-ed25519' |
+ 'ssh-ed448' |
'rsa-sha2-256' |
'rsa-sha2-512' |
'ssh-dss' |
@@ -182,6 +184,7 @@
| ssh_msg_debug_fun_common_option()
| rekey_limit_common_option()
| id_string_common_option()
+ | pref_public_key_algs_common_option()
| preferred_algorithms_common_option()
| modify_algorithms_common_option()
| auth_methods_common_option()
@@ -209,6 +212,7 @@
{ssh_msg_debug_fun, fun((ssh:connection_ref(),AlwaysDisplay::boolean(),Msg::binary(),LanguageTag::binary()) -> any()) } .
-type id_string_common_option() :: {id_string, string() | random | {random,Nmin::pos_integer(),Nmax::pos_integer()} }.
+-type pref_public_key_algs_common_option() :: {pref_public_key_algs, [pubkey_alg()] } .
-type preferred_algorithms_common_option():: {preferred_algorithms, algs_list()}.
-type modify_algorithms_common_option() :: {modify_algorithms, modify_algs_list()}.
-type auth_methods_common_option() :: {auth_methods, string() }.
@@ -227,8 +231,7 @@
-type client_option() ::
- pref_public_key_algs_client_option()
- | ssh_file:pubkey_passphrase_client_options()
+ ssh_file:pubkey_passphrase_client_options()
| host_accepting_client_options()
| authentication_client_options()
| diffie_hellman_group_exchange_client_option()
@@ -239,11 +242,14 @@
| ?COMMON_OPTION .
-type opaque_client_options() ::
- {keyboard_interact_fun, fun((term(),term(),term()) -> term())}
+ {keyboard_interact_fun, fun((Name::iodata(),
+ Instruction::iodata(),
+ Prompts::[{Prompt::iodata(),Echo::boolean()}]
+ ) ->
+ [Response::iodata()]
+ )}
| opaque_common_options().
--type pref_public_key_algs_client_option() :: {pref_public_key_algs, [pubkey_alg()] } .
-
-type host_accepting_client_options() ::
{silently_accept_hosts, accept_hosts()}
| {user_interaction, boolean()}
@@ -293,8 +299,9 @@
-type 'shell_fun/1'() :: fun((User::string()) -> pid()) .
-type 'shell_fun/2'() :: fun((User::string(), PeerAddr::inet:ip_address()) -> pid()).
--type exec_daemon_option() :: {exec, 'exec_fun/1'() | 'exec_fun/2'() | 'exec_fun/3'() }.
-
+-type exec_daemon_option() :: {exec, exec_spec()} .
+-type exec_spec() :: {direct, exec_fun()} .
+-type exec_fun() :: 'exec_fun/1'() | 'exec_fun/2'() | 'exec_fun/3'().
-type 'exec_fun/1'() :: fun((Cmd::string()) -> exec_result()) .
-type 'exec_fun/2'() :: fun((Cmd::string(), User::string()) -> exec_result()) .
-type 'exec_fun/3'() :: fun((Cmd::string(), User::string(), ClientAddr::ip_port()) -> exec_result()) .
@@ -380,9 +387,6 @@
algorithms, %% #alg{}
- key_cb, %% Private/Public key callback module
- io_cb, %% Interaction callback module
-
send_mac = none, %% send MAC algorithm
send_mac_key, %% key used in send MAC algorithm
send_mac_size = 0,
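A hedged sketch of a fun matching the refined keyboard_interact_fun type above; it must return exactly one response per prompt, and is passed via the (opaque, undocumented) keyboard_interact_fun option:

    KbdFun = fun(_Name, _Instruction, Prompts) ->
                 %% One response per {Prompt, Echo} tuple; the response text
                 %% here is a placeholder.
                 ["my-password" || {_Prompt, _Echo} <- Prompts]
             end.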
diff --git a/lib/ssh/src/ssh_auth.erl b/lib/ssh/src/ssh_auth.erl
index 4e4aa440de..9632168e65 100644
--- a/lib/ssh/src/ssh_auth.erl
+++ b/lib/ssh/src/ssh_auth.erl
@@ -91,8 +91,10 @@ unique(L) ->
%%%---- userauth_request_msg "callbacks"
-password_msg([#ssh{opts = Opts, io_cb = IoCb,
- user = User, service = Service} = Ssh0]) ->
+password_msg([#ssh{opts = Opts,
+ user = User,
+ service = Service} = Ssh0]) ->
+ IoCb = ?GET_INTERNAL_OPT(io_cb, Opts),
{Password,Ssh} =
case ?GET_OPT(password, Opts) of
undefined when IoCb == ssh_no_io ->
@@ -137,9 +139,7 @@ keyboard_interactive_msg([#ssh{user = User,
get_public_key(SigAlg, #ssh{opts = Opts}) ->
KeyAlg = key_alg(SigAlg),
- {KeyCb,KeyCbOpts} = ?GET_OPT(key_cb, Opts),
- UserOpts = ?GET_OPT(user_options, Opts),
- case KeyCb:user_key(KeyAlg, [{key_cb_private,KeyCbOpts}|UserOpts]) of
+ case ssh_transport:call_KeyCb(user_key, [KeyAlg], Opts) of
{ok, PrivKey} ->
try
%% Check the key - the KeyCb may be a buggy plugin
@@ -387,11 +387,9 @@ handle_userauth_info_request(#ssh_msg_userauth_info_request{name = Name,
instruction = Instr,
num_prompts = NumPrompts,
data = Data},
- #ssh{opts = Opts,
- io_cb = IoCb
- } = Ssh) ->
+ #ssh{opts=Opts} = Ssh) ->
PromptInfos = decode_keyboard_interactive_prompts(NumPrompts,Data),
- case keyboard_interact_get_responses(IoCb, Opts, Name, Instr, PromptInfos) of
+ case keyboard_interact_get_responses(Opts, Name, Instr, PromptInfos) of
not_ok ->
not_ok;
Responses ->
@@ -498,9 +496,7 @@ get_password_option(Opts, User) ->
pre_verify_sig(User, KeyBlob, Opts) ->
try
Key = public_key:ssh_decode(KeyBlob, ssh2_pubkey), % or exception
- {KeyCb,KeyCbOpts} = ?GET_OPT(key_cb, Opts),
- UserOpts = ?GET_OPT(user_options, Opts),
- KeyCb:is_auth_key(Key, User, [{key_cb_private,KeyCbOpts}|UserOpts])
+ ssh_transport:call_KeyCb(is_auth_key, [Key, User], Opts)
catch
_:_ ->
false
@@ -509,10 +505,8 @@ pre_verify_sig(User, KeyBlob, Opts) ->
verify_sig(SessionId, User, Service, AlgBin, KeyBlob, SigWLen, #ssh{opts = Opts} = Ssh) ->
try
Alg = binary_to_list(AlgBin),
- {KeyCb,KeyCbOpts} = ?GET_OPT(key_cb, Opts),
- UserOpts = ?GET_OPT(user_options, Opts),
Key = public_key:ssh_decode(KeyBlob, ssh2_pubkey), % or exception
- true = KeyCb:is_auth_key(Key, User, [{key_cb_private,KeyCbOpts}|UserOpts]),
+ true = ssh_transport:call_KeyCb(is_auth_key, [Key, User], Opts),
PlainText = build_sig_data(SessionId, User, Service, KeyBlob, Alg),
<<?UINT32(AlgSigLen), AlgSig:AlgSigLen/binary>> = SigWLen,
<<?UINT32(AlgLen), _Alg:AlgLen/binary,
@@ -536,56 +530,78 @@ build_sig_data(SessionId, User, Service, KeyBlob, Alg) ->
+key_alg('rsa-sha2-256') -> 'ssh-rsa';
+key_alg('rsa-sha2-512') -> 'ssh-rsa';
+key_alg(Alg) -> Alg.
+
+%%%================================================================
+%%%
+%%% Keyboard-interactive
+%%%
+
decode_keyboard_interactive_prompts(_NumPrompts, Data) ->
ssh_message:decode_keyboard_interactive_prompts(Data, []).
-keyboard_interact_get_responses(IoCb, Opts, Name, Instr, PromptInfos) ->
- NumPrompts = length(PromptInfos),
+keyboard_interact_get_responses(Opts, Name, Instr, PromptInfos) ->
keyboard_interact_get_responses(?GET_OPT(user_interaction, Opts),
?GET_OPT(keyboard_interact_fun, Opts),
- ?GET_OPT(password, Opts), IoCb, Name,
- Instr, PromptInfos, Opts, NumPrompts).
+ ?GET_OPT(password, Opts),
+ Name,
+ Instr,
+ PromptInfos,
+ Opts).
-keyboard_interact_get_responses(_, _, not_ok, _, _, _, _, _, _) ->
+%% Don't re-try an already rejected password. This could happen if both keyboard-interactive
+%% and password methods are tried:
+keyboard_interact_get_responses(_, _, not_ok, _, _, _, _) ->
not_ok;
-keyboard_interact_get_responses(_, undefined, Password, _, _, _, _, _,
- 1) when Password =/= undefined ->
- [Password]; %% Password auth implemented with keyboard-interaction and passwd is known
-keyboard_interact_get_responses(_, _, _, _, _, _, _, _, 0) ->
+
+%% Only one password requested and we have got one via the 'password' option for the daemon:
+keyboard_interact_get_responses(_, undefined, Pwd, _, _, [_], _) when Pwd =/= undefined ->
+ [Pwd]; %% Password auth implemented with keyboard-interaction and passwd is known
+
+%% No password requested (keyboard-interactive):
+keyboard_interact_get_responses(_, _, _, _, _, [], _) ->
[];
-keyboard_interact_get_responses(false, undefined, undefined, _, _, _, [Prompt|_], Opts, _) ->
- ssh_no_io:read_line(Prompt, Opts); %% Throws error as keyboard interaction is not allowed
-keyboard_interact_get_responses(true, undefined, _,IoCb, Name, Instr, PromptInfos, Opts, _) ->
- keyboard_interact(IoCb, Name, Instr, PromptInfos, Opts);
-keyboard_interact_get_responses(true, Fun, _Pwd, _IoCb, Name, Instr, PromptInfos, _Opts, NumPrompts) ->
- keyboard_interact_fun(Fun, Name, Instr, PromptInfos, NumPrompts).
-
-keyboard_interact(IoCb, Name, Instr, Prompts, Opts) ->
+
+%% user_interaction is forbidden (by the option user_interaction) and we have to ask
+%% the user for one or more passwords.
+%% Throw an error:
+keyboard_interact_get_responses(false, undefined, undefined, _, _, [Prompt|_], Opts) ->
+ ssh_no_io:read_line(Prompt, Opts);
+
+%% One or more passwords are requested, we may prompt the user and no fun is used
+%% to get the responses:
+keyboard_interact_get_responses(true, undefined, _, Name, Instr, PromptInfos, Opts) ->
+ prompt_user_for_passwords(Name, Instr, PromptInfos, Opts);
+
+%% The passwords are provided with a fun. Use that one!
+keyboard_interact_get_responses(true, Fun, _Pwd, Name, Instr, PromptInfos, _Opts) ->
+ keyboard_interact_fun(Fun, Name, Instr, PromptInfos).
+
+
+
+prompt_user_for_passwords(Name, Instr, PromptInfos, Opts) ->
+ IoCb = ?GET_INTERNAL_OPT(io_cb, Opts),
write_if_nonempty(IoCb, Name),
write_if_nonempty(IoCb, Instr),
lists:map(fun({Prompt, true}) -> IoCb:read_line(Prompt, Opts);
({Prompt, false}) -> IoCb:read_password(Prompt, Opts)
end,
- Prompts).
+ PromptInfos).
-write_if_nonempty(_, "") -> ok;
-write_if_nonempty(_, <<>>) -> ok;
-write_if_nonempty(IoCb, Text) -> IoCb:format("~s~n",[Text]).
-
-
-keyboard_interact_fun(KbdInteractFun, Name, Instr, PromptInfos, NumPrompts) ->
- Prompts = lists:map(fun({Prompt, _Echo}) -> Prompt end,
- PromptInfos),
- case KbdInteractFun(Name, Instr, Prompts) of
- Rs when length(Rs) == NumPrompts ->
- Rs;
- _Rs ->
+keyboard_interact_fun(KbdInteractFun, Name, Instr, PromptInfos) ->
+ case KbdInteractFun(Name, Instr, PromptInfos) of
+ Responses when is_list(Responses),
+ length(Responses) == length(PromptInfos) ->
+ Responses;
+ _ ->
nok
end.
-key_alg('rsa-sha2-256') -> 'ssh-rsa';
-key_alg('rsa-sha2-512') -> 'ssh-rsa';
-key_alg(Alg) -> Alg.
+write_if_nonempty(_, "") -> ok;
+write_if_nonempty(_, <<>>) -> ok;
+write_if_nonempty(IoCb, Text) -> IoCb:format("~s~n",[Text]).
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 4b41c10cbb..7c87591cf2 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -447,7 +447,6 @@ init_ssh_record(Role, Socket, Opts) ->
init_ssh_record(Role, Socket, PeerAddr, Opts) ->
AuthMethods = ?GET_OPT(auth_methods, Opts),
S0 = #ssh{role = Role,
- key_cb = ?GET_OPT(key_cb, Opts),
opts = Opts,
userauth_supported_methods = AuthMethods,
available_host_keys = available_hkey_algorithms(Role, Opts),
@@ -472,10 +471,11 @@ init_ssh_record(Role, Socket, PeerAddr, Opts) ->
S1 =
S0#ssh{c_vsn = Vsn,
c_version = Version,
- io_cb = case ?GET_OPT(user_interaction, Opts) of
- true -> ssh_io;
- false -> ssh_no_io
- end,
+ opts = ?PUT_INTERNAL_OPT({io_cb, case ?GET_OPT(user_interaction, Opts) of
+ true -> ssh_io;
+ false -> ssh_no_io
+ end},
+ Opts),
userauth_quiet_mode = ?GET_OPT(quiet_mode, Opts),
peer = {PeerName, PeerAddr},
local = LocalName
@@ -488,7 +488,6 @@ init_ssh_record(Role, Socket, PeerAddr, Opts) ->
server ->
S0#ssh{s_vsn = Vsn,
s_version = Version,
- io_cb = ?GET_INTERNAL_OPT(io_cb, Opts, ssh_io),
userauth_methods = string:tokens(AuthMethods, ","),
kb_tries_left = 3,
peer = {undefined, PeerAddr},
@@ -983,6 +982,10 @@ handle_event(_, #ssh_msg_userauth_info_request{}, {userauth_keyboard_interactive
%%% ######## {connected, client|server} ####
+%% Skip ext_info messages in connected state (for example from OpenSSH >= 7.7)
+handle_event(_, #ssh_msg_ext_info{}, {connected,_Role}, D) ->
+ {keep_state, D};
+
handle_event(_, {#ssh_msg_kexinit{},_}, {connected,Role}, D0) ->
{KeyInitMsg, SshPacket, Ssh} = ssh_transport:key_exchange_init_msg(D0#data.ssh_params),
D = D0#data{ssh_params = Ssh,
@@ -1682,18 +1685,19 @@ peer_role(client) -> server;
peer_role(server) -> client.
%%--------------------------------------------------------------------
-available_hkey_algorithms(Role, Options) ->
- KeyCb = ?GET_OPT(key_cb, Options),
- case [A || A <- available_hkey_algos(Options),
- (Role==client) orelse available_host_key(KeyCb, A, Options)
- ] of
-
- [] when Role==client ->
- error({shutdown, "No public key algs"});
-
- [] when Role==server ->
- error({shutdown, "No host key available"});
+available_hkey_algorithms(client, Options) ->
+ case available_hkey_algos(Options) of
+ [] ->
+ error({shutdown, "No public key algs"});
+ Algs ->
+ [atom_to_list(A) || A<-Algs]
+ end;
+available_hkey_algorithms(server, Options) ->
+ case [A || A <- available_hkey_algos(Options),
+ is_usable_host_key(A, Options)] of
+ [] ->
+ error({shutdown, "No host key available"});
Algs ->
[atom_to_list(A) || A<-Algs]
end.
@@ -1709,18 +1713,6 @@ available_hkey_algos(Options) ->
AvailableAndSupported.
-%% Alg :: atom()
-available_host_key({KeyCb,KeyCbOpts}, Alg, Opts) ->
- UserOpts = ?GET_OPT(user_options, Opts),
- case KeyCb:host_key(Alg, [{key_cb_private,KeyCbOpts}|UserOpts]) of
- {ok,Key} ->
- %% Check the key - the KeyCb may be a buggy plugin
- ssh_transport:valid_key_sha_alg(Key, Alg);
- _ ->
- false
- end.
-
-
send_msg(Msg, State=#data{ssh_params=Ssh0}) when is_tuple(Msg) ->
{Bytes, Ssh} = ssh_transport:ssh_packet(Msg, Ssh0),
send_bytes(Bytes, State),
@@ -1840,10 +1832,21 @@ ext_info(_, D0) ->
D0.
%%%----------------------------------------------------------------
-is_usable_user_pubkey(A, Ssh) ->
- case ssh_auth:get_public_key(A, Ssh) of
+is_usable_user_pubkey(Alg, Ssh) ->
+ try ssh_auth:get_public_key(Alg, Ssh) of
{ok,_} -> true;
_ -> false
+ catch
+ _:_ -> false
+ end.
+
+%%%----------------------------------------------------------------
+is_usable_host_key(Alg, Opts) ->
+ try ssh_transport:get_host_key(Alg, Opts)
+ of
+ _PrivHostKey -> true
+ catch
+ _:_ -> false
end.
%%%----------------------------------------------------------------
diff --git a/lib/ssh/src/ssh_file.erl b/lib/ssh/src/ssh_file.erl
index 669b0f9be2..510269bbb1 100644
--- a/lib/ssh/src/ssh_file.erl
+++ b/lib/ssh/src/ssh_file.erl
@@ -52,10 +52,11 @@
-type pubkey_passphrase_client_options() :: {dsa_pass_phrase, string()}
| {rsa_pass_phrase, string()}
+%% Not yet implemented: | {ed25519_pass_phrase, string()}
+%% Not yet implemented: | {ed448_pass_phrase, string()}
| {ecdsa_pass_phrase, string()} .
-
-define(PERM_700, 8#700).
-define(PERM_644, 8#644).
@@ -120,6 +121,8 @@ file_base_name('ssh-dss' ) -> "ssh_host_dsa_key";
file_base_name('ecdsa-sha2-nistp256') -> "ssh_host_ecdsa_key";
file_base_name('ecdsa-sha2-nistp384') -> "ssh_host_ecdsa_key";
file_base_name('ecdsa-sha2-nistp521') -> "ssh_host_ecdsa_key";
+file_base_name('ssh-ed25519' ) -> "ssh_host_ed25519_key";
+file_base_name('ssh-ed448' ) -> "ssh_host_ed448_key";
file_base_name(_ ) -> "ssh_host_key".
decode(File, Password) ->
@@ -257,6 +260,8 @@ identity_key_filename('ssh-rsa' ) -> "id_rsa";
identity_key_filename('rsa-sha2-256' ) -> "id_rsa";
identity_key_filename('rsa-sha2-384' ) -> "id_rsa";
identity_key_filename('rsa-sha2-512' ) -> "id_rsa";
+identity_key_filename('ssh-ed25519' ) -> "id_ed25519";
+identity_key_filename('ssh-ed448' ) -> "id_ed448";
identity_key_filename('ecdsa-sha2-nistp256') -> "id_ecdsa";
identity_key_filename('ecdsa-sha2-nistp384') -> "id_ecdsa";
identity_key_filename('ecdsa-sha2-nistp521') -> "id_ecdsa".
@@ -266,9 +271,12 @@ identity_pass_phrase("ssh-rsa" ) -> rsa_pass_phrase;
identity_pass_phrase("rsa-sha2-256" ) -> rsa_pass_phrase;
identity_pass_phrase("rsa-sha2-384" ) -> rsa_pass_phrase;
identity_pass_phrase("rsa-sha2-512" ) -> rsa_pass_phrase;
+%% Not yet implemented: identity_pass_phrase("ssh-ed25519" ) -> ed25519_pass_phrase;
+%% Not yet implemented: identity_pass_phrase("ssh-ed448" ) -> ed448_pass_phrase;
identity_pass_phrase("ecdsa-sha2-"++_) -> ecdsa_pass_phrase;
identity_pass_phrase(P) when is_atom(P) ->
- identity_pass_phrase(atom_to_list(P)).
+ identity_pass_phrase(atom_to_list(P));
+identity_pass_phrase(_) -> undefined.
lookup_host_key_fd(Fd, KeyToMatch, Host, KeyType) ->
case io:get_line(Fd, '') of
@@ -318,6 +326,10 @@ key_match({#'ECPoint'{},{namedCurve,Curve}}, Alg) ->
_ ->
false
end;
+key_match({ed_pub,ed25519,_}, 'ssh-ed25519') ->
+ true;
+key_match({ed_pub,ed448,_}, 'ssh-ed448') ->
+ true;
key_match(_, _) ->
false.
diff --git a/lib/ssh/src/ssh_message.erl b/lib/ssh/src/ssh_message.erl
index da4027a763..d95e58c1bb 100644
--- a/lib/ssh/src/ssh_message.erl
+++ b/lib/ssh/src/ssh_message.erl
@@ -611,7 +611,13 @@ encode_signature({_, #'Dss-Parms'{}}, _SigAlg, Signature) ->
<<?Ebinary(<<"ssh-dss">>), ?Ebinary(Signature)>>;
encode_signature({#'ECPoint'{}, {namedCurve,OID}}, _SigAlg, Signature) ->
CurveName = public_key:oid2ssh_curvename(OID),
- <<?Ebinary(<<"ecdsa-sha2-",CurveName/binary>>), ?Ebinary(Signature)>>.
+ <<?Ebinary(<<"ecdsa-sha2-",CurveName/binary>>), ?Ebinary(Signature)>>;
+encode_signature({ed_pub, ed25519,_}, _SigAlg, Signature) ->
+ <<?Ebinary(<<"ssh-ed25519">>), ?Ebinary(Signature)>>;
+encode_signature({ed_pub, ed448,_}, _SigAlg, Signature) ->
+ <<?Ebinary(<<"ssh-ed448">>), ?Ebinary(Signature)>>.
+
+
%%%################################################################
%%%#
diff --git a/lib/ssh/src/ssh_options.erl b/lib/ssh/src/ssh_options.erl
index bc9f2156bc..1010c9be55 100644
--- a/lib/ssh/src/ssh_options.erl
+++ b/lib/ssh/src/ssh_options.erl
@@ -434,6 +434,18 @@ default(client) ->
class => user_options
},
+%%% Not yet implemented {ed25519_pass_phrase, def} =>
+%%% Not yet implemented #{default => undefined,
+%%% Not yet implemented chk => fun check_string/1,
+%%% Not yet implemented class => user_options
+%%% Not yet implemented },
+%%% Not yet implemented
+%%% Not yet implemented {ed448_pass_phrase, def} =>
+%%% Not yet implemented #{default => undefined,
+%%% Not yet implemented chk => fun check_string/1,
+%%% Not yet implemented class => user_options
+%%% Not yet implemented },
+%%% Not yet implemented
{silently_accept_hosts, def} =>
#{default => false,
chk => fun check_silently_accept_hosts/1,
@@ -452,12 +464,6 @@ default(client) ->
class => user_options
},
- {pref_public_key_algs, def} =>
- #{default => ssh_transport:default_algorithms(public_key),
- chk => fun check_pref_public_key_algs/1,
- class => user_options
- },
-
{dh_gex_limits, def} =>
#{default => {1024, 6144, 8192}, % FIXME: Is this true nowadays?
chk => fun({Min,I,Max}) ->
@@ -523,6 +529,12 @@ default(common) ->
class => user_options
},
+ {pref_public_key_algs, def} =>
+ #{default => ssh_transport:default_algorithms(public_key),
+ chk => fun check_pref_public_key_algs/1,
+ class => user_options
+ },
+
{preferred_algorithms, def} =>
#{default => ssh:default_algorithms(),
chk => fun check_preferred_algorithms/1,
diff --git a/lib/ssh/src/ssh_transport.erl b/lib/ssh/src/ssh_transport.erl
index c5b0704925..9ff20454cd 100644
--- a/lib/ssh/src/ssh_transport.erl
+++ b/lib/ssh/src/ssh_transport.erl
@@ -51,7 +51,9 @@
extract_public_key/1,
ssh_packet/2, pack/2,
valid_key_sha_alg/2,
- sha/1, sign/3, verify/5]).
+ sha/1, sign/3, verify/5,
+ get_host_key/2,
+ call_KeyCb/3]).
-export([dbg_trace/3]).
@@ -147,6 +149,8 @@ supported_algorithms(public_key) ->
{'ecdsa-sha2-nistp384', [{public_keys,ecdsa}, {hashs,sha384}, {curves,secp384r1}]},
{'ecdsa-sha2-nistp521', [{public_keys,ecdsa}, {hashs,sha512}, {curves,secp521r1}]},
{'ecdsa-sha2-nistp256', [{public_keys,ecdsa}, {hashs,sha256}, {curves,secp256r1}]},
+ {'ssh-ed25519', [{public_keys,eddsa}, {curves,ed25519} ]},
+ {'ssh-ed448', [{public_keys,eddsa}, {curves,ed448} ]},
{'ssh-rsa', [{public_keys,rsa}, {hashs,sha} ]},
{'rsa-sha2-256', [{public_keys,rsa}, {hashs,sha256} ]},
{'rsa-sha2-512', [{public_keys,rsa}, {hashs,sha512} ]},
@@ -431,7 +435,8 @@ key_exchange_first_msg(Kex, Ssh0) when Kex == 'ecdh-sha2-nistp256' ;
%%%
handle_kexdh_init(#ssh_msg_kexdh_init{e = E},
Ssh0 = #ssh{algorithms = #alg{kex=Kex,
- hkey=SignAlg} = Algs}) ->
+ hkey=SignAlg} = Algs,
+ opts = Opts}) ->
%% server
{G, P} = dh_group(Kex),
if
@@ -439,7 +444,7 @@ handle_kexdh_init(#ssh_msg_kexdh_init{e = E},
Sz = dh_bits(Algs),
{Public, Private} = generate_key(dh, [P,G,2*Sz]),
K = compute_key(dh, E, Private, [P,G]),
- MyPrivHostKey = get_host_key(Ssh0, SignAlg),
+ MyPrivHostKey = get_host_key(SignAlg, Opts),
MyPubHostKey = extract_public_key(MyPrivHostKey),
H = kex_hash(Ssh0, MyPubHostKey, sha(Kex), {E,Public,K}),
H_SIG = sign(H, sha(SignAlg), MyPrivHostKey),
@@ -578,14 +583,15 @@ handle_kex_dh_gex_init(#ssh_msg_kex_dh_gex_init{e = E},
#ssh{keyex_key = {{Private, Public}, {G, P}},
keyex_info = {Min, Max, NBits},
algorithms = #alg{kex=Kex,
- hkey=SignAlg}} = Ssh0) ->
+ hkey=SignAlg},
+ opts = Opts} = Ssh0) ->
%% server
if
1=<E, E=<(P-1) ->
K = compute_key(dh, E, Private, [P,G]),
if
1<K, K<(P-1) ->
- MyPrivHostKey = get_host_key(Ssh0, SignAlg),
+ MyPrivHostKey = get_host_key(SignAlg, Opts),
MyPubHostKey = extract_public_key(MyPrivHostKey),
H = kex_hash(Ssh0, MyPubHostKey, sha(Kex), {Min,NBits,Max,P,G,E,Public,K}),
H_SIG = sign(H, sha(SignAlg), MyPrivHostKey),
@@ -653,7 +659,8 @@ handle_kex_dh_gex_reply(#ssh_msg_kex_dh_gex_reply{public_host_key = PeerPubHostK
%%%
handle_kex_ecdh_init(#ssh_msg_kex_ecdh_init{q_c = PeerPublic},
Ssh0 = #ssh{algorithms = #alg{kex=Kex,
- hkey=SignAlg}}) ->
+ hkey=SignAlg},
+ opts = Opts}) ->
%% at server
Curve = ecdh_curve(Kex),
{MyPublic, MyPrivate} = generate_key(ecdh, Curve),
@@ -661,7 +668,7 @@ handle_kex_ecdh_init(#ssh_msg_kex_ecdh_init{q_c = PeerPublic},
compute_key(ecdh, PeerPublic, MyPrivate, Curve)
of
K ->
- MyPrivHostKey = get_host_key(Ssh0, SignAlg),
+ MyPrivHostKey = get_host_key(SignAlg, Opts),
MyPubHostKey = extract_public_key(MyPrivHostKey),
H = kex_hash(Ssh0, MyPubHostKey, sha(Curve), {PeerPublic, MyPublic, K}),
H_SIG = sign(H, sha(SignAlg), MyPrivHostKey),
@@ -759,8 +766,7 @@ ext_info_message(#ssh{role=server,
send_ext_info=true,
opts = Opts} = Ssh0) ->
AlgsList = lists:map(fun erlang:atom_to_list/1,
- proplists:get_value(public_key,
- ?GET_OPT(preferred_algorithms, Opts))),
+ ?GET_OPT(pref_public_key_algs, Opts)),
Msg = #ssh_msg_ext_info{nr_extensions = 1,
data = [{"server-sig-algs", string:join(AlgsList,",")}]
},
@@ -778,10 +784,8 @@ sid(#ssh{session_id = Id}, _) -> Id.
%%
%% The host key should be read from storage
%%
-get_host_key(SSH, SignAlg) ->
- #ssh{key_cb = {KeyCb,KeyCbOpts}, opts = Opts} = SSH,
- UserOpts = ?GET_OPT(user_options, Opts),
- case KeyCb:host_key(SignAlg, [{key_cb_private,KeyCbOpts}|UserOpts]) of
+get_host_key(SignAlg, Opts) ->
+ case call_KeyCb(host_key, [SignAlg], Opts) of
{ok, PrivHostKey} ->
%% Check the key - the KeyCb may be a buggy plugin
case valid_key_sha_alg(PrivHostKey, SignAlg) of
@@ -792,6 +796,11 @@ get_host_key(SSH, SignAlg) ->
exit({error, {Result, unsupported_key_type}})
end.
+call_KeyCb(F, Args, Opts) ->
+ {KeyCb,KeyCbOpts} = ?GET_OPT(key_cb, Opts),
+ UserOpts = ?GET_OPT(user_options, Opts),
+ apply(KeyCb, F, Args ++ [[{key_cb_private,KeyCbOpts}|UserOpts]]).
+
extract_public_key(#'RSAPrivateKey'{modulus = N, publicExponent = E}) ->
#'RSAPublicKey'{modulus = N, publicExponent = E};
extract_public_key(#'DSAPrivateKey'{y = Y, p = P, q = Q, g = G}) ->
@@ -799,6 +808,8 @@ extract_public_key(#'DSAPrivateKey'{y = Y, p = P, q = Q, g = G}) ->
extract_public_key(#'ECPrivateKey'{parameters = {namedCurve,OID},
publicKey = Q}) ->
{#'ECPoint'{point=Q}, {namedCurve,OID}};
+extract_public_key({ed_pri, Alg, Pub, _Priv}) ->
+ {ed_pub, Alg, Pub};
extract_public_key(#{engine:=_, key_id:=_, algorithm:=Alg} = M) ->
case {Alg, crypto:privkey_to_pubkey(Alg, M)} of
{rsa, [E,N]} ->
@@ -858,29 +869,30 @@ accepted_host(Ssh, PeerName, Public, Opts) ->
end.
-yes_no(Ssh, Prompt) ->
- (Ssh#ssh.io_cb):yes_no(Prompt, Ssh#ssh.opts).
+yes_no(#ssh{opts=Opts}, Prompt) ->
+ IoCb = ?GET_INTERNAL_OPT(io_cb, Opts, ssh_io),
+ IoCb:yes_no(Prompt, Opts).
fmt_hostkey('ssh-rsa') -> "RSA";
fmt_hostkey('ssh-dss') -> "DSA";
+fmt_hostkey('ssh-ed25519') -> "ED25519";
+fmt_hostkey('ssh-ed448') -> "ED448";
fmt_hostkey(A) when is_atom(A) -> fmt_hostkey(atom_to_list(A));
fmt_hostkey("ecdsa"++_) -> "ECDSA";
fmt_hostkey(X) -> X.
-known_host_key(#ssh{opts = Opts, key_cb = {KeyCb,KeyCbOpts}, peer = {PeerName,_}} = Ssh,
+known_host_key(#ssh{opts = Opts, peer = {PeerName,_}} = Ssh,
Public, Alg) ->
- UserOpts = ?GET_OPT(user_options, Opts),
- case is_host_key(KeyCb, Public, PeerName, Alg, [{key_cb_private,KeyCbOpts}|UserOpts]) of
- {_,true} ->
+ case call_KeyCb(is_host_key, [Public, PeerName, Alg], Opts) of
+ true ->
ok;
- {_,false} ->
+ false ->
DoAdd = ?GET_OPT(save_accepted_host, Opts),
case accepted_host(Ssh, PeerName, Public, Opts) of
true when DoAdd == true ->
- {_,R} = add_host_key(KeyCb, PeerName, Public, [{key_cb_private,KeyCbOpts}|UserOpts]),
- R;
+ call_KeyCb(add_host_key, [PeerName, Public], Opts);
true when DoAdd == false ->
ok;
false ->
@@ -890,13 +902,6 @@ known_host_key(#ssh{opts = Opts, key_cb = {KeyCb,KeyCbOpts}, peer = {PeerName,_}
end
end.
-is_host_key(KeyCb, Public, PeerName, Alg, Data) ->
- {KeyCb, KeyCb:is_host_key(Public, PeerName, Alg, Data)}.
-
-add_host_key(KeyCb, PeerName, Public, Data) ->
- {KeyCb, KeyCb:add_host_key(PeerName, Public, Data)}.
-
-
%% Each of the algorithm strings MUST be a comma-separated list of
%% algorithm names (see ''Algorithm Naming'' in [SSH-ARCH]). Each
%% supported (allowed) algorithm MUST be listed in order of preference.
@@ -1937,6 +1942,11 @@ valid_key_sha_alg(#'RSAPrivateKey'{}, 'ssh-rsa' ) -> true;
valid_key_sha_alg({_, #'Dss-Parms'{}}, 'ssh-dss') -> true;
valid_key_sha_alg(#'DSAPrivateKey'{}, 'ssh-dss') -> true;
+valid_key_sha_alg({ed_pub, ed25519,_}, 'ssh-ed25519') -> true;
+valid_key_sha_alg({ed_pri, ed25519,_,_},'ssh-ed25519') -> true;
+valid_key_sha_alg({ed_pub, ed448,_}, 'ssh-ed448') -> true;
+valid_key_sha_alg({ed_pri, ed448,_,_}, 'ssh-ed448') -> true;
+
valid_key_sha_alg({#'ECPoint'{},{namedCurve,OID}}, Alg) -> valid_key_sha_alg_ec(OID, Alg);
valid_key_sha_alg(#'ECPrivateKey'{parameters = {namedCurve,OID}}, Alg) -> valid_key_sha_alg_ec(OID, Alg);
valid_key_sha_alg(_, _) -> false.
@@ -1946,12 +1956,17 @@ valid_key_sha_alg_ec(OID, Alg) ->
Alg == list_to_atom("ecdsa-sha2-" ++ binary_to_list(Curve)).
+-dialyzer({no_match, public_algo/1}).
+
public_algo(#'RSAPublicKey'{}) -> 'ssh-rsa'; % FIXME: Not right with draft-curdle-rsa-sha2
public_algo({_, #'Dss-Parms'{}}) -> 'ssh-dss';
+public_algo({ed_pub, ed25519,_}) -> 'ssh-ed25519';
+public_algo({ed_pub, ed448,_}) -> 'ssh-ed448';
public_algo({#'ECPoint'{},{namedCurve,OID}}) ->
Curve = public_key:oid2ssh_curvename(OID),
list_to_atom("ecdsa-sha2-" ++ binary_to_list(Curve)).
+
sha('ssh-rsa') -> sha;
sha('rsa-sha2-256') -> sha256;
sha('rsa-sha2-384') -> sha384;
@@ -1960,6 +1975,8 @@ sha('ssh-dss') -> sha;
sha('ecdsa-sha2-nistp256') -> sha(secp256r1);
sha('ecdsa-sha2-nistp384') -> sha(secp384r1);
sha('ecdsa-sha2-nistp521') -> sha(secp521r1);
+sha('ssh-ed25519') -> undefined; % Included in the spec of ed25519
+sha('ssh-ed448') -> undefined; % Included in the spec of ed448
sha(secp256r1) -> sha256;
sha(secp384r1) -> sha384;
sha(secp521r1) -> sha512;
@@ -2054,7 +2071,6 @@ ecdh_curve('curve448-sha512' ) -> x448;
ecdh_curve('curve25519-sha256' ) -> x25519;
ecdh_curve('[email protected]' ) -> x25519.
-
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%
%% Utils for default_algorithms/1 and supported_algorithms/1
diff --git a/lib/ssh/test/ssh_algorithms_SUITE.erl b/lib/ssh/test/ssh_algorithms_SUITE.erl
index 5e589e585f..02e5f40c38 100644
--- a/lib/ssh/test/ssh_algorithms_SUITE.erl
+++ b/lib/ssh/test/ssh_algorithms_SUITE.erl
@@ -184,12 +184,15 @@ init_per_testcase(TC, {public_key,Alg}, Config) ->
| ExtraOpts],
[{extra_daemon,true}|Config]);
{{ok,_}, {error,Err}} ->
+ ct:log("Alg = ~p~nOpts = ~p",[Alg,Opts]),
{skip, io_lib:format("No host key: ~p",[Err])};
{{error,Err}, {ok,_}} ->
+ ct:log("Alg = ~p~nOpts = ~p",[Alg,Opts]),
{skip, io_lib:format("No user key: ~p",[Err])};
_ ->
+ ct:log("Alg = ~p~nOpts = ~p",[Alg,Opts]),
{skip, "Neither host nor user key"}
end;
@@ -470,7 +473,9 @@ setup_pubkey(Alg, Config) ->
'rsa-sha2-512' -> ssh_test_lib:setup_rsa(DataDir, UserDir);
'ecdsa-sha2-nistp256' -> ssh_test_lib:setup_ecdsa("256", DataDir, UserDir);
'ecdsa-sha2-nistp384' -> ssh_test_lib:setup_ecdsa("384", DataDir, UserDir);
- 'ecdsa-sha2-nistp521' -> ssh_test_lib:setup_ecdsa("521", DataDir, UserDir)
+ 'ecdsa-sha2-nistp521' -> ssh_test_lib:setup_ecdsa("521", DataDir, UserDir);
+ 'ssh-ed25519' -> ssh_test_lib:setup_eddsa(ed25519, DataDir, UserDir);
+ 'ssh-ed448' -> ssh_test_lib:setup_eddsa(ed448, DataDir, UserDir)
end,
Config.
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519 b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519
new file mode 100644
index 0000000000..401a3e4a9a
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnwAAAJg3+6xpN/us
+aQAAAAtzc2gtZWQyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnw
+AAAEBzC/Z2WGJhZ3l3tIBnUc6DCbp+lXY2yc2RRpWQTdf8sub0/z+ALQg4qbActK+SYS1L
+ZfHRFGpQOoLrBjpSANWfAAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519.pub b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519.pub
new file mode 100644
index 0000000000..a5c03b19c1
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOb0/z+ALQg4qbActK+SYS1LZfHRFGpQOoLrBjpSANWf uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448 b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448
new file mode 100644
index 0000000000..8ecfd710dc
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA53OqeePNaG/NJmoMbELhskKrAHNhLZ6AQm1WjbpMoseNl/OFh
+1xznExpUPqTLX36fHYsAaWRHABQAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtf
+fp8diwBpZEcAFAAAAAByzSPST3FCdOdENDI3uTKQ9RH2Ql+Y5kRZ/yA+iYUIP/32
+BQBVOrwOBc0CGEvbicTM1n4YeVEmfrMo3OqeePNaG/NJmoMbELhskKrAHNhLZ6AQ
+m1WjbpMoseNl/OFh1xznExpUPqTLX36fHYsAaWRHABQAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448.pub b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448.pub
new file mode 100644
index 0000000000..cec0765a5d
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/id_ed448.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtffp8diwBpZEcAFAA= uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key
new file mode 100644
index 0000000000..13a8fcf491
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQAAAJi+h4O7voeD
+uwAAAAtzc2gtZWQyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQ
+AAAEBaOcJfGPNemKc1wPHTCmM4Kwvh6dZ0CqY14UT361UnN0lI66JgZZo72XJ7wGBp+h3W
+TDw/pxXddvaomAIHrIl9AAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key.pub b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key.pub
new file mode 100644
index 0000000000..156ef4045c
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed25519_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIElI66JgZZo72XJ7wGBp+h3WTDw/pxXddvaomAIHrIl9 uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key
new file mode 100644
index 0000000000..31a7e4e8c3
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA5X9dEm1m0Yf0s54fsYWrUah2hNCSFpw4fig6nXYDpZ3jt8SR2
+m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHl
+D2zR+hq+r+glYYAAAABybIKlYsuAjRDWMr6JyFE+v2ySnzTd+oyfY8mWDvbjSKNS
+jIo/zC8ETjmj/FuUSS+PAy51SaIAmPlbX9dEm1m0Yf0s54fsYWrUah2hNCSFpw4f
+ig6nXYDpZ3jt8SR2m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key.pub b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key.pub
new file mode 100644
index 0000000000..8c390dcb58
--- /dev/null
+++ b/lib/ssh/test/ssh_algorithms_SUITE_data/ssh_host_ed448_key.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHlD2zR+hq+r+glYYA=
diff --git a/lib/ssh/test/ssh_basic_SUITE.erl b/lib/ssh/test/ssh_basic_SUITE.erl
index 778ae1e7b6..da94b5722f 100644
--- a/lib/ssh/test/ssh_basic_SUITE.erl
+++ b/lib/ssh/test/ssh_basic_SUITE.erl
@@ -56,6 +56,8 @@ groups() ->
{group, ecdsa_sha2_nistp256_key},
{group, ecdsa_sha2_nistp384_key},
{group, ecdsa_sha2_nistp521_key},
+ {group, ed25519_key},
+ {group, ed448_key},
{group, dsa_pass_key},
{group, rsa_pass_key},
{group, ecdsa_sha2_nistp256_pass_key},
@@ -94,6 +96,8 @@ groups() ->
{ecdsa_sha2_nistp256_key, [], [{group, basic}]},
{ecdsa_sha2_nistp384_key, [], [{group, basic}]},
{ecdsa_sha2_nistp521_key, [], [{group, basic}]},
+ {ed25519_key, [], [{group, basic}]},
+ {ed448_key, [], [{group, basic}]},
{rsa_host_key_is_actualy_ecdsa, [], [fail_daemon_start]},
{host_user_key_differs, [parallel], [exec_key_differs1,
exec_key_differs2,
@@ -222,6 +226,28 @@ init_per_group(ecdsa_sha2_nistp521_key, Config) ->
false ->
{skip, unsupported_pub_key}
end;
+init_per_group(ed25519_key, Config) ->
+ case lists:member('ssh-ed25519',
+ ssh_transport:default_algorithms(public_key)) of
+ true ->
+ DataDir = proplists:get_value(data_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ ssh_test_lib:setup_eddsa(ed25519, DataDir, PrivDir),
+ Config;
+ false ->
+ {skip, unsupported_pub_key}
+ end;
+init_per_group(ed448_key, Config) ->
+ case lists:member('ssh-ed448',
+ ssh_transport:default_algorithms(public_key)) of
+ true ->
+ DataDir = proplists:get_value(data_dir, Config),
+ PrivDir = proplists:get_value(priv_dir, Config),
+ ssh_test_lib:setup_eddsa(ed448, DataDir, PrivDir),
+ Config;
+ false ->
+ {skip, unsupported_pub_key}
+ end;
init_per_group(rsa_pass_key, Config) ->
case lists:member('ssh-rsa',
ssh_transport:default_algorithms(public_key)) of
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519 b/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519
new file mode 100644
index 0000000000..401a3e4a9a
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnwAAAJg3+6xpN/us
+aQAAAAtzc2gtZWQyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnw
+AAAEBzC/Z2WGJhZ3l3tIBnUc6DCbp+lXY2yc2RRpWQTdf8sub0/z+ALQg4qbActK+SYS1L
+ZfHRFGpQOoLrBjpSANWfAAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519.pub b/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519.pub
new file mode 100644
index 0000000000..a5c03b19c1
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOb0/z+ALQg4qbActK+SYS1LZfHRFGpQOoLrBjpSANWf uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/id_ed448 b/lib/ssh/test/ssh_basic_SUITE_data/id_ed448
new file mode 100644
index 0000000000..8ecfd710dc
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/id_ed448
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA53OqeePNaG/NJmoMbELhskKrAHNhLZ6AQm1WjbpMoseNl/OFh
+1xznExpUPqTLX36fHYsAaWRHABQAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtf
+fp8diwBpZEcAFAAAAAByzSPST3FCdOdENDI3uTKQ9RH2Ql+Y5kRZ/yA+iYUIP/32
+BQBVOrwOBc0CGEvbicTM1n4YeVEmfrMo3OqeePNaG/NJmoMbELhskKrAHNhLZ6AQ
+m1WjbpMoseNl/OFh1xznExpUPqTLX36fHYsAaWRHABQAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/id_ed448.pub b/lib/ssh/test/ssh_basic_SUITE_data/id_ed448.pub
new file mode 100644
index 0000000000..cec0765a5d
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/id_ed448.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtffp8diwBpZEcAFAA= uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key
new file mode 100644
index 0000000000..13a8fcf491
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQAAAJi+h4O7voeD
+uwAAAAtzc2gtZWQyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQ
+AAAEBaOcJfGPNemKc1wPHTCmM4Kwvh6dZ0CqY14UT361UnN0lI66JgZZo72XJ7wGBp+h3W
+TDw/pxXddvaomAIHrIl9AAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key.pub b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key.pub
new file mode 100644
index 0000000000..156ef4045c
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed25519_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIElI66JgZZo72XJ7wGBp+h3WTDw/pxXddvaomAIHrIl9 uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key
new file mode 100644
index 0000000000..31a7e4e8c3
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA5X9dEm1m0Yf0s54fsYWrUah2hNCSFpw4fig6nXYDpZ3jt8SR2
+m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHl
+D2zR+hq+r+glYYAAAABybIKlYsuAjRDWMr6JyFE+v2ySnzTd+oyfY8mWDvbjSKNS
+jIo/zC8ETjmj/FuUSS+PAy51SaIAmPlbX9dEm1m0Yf0s54fsYWrUah2hNCSFpw4f
+ig6nXYDpZ3jt8SR2m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key.pub b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key.pub
new file mode 100644
index 0000000000..8c390dcb58
--- /dev/null
+++ b/lib/ssh/test/ssh_basic_SUITE_data/ssh_host_ed448_key.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHlD2zR+hq+r+glYYA=
diff --git a/lib/ssh/test/ssh_compat_SUITE.erl b/lib/ssh/test/ssh_compat_SUITE.erl
index 1c607bebe8..f4eef2dc77 100644
--- a/lib/ssh/test/ssh_compat_SUITE.erl
+++ b/lib/ssh/test/ssh_compat_SUITE.erl
@@ -648,6 +648,7 @@ setup_remote_priv_and_local_auth_keys(KeyAlg, IP, Port, UserDir, Config) ->
{silently_accept_hosts,true},
{user_interaction,false}
]),
+ rm_id_in_remote_dir(Ch, ".ssh"),
_ = ssh_sftp:make_dir(Ch, ".ssh"),
DstFile = filename:join(".ssh", dst_filename(user,KeyAlg)),
ok = ssh_sftp:write_file(Ch, DstFile, Priv),
@@ -658,6 +659,18 @@ setup_remote_priv_and_local_auth_keys(KeyAlg, IP, Port, UserDir, Config) ->
ok = ssh:close(Cc),
UserDir.
+rm_id_in_remote_dir(Ch, Dir) ->
+ case ssh_sftp:list_dir(Ch, Dir) of
+ {error,_Error} ->
+ ok;
+ {ok,FileNames} ->
+ lists:foreach(fun("id_"++_ = F) ->
+ ok = ssh_sftp:delete(Ch, filename:join(Dir,F));
+ (_) ->
+ leave
+ end, FileNames)
+ end.
+
user_priv_pub_keys(Config, KeyAlg) -> priv_pub_keys("users_keys", user, Config, KeyAlg).
host_priv_pub_keys(Config, KeyAlg) -> priv_pub_keys("host_keys", host, Config, KeyAlg).
@@ -673,6 +686,8 @@ src_filename(user, 'ssh-rsa' ) -> "id_rsa";
src_filename(user, 'rsa-sha2-256' ) -> "id_rsa";
src_filename(user, 'rsa-sha2-512' ) -> "id_rsa";
src_filename(user, 'ssh-dss' ) -> "id_dsa";
+src_filename(user, 'ssh-ed25519' ) -> "id_ed25519";
+src_filename(user, 'ssh-ed448' ) -> "id_ed448";
src_filename(user, 'ecdsa-sha2-nistp256') -> "id_ecdsa256";
src_filename(user, 'ecdsa-sha2-nistp384') -> "id_ecdsa384";
src_filename(user, 'ecdsa-sha2-nistp521') -> "id_ecdsa521";
@@ -680,6 +695,8 @@ src_filename(host, 'ssh-rsa' ) -> "ssh_host_rsa_key";
src_filename(host, 'rsa-sha2-256' ) -> "ssh_host_rsa_key";
src_filename(host, 'rsa-sha2-512' ) -> "ssh_host_rsa_key";
src_filename(host, 'ssh-dss' ) -> "ssh_host_dsa_key";
+src_filename(host, 'ssh-ed25519' ) -> "ssh_host_ed25519_key";
+src_filename(host, 'ssh-ed448' ) -> "ssh_host_ed448_key";
src_filename(host, 'ecdsa-sha2-nistp256') -> "ssh_host_ecdsa_key256";
src_filename(host, 'ecdsa-sha2-nistp384') -> "ssh_host_ecdsa_key384";
src_filename(host, 'ecdsa-sha2-nistp521') -> "ssh_host_ecdsa_key521".
@@ -688,6 +705,8 @@ dst_filename(user, 'ssh-rsa' ) -> "id_rsa";
dst_filename(user, 'rsa-sha2-256' ) -> "id_rsa";
dst_filename(user, 'rsa-sha2-512' ) -> "id_rsa";
dst_filename(user, 'ssh-dss' ) -> "id_dsa";
+dst_filename(user, 'ssh-ed25519' ) -> "id_ed25519";
+dst_filename(user, 'ssh-ed448' ) -> "id_ed448";
dst_filename(user, 'ecdsa-sha2-nistp256') -> "id_ecdsa";
dst_filename(user, 'ecdsa-sha2-nistp384') -> "id_ecdsa";
dst_filename(user, 'ecdsa-sha2-nistp521') -> "id_ecdsa";
@@ -695,6 +714,8 @@ dst_filename(host, 'ssh-rsa' ) -> "ssh_host_rsa_key";
dst_filename(host, 'rsa-sha2-256' ) -> "ssh_host_rsa_key";
dst_filename(host, 'rsa-sha2-512' ) -> "ssh_host_rsa_key";
dst_filename(host, 'ssh-dss' ) -> "ssh_host_dsa_key";
+dst_filename(host, 'ssh-ed25519' ) -> "ssh_host_ed25519_key";
+dst_filename(host, 'ssh-ed448' ) -> "ssh_host_ed448_key";
dst_filename(host, 'ecdsa-sha2-nistp256') -> "ssh_host_ecdsa_key";
dst_filename(host, 'ecdsa-sha2-nistp384') -> "ssh_host_ecdsa_key";
dst_filename(host, 'ecdsa-sha2-nistp521') -> "ssh_host_ecdsa_key".
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all b/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
index 0dcf8cb570..c2e77fcc79 100755
--- a/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
+++ b/lib/ssh/test/ssh_compat_SUITE_data/build_scripts/create_all
@@ -18,6 +18,12 @@ SSH_SSL_VERSIONS=(\
openssh 7.6p1 openssl 1.0.2n \
\
openssh 7.6p1 libressl 2.6.4 \
+ \
+ openssh 7.7p1 openssl 1.0.2p \
+ openssh 7.8p1 openssl 1.0.2p \
+ openssh 7.9p1 openssl 1.0.2p \
+ \
+ openssh 7.9p1 libressl 2.6.4 \
)
if [ "x$1" == "x-b" ]
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key
new file mode 100644
index 0000000000..13a8fcf491
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQAAAJi+h4O7voeD
+uwAAAAtzc2gtZWQyNTUxOQAAACBJSOuiYGWaO9lye8Bgafod1kw8P6cV3Xb2qJgCB6yJfQ
+AAAEBaOcJfGPNemKc1wPHTCmM4Kwvh6dZ0CqY14UT361UnN0lI66JgZZo72XJ7wGBp+h3W
+TDw/pxXddvaomAIHrIl9AAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key.pub b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key.pub
new file mode 100644
index 0000000000..156ef4045c
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed25519_key.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIElI66JgZZo72XJ7wGBp+h3WTDw/pxXddvaomAIHrIl9 uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key
new file mode 100644
index 0000000000..31a7e4e8c3
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA5X9dEm1m0Yf0s54fsYWrUah2hNCSFpw4fig6nXYDpZ3jt8SR2
+m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHl
+D2zR+hq+r+glYYAAAABybIKlYsuAjRDWMr6JyFE+v2ySnzTd+oyfY8mWDvbjSKNS
+jIo/zC8ETjmj/FuUSS+PAy51SaIAmPlbX9dEm1m0Yf0s54fsYWrUah2hNCSFpw4f
+ig6nXYDpZ3jt8SR2m0bHBhvWeD3x5Q9s0foavq/oJWGAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key.pub b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key.pub
new file mode 100644
index 0000000000..8c390dcb58
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/host_keys/ssh_host_ed448_key.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADlf10SbWbRh/Sznh+xhatRqHaE0JIWnDh+KDqddgOlneO3xJHabRscGG9Z4PfHlD2zR+hq+r+glYYA=
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519 b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519
new file mode 100644
index 0000000000..401a3e4a9a
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519
@@ -0,0 +1,7 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAAMwAAAAtzc2gtZW
+QyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnwAAAJg3+6xpN/us
+aQAAAAtzc2gtZWQyNTUxOQAAACDm9P8/gC0IOKmwHLSvkmEtS2Xx0RRqUDqC6wY6UgDVnw
+AAAEBzC/Z2WGJhZ3l3tIBnUc6DCbp+lXY2yc2RRpWQTdf8sub0/z+ALQg4qbActK+SYS1L
+ZfHRFGpQOoLrBjpSANWfAAAAE3VhYmhuaWxAZWx4YWRsajNxMzIBAg==
+-----END OPENSSH PRIVATE KEY-----
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519.pub b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519.pub
new file mode 100644
index 0000000000..a5c03b19c1
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed25519.pub
@@ -0,0 +1 @@
+ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIOb0/z+ALQg4qbActK+SYS1LZfHRFGpQOoLrBjpSANWf uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448 b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448
new file mode 100644
index 0000000000..8ecfd710dc
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448
@@ -0,0 +1,10 @@
+-----BEGIN OPENSSH PRIVATE KEY-----
+b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAAASgAAAAlz
+c2gtZWQ0NDgAAAA53OqeePNaG/NJmoMbELhskKrAHNhLZ6AQm1WjbpMoseNl/OFh
+1xznExpUPqTLX36fHYsAaWRHABQAAAAA0AAAEREAABERAAAACXNzaC1lZDQ0OAAA
+ADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtf
+fp8diwBpZEcAFAAAAAByzSPST3FCdOdENDI3uTKQ9RH2Ql+Y5kRZ/yA+iYUIP/32
+BQBVOrwOBc0CGEvbicTM1n4YeVEmfrMo3OqeePNaG/NJmoMbELhskKrAHNhLZ6AQ
+m1WjbpMoseNl/OFh1xznExpUPqTLX36fHYsAaWRHABQAAAAAAAECAwQ=
+-----END OPENSSH PRIVATE KEY-----
+
diff --git a/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448.pub b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448.pub
new file mode 100644
index 0000000000..cec0765a5d
--- /dev/null
+++ b/lib/ssh/test/ssh_compat_SUITE_data/users_keys/id_ed448.pub
@@ -0,0 +1 @@
+ssh-ed448 AAAACXNzaC1lZDQ0OAAAADnc6p5481ob80magxsQuGyQqsAc2EtnoBCbVaNukyix42X84WHXHOcTGlQ+pMtffp8diwBpZEcAFAA= uabhnil@elxadlj3q32
diff --git a/lib/ssh/test/ssh_engine_SUITE.erl b/lib/ssh/test/ssh_engine_SUITE.erl
index c2e6ac1fee..3adb23acdb 100644
--- a/lib/ssh/test/ssh_engine_SUITE.erl
+++ b/lib/ssh/test/ssh_engine_SUITE.erl
@@ -126,10 +126,17 @@ simple_connect(Config) ->
load_engine() ->
case crypto:get_test_engine() of
{ok, Engine} ->
- try crypto:engine_load(<<"dynamic">>,
+ try
+ %% The test engine has its own fake RSA sign/verify that
+ %% we don't want to use, so exclude it from the methods to load:
+ Methods =
+ crypto:engine_get_all_methods() -- [engine_method_rsa],
+ crypto:engine_load(<<"dynamic">>,
[{<<"SO_PATH">>, Engine},
<<"LOAD">>],
- [])
+ [],
+ Methods
+ )
catch
error:notsup ->
{error, notsup}
diff --git a/lib/ssh/test/ssh_options_SUITE.erl b/lib/ssh/test/ssh_options_SUITE.erl
index daf62483cd..60d0da2a39 100644
--- a/lib/ssh/test/ssh_options_SUITE.erl
+++ b/lib/ssh/test/ssh_options_SUITE.erl
@@ -49,7 +49,7 @@
server_userpassword_option/1,
server_pwdfun_option/1,
server_pwdfun_4_option/1,
- server_pwdfun_4_option_repeat/1,
+ server_keyboard_interactive/1,
ssh_connect_arg4_timeout/1,
ssh_connect_negtimeout_parallel/1,
ssh_connect_negtimeout_sequential/1,
@@ -99,7 +99,7 @@ all() ->
server_userpassword_option,
server_pwdfun_option,
server_pwdfun_4_option,
- server_pwdfun_4_option_repeat,
+ server_keyboard_interactive,
{group, dir_options},
ssh_connect_timeout,
ssh_connect_arg4_timeout,
@@ -381,7 +381,7 @@ server_pwdfun_4_option(Config) ->
%%--------------------------------------------------------------------
-server_pwdfun_4_option_repeat(Config) ->
+server_keyboard_interactive(Config) ->
UserDir = proplists:get_value(user_dir, Config),
SysDir = proplists:get_value(data_dir, Config),
%% Test that the state works
@@ -396,19 +396,28 @@ server_pwdfun_4_option_repeat(Config) ->
{pwdfun,PWDFUN}]),
%% Try with passwords "incorrect", "Bad again" and finally "bar"
- KIFFUN = fun(_,_,_) ->
+ KIFFUN = fun(_Name, _Instr, _PromptInfos) ->
K={k,self()},
- case get(K) of
- undefined ->
- put(K,1),
- ["incorrect"];
- 2 ->
- put(K,3),
- ["bar"];
- S->
- put(K,S+1),
- ["Bad again"]
- end
+ Answer =
+ case get(K) of
+ undefined ->
+ put(K,1),
+ ["incorrect"];
+ 2 ->
+ put(K,3),
+ ["bar"];
+ S->
+ put(K,S+1),
+ ["Bad again"]
+ end,
+ ct:log("keyboard_interact_fun:~n"
+ " Name = ~p~n"
+ " Instruction = ~p~n"
+ " Prompts = ~p~n"
+ "~nAnswer:~n ~p~n",
+ [_Name, _Instr, _PromptInfos, Answer]),
+
+ Answer
end,
ConnectionRef2 =
diff --git a/lib/ssh/test/ssh_test_lib.erl b/lib/ssh/test/ssh_test_lib.erl
index 416cc301db..a1a7eebcde 100644
--- a/lib/ssh/test/ssh_test_lib.erl
+++ b/lib/ssh/test/ssh_test_lib.erl
@@ -408,6 +408,21 @@ ct:log("DataDir ~p:~n ~p~n~nSystDir ~p:~n ~p~n~nUserDir ~p:~n ~p",[DataDir, file
setup_ecdsa_known_host(Size, System, UserDir),
setup_ecdsa_auth_keys(Size, DataDir, UserDir).
+setup_eddsa(Alg, DataDir, UserDir) ->
+ {IdPriv, IdPub, HostPriv, HostPub} =
+ case Alg of
+ ed25519 -> {"id_ed25519", "id_ed25519.pub", "ssh_host_ed25519_key", "ssh_host_ed25519_key.pub"};
+ ed448 -> {"id_ed448", "id_ed448.pub", "ssh_host_ed448_key", "ssh_host_ed448_key.pub"}
+ end,
+ file:copy(filename:join(DataDir, IdPriv), filename:join(UserDir, IdPriv)),
+ System = filename:join(UserDir, "system"),
+ file:make_dir(System),
+ file:copy(filename:join(DataDir, HostPriv), filename:join(System, HostPriv)),
+ file:copy(filename:join(DataDir, HostPub), filename:join(System, HostPub)),
+ct:log("DataDir ~p:~n ~p~n~nSystDir ~p:~n ~p~n~nUserDir ~p:~n ~p",[DataDir, file:list_dir(DataDir), System, file:list_dir(System), UserDir, file:list_dir(UserDir)]),
+ setup_eddsa_known_host(HostPub, DataDir, UserDir),
+ setup_eddsa_auth_keys(IdPriv, DataDir, UserDir).
+
clean_dsa(UserDir) ->
del_dirs(filename:join(UserDir, "system")),
file:delete(filename:join(UserDir,"id_dsa")),
@@ -487,6 +502,11 @@ setup_ecdsa_known_host(_Size, SystemDir, UserDir) ->
[{Key, _}] = public_key:ssh_decode(SshBin, public_key),
setup_known_hosts(Key, UserDir).
+setup_eddsa_known_host(HostPub, SystemDir, UserDir) ->
+ {ok, SshBin} = file:read_file(filename:join(SystemDir, HostPub)),
+ [{Key, _}] = public_key:ssh_decode(SshBin, public_key),
+ setup_known_hosts(Key, UserDir).
+
setup_known_hosts(Key, UserDir) ->
{ok, Hostname} = inet:gethostname(),
{ok, {A, B, C, D}} = inet:getaddr(Hostname, inet),
@@ -529,6 +549,11 @@ setup_ecdsa_auth_keys(Size, Dir, UserDir) ->
PKey = #'ECPoint'{point = Q},
setup_auth_keys([{ {PKey,Param}, [{comment, "Test"}]}], UserDir).
+setup_eddsa_auth_keys(IdPriv, Dir, UserDir) ->
+ {ok, Pem} = file:read_file(filename:join(Dir, IdPriv)),
+ {ed_pri, Alg, Pub, _} = public_key:pem_entry_decode(hd(public_key:pem_decode(Pem))),
+ setup_auth_keys([{{ed_pub,Alg,Pub}, [{comment, "Test"}]}], UserDir).
+
setup_auth_keys(Keys, Dir) ->
AuthKeys = public_key:ssh_encode(Keys, auth_keys),
AuthKeysFile = filename:join(Dir, "authorized_keys"),
diff --git a/lib/ssl/doc/src/notes.xml b/lib/ssl/doc/src/notes.xml
index 673431ed0a..46fd8ab180 100644
--- a/lib/ssl/doc/src/notes.xml
+++ b/lib/ssl/doc/src/notes.xml
@@ -333,6 +333,38 @@
</section>
+<section><title>SSL 8.2.6.4</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Add engine support for RSA key exchange</p>
+ <p>
+ Own Id: OTP-15420</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>SSL 8.2.6.3</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ Extend the check for undelivered data at closing. Under some
+ circumstances, not all data that was actually received was
+ delivered.</p>
+ <p>
+ Own Id: OTP-15412</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>SSL 8.2.6.2</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -3163,5 +3195,3 @@
</section>
</section>
</chapter>
-
-
diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl
index dc89fb0029..14df1d2e02 100644
--- a/lib/ssl/src/ssl_handshake.erl
+++ b/lib/ssl/src/ssl_handshake.erl
@@ -925,6 +925,13 @@ premaster_secret(EncSecret, #'RSAPrivateKey'{} = RSAPrivateKey) ->
catch
_:_ ->
throw(?ALERT_REC(?FATAL, ?DECRYPT_ERROR))
+ end;
+premaster_secret(EncSecret, #{algorithm := rsa} = Engine) ->
+ try crypto:private_decrypt(rsa, EncSecret, maps:remove(algorithm, Engine),
+ [{rsa_pad, rsa_pkcs1_padding}])
+ catch
+ _:_ ->
+ throw(?ALERT_REC(?FATAL, ?DECRYPT_ERROR))
end.
%%====================================================================
%% Extensions handling
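For illustration only: a minimal sketch of how an engine-held RSA key could be configured on the ssl side so that the new premaster_secret/2 clause above is reached during an RSA key exchange. The engine path, key id, port, and certificate file below are hypothetical placeholders, not part of this change.

    %% Sketch: server whose RSA private key lives in an OpenSSL engine.
    %% "/path/to/engine.so", "rsa_key_id" and "server_cert.pem" are placeholders.
    {ok, EngineRef} = crypto:engine_load(<<"dynamic">>,
                                         [{<<"SO_PATH">>, "/path/to/engine.so"},
                                          <<"LOAD">>],
                                         []),
    KeyRef = #{algorithm => rsa, engine => EngineRef, key_id => "rsa_key_id"},
    {ok, LSock} = ssl:listen(9999, [{certfile, "server_cert.pem"},
                                    {key, KeyRef},
                                    {versions, ['tlsv1.2']}]).

ssl passes the key map through, and the new premaster_secret/2 clause strips the algorithm entry before handing the rest of the map to crypto:private_decrypt/4, as shown above.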
diff --git a/lib/ssl/src/tls_sender.erl b/lib/ssl/src/tls_sender.erl
index 8d1938cee7..a245ee2465 100644
--- a/lib/ssl/src/tls_sender.erl
+++ b/lib/ssl/src/tls_sender.erl
@@ -319,7 +319,7 @@ handle_info({'DOWN', Monitor, _, _, _}, _,
#data{connection_monitor = Monitor} = StateData) ->
{stop, normal, StateData};
handle_info(_,_,_) ->
- {keep_state_and_data}.
+ keep_state_and_data.
send_tls_alert(Alert, #data{negotiated_version = Version,
socket = Socket,
diff --git a/lib/ssl/test/ssl_bench_test_lib.erl b/lib/ssl/test/ssl_bench_test_lib.erl
index e5cbb911bd..47bcd41608 100644
--- a/lib/ssl/test/ssl_bench_test_lib.erl
+++ b/lib/ssl/test/ssl_bench_test_lib.erl
@@ -58,13 +58,13 @@ setup(Name) ->
Path = code:get_path(),
true = rpc:call(Node, code, set_path, [Path]),
ok = rpc:call(Node, ?MODULE, setup_server, [node()]),
- io:format("Client (~p) using ~s~n",[node(), code:which(ssl)]),
+ io:format("Client (~p) using ~ts~n",[node(), code:which(ssl)]),
(Node =:= node()) andalso restrict_schedulers(client),
Node.
setup_server(ClientNode) ->
(ClientNode =:= node()) andalso restrict_schedulers(server),
- io:format("Server (~p) using ~s~n",[node(), code:which(ssl)]),
+ io:format("Server (~p) using ~ts~n",[node(), code:which(ssl)]),
ok.
restrict_schedulers(Type) ->
diff --git a/lib/ssl/test/ssl_engine_SUITE.erl b/lib/ssl/test/ssl_engine_SUITE.erl
index 1423c99dc2..a39a62e550 100644
--- a/lib/ssl/test/ssl_engine_SUITE.erl
+++ b/lib/ssl/test/ssl_engine_SUITE.erl
@@ -46,10 +46,17 @@ init_per_suite(Config) ->
ssl_test_lib:clean_start(),
case crypto:get_test_engine() of
{ok, EngineName} ->
- try crypto:engine_load(<<"dynamic">>,
- [{<<"SO_PATH">>, EngineName},
- <<"LOAD">>],
- []) of
+ try
+ %% The test engine has its own fake RSA sign/verify that
+ %% we don't want to use, so exclude it from the methods to load:
+ Methods =
+ crypto:engine_get_all_methods() -- [engine_method_rsa],
+ crypto:engine_load(<<"dynamic">>,
+ [{<<"SO_PATH">>, EngineName},
+ <<"LOAD">>],
+ [],
+ Methods)
+ of
{ok, Engine} ->
[{engine, Engine} |Config];
{error, Reason} ->
@@ -90,12 +97,14 @@ end_per_testcase(_TestCase, Config) ->
private_key(Config) when is_list(Config) ->
ClientFileBase = filename:join([proplists:get_value(priv_dir, Config), "client_engine"]),
ServerFileBase = filename:join([proplists:get_value(priv_dir, Config), "server_engine"]),
+ Ext = x509_test:extensions([{key_usage, [digitalSignature, keyEncipherment]}]),
#{server_config := ServerConf,
client_config := ClientConf} = GenCertData =
public_key:pkix_test_data(#{server_chain =>
#{root => [{key, ssl_test_lib:hardcode_rsa_key(1)}],
intermediates => [[{key, ssl_test_lib:hardcode_rsa_key(2)}]],
- peer => [{key, ssl_test_lib:hardcode_rsa_key(3)}
+ peer => [{extensions, Ext},
+ {key, ssl_test_lib:hardcode_rsa_key(3)}
]},
client_chain =>
#{root => [{key, ssl_test_lib:hardcode_rsa_key(4)}],
@@ -131,6 +140,12 @@ private_key(Config) when is_list(Config) ->
%% Test with engine
test_tls_connection(EngineServerConf, EngineClientConf, Config),
+ %% Test with engine and RSA key exchange
+ RSASuites = all_kex_rsa_suites([{tls_version, 'tlsv1.2'} | Config]),
+
+ test_tls_connection([{ciphers, RSASuites}, {versions, ['tlsv1.2']} | EngineServerConf],
+ [{ciphers, RSASuites}, {versions, ['tlsv1.2']} | EngineClientConf], Config),
+
+ %% Test with engine and file arguments present
test_tls_connection(EngineFileServerConf, EngineFileClientConf, Config),
@@ -160,3 +175,8 @@ test_tls_connection(ServerConf, ClientConf, Config) ->
ssl_test_lib:check_result(Server, ok, Client, ok),
ssl_test_lib:close(Server),
ssl_test_lib:close(Client).
+
+all_kex_rsa_suites(Config) ->
+ Version = proplists:get_value(tls_version, Config),
+ All = ssl:cipher_suites(all, Version),
+ ssl:filter_cipher_suites(All,[{key_exchange, fun(rsa) -> true;(_) -> false end}]).
diff --git a/lib/stdlib/doc/src/calendar.xml b/lib/stdlib/doc/src/calendar.xml
index 6b4fa7f98a..5aee635c38 100644
--- a/lib/stdlib/doc/src/calendar.xml
+++ b/lib/stdlib/doc/src/calendar.xml
@@ -403,7 +403,11 @@
default is <c>second</c>. If some other unit is given
(<c>millisecond</c>, <c>microsecond</c>, or
<c>nanosecond</c>), the formatted string includes a
- fraction of a second.</p>
+ fraction of a second. The number of fractional second
+ digits is three, six, or nine depending on which time unit
+ is chosen. Notice that trailing zeros are not removed from
+ the fraction.
+ </p>
</item>
</taglist>
<pre>
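For illustration, the fixed-width fraction described above as seen through calendar:system_time_to_rfc3339/2; the expected strings follow the updated expectations in calendar_SUITE below:

    calendar:system_time_to_rfc3339(1, [{unit, millisecond}, {offset, "Z"}]).
    %% "1970-01-01T00:00:00.001Z"
    calendar:system_time_to_rfc3339(1000000, [{unit, microsecond}, {offset, "Z"}]).
    %% "1970-01-01T00:00:01.000000Z"
    calendar:system_time_to_rfc3339(543210000, [{unit, nanosecond}, {offset, "Z"}]).
    %% "1970-01-01T00:00:00.543210000Z"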
diff --git a/lib/stdlib/doc/src/notes.xml b/lib/stdlib/doc/src/notes.xml
index d800885b16..039f087708 100644
--- a/lib/stdlib/doc/src/notes.xml
+++ b/lib/stdlib/doc/src/notes.xml
@@ -504,6 +504,21 @@
</section>
+<section><title>STDLIB 3.4.5.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>List subtraction (the <c>--</c> operator) will now
+ yield properly on large inputs.</p>
+ <p>
+ Own Id: OTP-15371</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>STDLIB 3.4.5</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -1658,6 +1673,21 @@
</section>
+<section><title>STDLIB 2.8.0.1</title>
+
+ <section><title>Improvements and New Features</title>
+ <list>
+ <item>
+ <p>List subtraction (the <c>--</c> operator) will now
+ yield properly on large inputs.</p>
+ <p>
+ Own Id: OTP-15371</p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
<section><title>STDLIB 2.8</title>
<section><title>Fixed Bugs and Malfunctions</title>
@@ -7827,4 +7857,3 @@
</section>
</section>
</chapter>
-
diff --git a/lib/stdlib/src/calendar.erl b/lib/stdlib/src/calendar.erl
index 9a600c1972..bb5d450cd6 100644
--- a/lib/stdlib/src/calendar.erl
+++ b/lib/stdlib/src/calendar.erl
@@ -693,14 +693,11 @@ local_offset(SystemTime, Unit) ->
UniversalSecs = datetime_to_gregorian_seconds(UniversalTime),
LocalSecs - UniversalSecs.
+fraction_str(1, _Time) ->
+ "";
fraction_str(Factor, Time) ->
- case Time rem Factor of
- 0 ->
- "";
- Fraction ->
- FS = io_lib:fwrite(".~*..0B", [log10(Factor), abs(Fraction)]),
- string:trim(FS, trailing, "0")
- end.
+ Fraction = Time rem Factor,
+ io_lib:fwrite(".~*..0B", [log10(Factor), abs(Fraction)]).
fraction(second, _) ->
0;
diff --git a/lib/stdlib/src/erl_lint.erl b/lib/stdlib/src/erl_lint.erl
index e9ac2fcdff..e0cd68617b 100644
--- a/lib/stdlib/src/erl_lint.erl
+++ b/lib/stdlib/src/erl_lint.erl
@@ -2262,8 +2262,7 @@ expr({'fun',Line,Body}, Vt, St) ->
{[],St};
{function,M,F,A} ->
%% New in R15.
- {Bvt, St1} = expr_list([M,F,A], Vt, St),
- {vtupdate(Bvt, Vt),St1}
+ expr_list([M,F,A], Vt, St)
end;
expr({named_fun,_,'_',Cs}, Vt, St) ->
fun_clauses(Cs, Vt, St);
diff --git a/lib/stdlib/test/calendar_SUITE.erl b/lib/stdlib/test/calendar_SUITE.erl
index 55118e251c..df62c0921d 100644
--- a/lib/stdlib/test/calendar_SUITE.erl
+++ b/lib/stdlib/test/calendar_SUITE.erl
@@ -183,14 +183,15 @@ rfc3339(Config) when is_list(Config) ->
D = [{time_designator, $\s}],
Z = [{offset, "Z"}],
- "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12T23:20:50.52Z", Ms),
- "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12t23:20:50.52z", Ms),
- "1985-04-12T21:20:50.52Z" =
+ "1985-04-12T23:20:50.520Z" = test_parse("1985-04-12T23:20:50.52Z", Ms),
+ "1985-04-12T23:20:50.520Z" = test_parse("1985-04-12t23:20:50.52z", Ms),
+ "1985-04-12T21:20:50.520Z" =
test_parse("1985-04-12T23:20:50.52+02:00", Ms),
"1985-04-12T23:20:50Z" = test_parse("1985-04-12T23:20:50.52Z", S),
- "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12T23:20:50.52Z", Ms),
- "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12t23:20:50.52z", Mys),
- "1985-04-12 21:20:50.52Z" =
+ "1985-04-12T23:20:50.520Z" = test_parse("1985-04-12T23:20:50.52Z", Ms),
+ "1985-04-12T23:20:50.520000Z" =
+ test_parse("1985-04-12t23:20:50.52z", Mys),
+ "1985-04-12 21:20:50.520000000Z" =
test_parse("1985-04-12 23:20:50.52+02:00", Ns++D),
"1985-04-12T23:20:50Z" = test_parse("1985-04-12T23:20:50.52Z"),
"1996-12-20T00:39:57Z" = test_parse("1996-12-19T16:39:57-08:00"),
@@ -221,17 +222,20 @@ rfc3339(Config) when is_list(Config) ->
"1970-01-02T00:00:00Z" = test_parse("1970-01-01T23:59:60Z"),
"1970-01-02T00:00:00Z" = test_parse("1970-01-01T23:59:60.5Z"),
"1970-01-02T00:00:00Z" = test_parse("1970-01-01T23:59:60.55Z"),
- "1970-01-02T00:00:00.55Z" = test_parse("1970-01-01T23:59:60.55Z", Ms),
- "1970-01-02T00:00:00.55Z" = test_parse("1970-01-01T23:59:60.55Z", Mys),
- "1970-01-02T00:00:00.55Z" = test_parse("1970-01-01T23:59:60.55Z", Ns),
+ "1970-01-02T00:00:00.550Z" = test_parse("1970-01-01T23:59:60.55Z", Ms),
+ "1970-01-02T00:00:00.550000Z" =
+ test_parse("1970-01-01T23:59:60.55Z", Mys),
+ "1970-01-02T00:00:00.550000000Z" =
+ test_parse("1970-01-01T23:59:60.55Z", Ns),
"1970-01-02T00:00:00.999999Z" =
test_parse("1970-01-01T23:59:60.999999Z", Mys),
- "1970-01-02T00:00:01Z" =
+ "1970-01-02T00:00:01.000Z" =
test_parse("1970-01-01T23:59:60.999999Z", Ms),
"1970-01-01T00:00:00Z" = test_parse("1970-01-01T00:00:00+00:00"),
"1970-01-01T00:00:00Z" = test_parse("1970-01-01T00:00:00-00:00"),
"1969-12-31T00:01:00Z" = test_parse("1970-01-01T00:00:00+23:59"),
- "1918-11-11T09:00:00Z" = test_parse("1918-11-11T11:00:00+02:00", Mys),
+ "1918-11-11T09:00:00.000000Z" =
+ test_parse("1918-11-11T11:00:00+02:00", Mys),
"1970-01-01T00:00:00.000001Z" =
test_parse("1970-01-01T00:00:00.000001Z", Mys),
@@ -242,26 +246,26 @@ rfc3339(Config) when is_list(Config) ->
test_time(erlang:system_time(millisecond), Ms),
test_time(erlang:system_time(microsecond), Mys++[{offset, "-02:20"}]),
- T = erlang:system_time(second),
- TS = do_format(T, []),
- TS = do_format(T * 1000, Ms),
- TS = do_format(T * 1000 * 1000, Mys),
- TS = do_format(T * 1000 * 1000 * 1000, Ns),
-
946720800 = TO = do_parse("2000-01-01 10:00:00Z", []),
Str = "2000-01-01T10:02:00+00:02",
Str = do_format(TO, [{offset, 120}]),
- Str = do_format(TO * 1000, [{offset, 120 * 1000}]++Ms),
- Str = do_format(TO * 1000 * 1000, [{offset, 120 * 1000 * 1000}]++Mys),
- Str = do_format(TO * 1000 * 1000 * 1000,
- [{offset, 120 * 1000 * 1000 * 1000}]++Ns),
+ "2000-01-01T10:02:00.000+00:02" =
+ do_format(TO * 1000, [{offset, 120 * 1000}]++Ms),
+ "2000-01-01T10:02:00.000000+00:02" =
+ do_format(TO * 1000 * 1000, [{offset, 120 * 1000 * 1000}]++Mys),
+ "2000-01-01T10:02:00.000000000+00:02" =
+ do_format(TO * 1000 * 1000 * 1000,
+ [{offset, 120 * 1000 * 1000 * 1000}]++Ns),
NStr = "2000-01-01T09:58:00-00:02",
NStr = do_format(TO, [{offset, -120}]),
- NStr = do_format(TO * 1000, [{offset, -120 * 1000}]++Ms),
- NStr = do_format(TO * 1000 * 1000, [{offset, -120 * 1000 * 1000}]++Mys),
- NStr = do_format(TO * 1000 * 1000 * 1000,
- [{offset, -120 * 1000 * 1000 * 1000}]++Ns),
+ "2000-01-01T09:58:00.000-00:02" =
+ do_format(TO * 1000, [{offset, -120 * 1000}]++Ms),
+ "2000-01-01T09:58:00.000000-00:02" =
+ do_format(TO * 1000 * 1000, [{offset, -120 * 1000 * 1000}]++Mys),
+ "2000-01-01T09:58:00.000000000-00:02" =
+ do_format(TO * 1000 * 1000 * 1000,
+ [{offset, -120 * 1000 * 1000 * 1000}]++Ns),
543210000 = do_parse("1970-01-01T00:00:00.54321Z", Ns),
54321000 = do_parse("1970-01-01T00:00:00.054321Z", Ns),
@@ -278,18 +282,18 @@ rfc3339(Config) when is_list(Config) ->
-1613833200000000 = do_parse("1918-11-11T11:00:00+02:00", Mys),
-1613833200000000 = do_parse("1918-11-11T09:00:00Z", Mys),
- "1970-01-01T00:00:00Z" = do_format_z(0, Mys),
+ "1970-01-01T00:00:00.000000Z" = do_format_z(0, Mys),
"1970-01-01T00:00:01Z" = do_format_z(1, S),
"1970-01-01T00:00:00.001Z" = do_format_z(1, Ms),
"1970-01-01T00:00:00.000001Z" = do_format_z(1, Mys),
"1970-01-01T00:00:00.000000001Z" = do_format_z(1, Ns),
- "1970-01-01T00:00:01Z" = do_format_z(1000000, Mys),
- "1970-01-01T00:00:00.54321Z" = do_format_z(543210, Mys),
+ "1970-01-01T00:00:01.000000Z" = do_format_z(1000000, Mys),
+ "1970-01-01T00:00:00.543210Z" = do_format_z(543210, Mys),
"1970-01-01T00:00:00.543Z" = do_format_z(543, Ms),
- "1970-01-01T00:00:00.54321Z" = do_format_z(543210000, Ns),
- "1970-01-01T00:00:06.54321Z" = do_format_z(6543210, Mys),
- "1979-06-21T12:12:12Z" = do_format_z(298815132000000, Mys),
- "1918-11-11T13:00:00Z" = do_format_z(-1613818800000000, Mys),
+ "1970-01-01T00:00:00.543210000Z" = do_format_z(543210000, Ns),
+ "1970-01-01T00:00:06.543210Z" = do_format_z(6543210, Mys),
+ "1979-06-21T12:12:12.000000Z" = do_format_z(298815132000000, Mys),
+ "1918-11-11T13:00:00.000000Z" = do_format_z(-1613818800000000, Mys),
ok.
%%
diff --git a/lib/stdlib/test/erl_lint_SUITE.erl b/lib/stdlib/test/erl_lint_SUITE.erl
index f9ab83a120..d18c7c255c 100644
--- a/lib/stdlib/test/erl_lint_SUITE.erl
+++ b/lib/stdlib/test/erl_lint_SUITE.erl
@@ -67,7 +67,8 @@
record_errors/1, otp_11879_cont/1,
non_latin1_module/1, otp_14323/1,
stacktrace_syntax/1,
- otp_14285/1, otp_14378/1]).
+ otp_14285/1, otp_14378/1,
+ external_funs/1]).
suite() ->
[{ct_hooks,[ts_install_cth]},
@@ -88,7 +89,7 @@ all() ->
maps, maps_type, maps_parallel_match,
otp_11851, otp_11879, otp_13230,
record_errors, otp_11879_cont, non_latin1_module, otp_14323,
- stacktrace_syntax, otp_14285, otp_14378].
+ stacktrace_syntax, otp_14285, otp_14378, external_funs].
groups() ->
[{unused_vars_warn, [],
@@ -4134,6 +4135,21 @@ otp_14285(Config) ->
run(Config, Ts),
ok.
+external_funs(Config) when is_list(Config) ->
+ Ts = [{external_funs_1,
+ %% ERL-762: Unused variable warning not being emitted.
+ <<"f() ->
+ BugVar = process_info(self()),
+ if true -> fun m:f/1 end.
+ f(M, F) ->
+ BugVar = process_info(self()),
+ if true -> fun M:F/1 end.">>,
+ [],
+ {warnings,[{2,erl_lint,{unused_var,'BugVar'}},
+ {5,erl_lint,{unused_var,'BugVar'}}]}}],
+ run(Config, Ts),
+ ok.
+
format_error(E) ->
lists:flatten(erl_lint:format_error(E)).
diff --git a/lib/stdlib/test/lists_SUITE.erl b/lib/stdlib/test/lists_SUITE.erl
index 9a94bcc012..984b51e7ae 100644
--- a/lib/stdlib/test/lists_SUITE.erl
+++ b/lib/stdlib/test/lists_SUITE.erl
@@ -2604,6 +2604,13 @@ subtract(Config) when is_list(Config) ->
%% certain thresholds, and we need proper coverage for all corner cases.
[sub_thresholds(N) || N <- lists:seq(0, 32)],
+ %% Trapping, both crashing and otherwise.
+ [sub_trapping(N) || N <- lists:seq(0, 18)],
+
+ %% The current implementation chooses which algorithm to use based on
+ %% certain thresholds, and we need proper coverage for all corner cases.
+ [sub_thresholds(N) || N <- lists:seq(0, 32)],
+
ok.
sub_non_matching(A, B) ->
diff --git a/lib/wx/api_gen/wx_gen_cpp.erl b/lib/wx/api_gen/wx_gen_cpp.erl
index 70a3530526..f13d5873a0 100644
--- a/lib/wx/api_gen/wx_gen_cpp.erl
+++ b/lib/wx/api_gen/wx_gen_cpp.erl
@@ -995,8 +995,13 @@ build_ret_types(Type,Ps) ->
end,
lists:foldl(Calc, Free, Ps).
-build_ret(Name,_,#type{base={class,Class},single=true}) ->
- w(" rt.addRef(getRef((void *)~s,memenv), \"~s\");~n",[Name,Class]);
+build_ret(Name,_D,#type{base={class,Class},single=true}=_T) ->
+ case Class of
+ "wxGraphicsContext" ->
+ w(" rt.addRef(getRef((void *)~s,memenv,8), \"~s\");~n",[Name,Class]);
+ _ ->
+ w(" rt.addRef(getRef((void *)~s,memenv), \"~s\");~n",[Name,Class])
+ end;
build_ret(Name,_,#type{name="wxTreeItemId",single=true}) ->
w(" rt.add((wxUIntPtr *) ~s.m_pItem);~n",[Name]);
build_ret(Name,_,#type{name="wxTreeItemIdValue",single=true}) ->
diff --git a/lib/wx/c_src/gen/wxe_funcs.cpp b/lib/wx/c_src/gen/wxe_funcs.cpp
index a7bac4cf9d..74961b2e5e 100644
--- a/lib/wx/c_src/gen/wxe_funcs.cpp
+++ b/lib/wx/c_src/gen/wxe_funcs.cpp
@@ -6147,18 +6147,18 @@ case wxGraphicsObject_IsNull: { // wxGraphicsObject::IsNull
case wxGraphicsContext_Create_1_1: { // wxGraphicsContext::Create
wxWindowDC * dc = (wxWindowDC *) getPtr(bp,memenv); bp += 4;
wxGraphicsContext * Result = (wxGraphicsContext*)wxGraphicsContext::Create(*dc);
- rt.addRef(getRef((void *)Result,memenv), "wxGraphicsContext");
+ rt.addRef(getRef((void *)Result,memenv,8), "wxGraphicsContext");
break;
}
case wxGraphicsContext_Create_1_0: { // wxGraphicsContext::Create
wxWindow *window = (wxWindow *) getPtr(bp,memenv); bp += 4;
wxGraphicsContext * Result = (wxGraphicsContext*)wxGraphicsContext::Create(window);
- rt.addRef(getRef((void *)Result,memenv), "wxGraphicsContext");
+ rt.addRef(getRef((void *)Result,memenv,8), "wxGraphicsContext");
break;
}
case wxGraphicsContext_Create_0: { // wxGraphicsContext::Create
wxGraphicsContext * Result = (wxGraphicsContext*)wxGraphicsContext::Create();
- rt.addRef(getRef((void *)Result,memenv), "wxGraphicsContext");
+ rt.addRef(getRef((void *)Result,memenv,8), "wxGraphicsContext");
break;
}
case wxGraphicsContext_CreatePen: { // wxGraphicsContext::CreatePen
@@ -6999,7 +6999,7 @@ case wxGraphicsRenderer_CreateContext_1_1: { // wxGraphicsRenderer::CreateContex
wxWindowDC * dc = (wxWindowDC *) getPtr(bp,memenv); bp += 4;
if(!This) throw wxe_badarg(0);
wxGraphicsContext * Result = (wxGraphicsContext*)This->CreateContext(*dc);
- rt.addRef(getRef((void *)Result,memenv), "wxGraphicsContext");
+ rt.addRef(getRef((void *)Result,memenv,8), "wxGraphicsContext");
break;
}
case wxGraphicsRenderer_CreateContext_1_0: { // wxGraphicsRenderer::CreateContext
@@ -7007,7 +7007,7 @@ case wxGraphicsRenderer_CreateContext_1_0: { // wxGraphicsRenderer::CreateContex
wxWindow *window = (wxWindow *) getPtr(bp,memenv); bp += 4;
if(!This) throw wxe_badarg(0);
wxGraphicsContext * Result = (wxGraphicsContext*)This->CreateContext(window);
- rt.addRef(getRef((void *)Result,memenv), "wxGraphicsContext");
+ rt.addRef(getRef((void *)Result,memenv,8), "wxGraphicsContext");
break;
}
diff --git a/lib/wx/c_src/wxe_impl.cpp b/lib/wx/c_src/wxe_impl.cpp
index 1510866f09..f856099ffa 100644
--- a/lib/wx/c_src/wxe_impl.cpp
+++ b/lib/wx/c_src/wxe_impl.cpp
@@ -591,7 +591,7 @@ int WxeApp::newPtr(void * ptr, int type, wxeMemEnv *memenv) {
return ref;
}
-int WxeApp::getRef(void * ptr, wxeMemEnv *memenv) {
+int WxeApp::getRef(void * ptr, wxeMemEnv *memenv, int type) {
if(!ptr) return 0; // NULL and zero is the same
ptrMap::iterator it = ptr2ref.find(ptr);
if(it != ptr2ref.end()) {
@@ -618,7 +618,7 @@ int WxeApp::getRef(void * ptr, wxeMemEnv *memenv) {
}
memenv->ref2ptr[ref] = ptr;
- ptr2ref[ptr] = new wxeRefData(ref, 0, false, memenv);
+ ptr2ref[ptr] = new wxeRefData(ref, type, false, memenv);
return ref;
}
diff --git a/lib/wx/c_src/wxe_impl.h b/lib/wx/c_src/wxe_impl.h
index 140a2bd36a..69cc81c429 100644
--- a/lib/wx/c_src/wxe_impl.h
+++ b/lib/wx/c_src/wxe_impl.h
@@ -84,7 +84,7 @@ public:
wxeMemEnv * getMemEnv(ErlDrvTermData port);
int newPtr(void * ptr, int type, wxeMemEnv *memenv);
- int getRef(void * ptr, wxeMemEnv *memenv);
+ int getRef(void * ptr, wxeMemEnv *memenv, int type = 0);
void * getPtr(char * bp, wxeMemEnv *memenv);
void clearPtr(void *ptr);
wxeRefData * getRefData(void *ptr);