Diffstat (limited to 'lib')
-rw-r--r--  lib/common_test/src/ct_run.erl                            |   15
-rw-r--r--  lib/crypto/c_src/crypto.c                                 |  338
-rwxr-xr-x  lib/diameter/bin/diameterc                                |    6
-rw-r--r--  lib/diameter/src/Makefile                                 |    5
-rw-r--r--  lib/diameter/src/app.sed                                  |   41
-rw-r--r--  lib/diameter/src/base/diameter_codec.erl                  |   12
-rw-r--r--  lib/diameter/src/base/diameter_config.erl                 |   14
-rw-r--r--  lib/diameter/src/base/diameter_gen.erl                    |   19
-rw-r--r--  lib/diameter/src/base/diameter_internal.hrl               |    5
-rw-r--r--  lib/diameter/src/base/diameter_lib.erl                    |   15
-rw-r--r--  lib/diameter/src/base/diameter_service.erl                |   20
-rw-r--r--  lib/diameter/src/compiler/diameter_make.erl               |    6
-rw-r--r--  lib/diameter/src/diameter.app.src                         |   17
-rw-r--r--  lib/diameter/src/transport/diameter_tcp.erl               |    4
-rw-r--r--  lib/diameter/test/diameter_app_SUITE.erl                  |   14
-rw-r--r--  lib/diameter/test/diameter_traffic_SUITE.erl              |    6
-rw-r--r--  lib/diameter/test/diameter_watchdog_SUITE.erl             |    9
-rw-r--r--  lib/hipe/doc/src/hipe_app.xml                             |   62
-rw-r--r--  lib/hipe/rtl/hipe_rtl_lcm.erl                             |    9
-rw-r--r--  lib/hipe/test/basic_SUITE_data/basic_issues_hipe.erl      |   28
-rw-r--r--  lib/kernel/doc/src/application.xml                        |   10
-rw-r--r--  lib/kernel/doc/src/config.xml                             |    9
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml                     |  263
-rw-r--r--  lib/kernel/doc/src/logger_disk_log_h.xml                  |  134
-rw-r--r--  lib/kernel/doc/src/logger_std_h.xml                       |  104
-rw-r--r--  lib/kernel/src/application_controller.erl                 |   30
-rw-r--r--  lib/kernel/src/application_master.erl                     |    4
-rw-r--r--  lib/kernel/src/logger_disk_log_h.erl                      |   12
-rw-r--r--  lib/kernel/src/logger_h_common.erl                        |    4
-rw-r--r--  lib/kernel/src/logger_h_common.hrl                        |    4
-rw-r--r--  lib/kernel/src/logger_std_h.erl                           |   12
-rw-r--r--  lib/kernel/test/application_SUITE.erl                     |   41
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl               |   65
-rw-r--r--  lib/kernel/test/logger_env_var_SUITE.erl                  |    2
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl                    |   63
-rw-r--r--  lib/kernel/test/logger_test_lib.erl                       |    4
-rw-r--r--  lib/public_key/doc/src/public_key.xml                     |   57
-rw-r--r--  lib/public_key/doc/src/using_public_key.xml               |    4
-rw-r--r--  lib/snmp/doc/src/notes.xml                                |   18
-rw-r--r--  lib/snmp/vsn.mk                                           |    2
-rw-r--r--  lib/ssl/doc/src/ssl.xml                                   |  142
-rw-r--r--  lib/ssl/doc/src/using_ssl.xml                             |    6
-rw-r--r--  lib/ssl/src/ssl.erl                                       |   12
-rw-r--r--  lib/ssl/test/ssl_test_lib.erl                             |   16
-rw-r--r--  lib/stdlib/src/gen_statem.erl                             |    4
45 files changed, 899 insertions, 768 deletions
diff --git a/lib/common_test/src/ct_run.erl b/lib/common_test/src/ct_run.erl
index 223aa06e64..c9d406f1fd 100644
--- a/lib/common_test/src/ct_run.erl
+++ b/lib/common_test/src/ct_run.erl
@@ -1909,7 +1909,8 @@ auto_compile(TestSuites) ->
SuiteMakeErrors =
lists:flatmap(fun({TestDir,Suite} = TS) ->
case run_make(suites, TestDir,
- Suite, UserInclude) of
+ Suite, UserInclude,
+ [nowarn_export_all]) of
{error,{make_failed,Bad}} ->
[{TS,Bad}];
{error,_} ->
@@ -1927,7 +1928,7 @@ auto_compile(TestSuites) ->
case lists:member(Dir, Done) of
false ->
Failed1 =
- case run_make(helpmods, Dir, Suite, UserInclude) of
+ case run_make(helpmods, Dir, Suite, UserInclude, []) of
{error,{make_failed,BadMods}} ->
[{{Dir,all},BadMods}|Failed];
{error,_} ->
@@ -2669,12 +2670,12 @@ get_name(Dir) ->
run_make(TestDir, Mod, UserInclude) ->
- run_make(suites, TestDir, Mod, UserInclude).
+ run_make(suites, TestDir, Mod, UserInclude, [nowarn_export_all]).
-run_make(Targets, TestDir0, Mod, UserInclude) when is_list(Mod) ->
- run_make(Targets, TestDir0, list_to_atom(Mod), UserInclude);
+run_make(Targets, TestDir0, Mod, UserInclude, COpts) when is_list(Mod) ->
+ run_make(Targets, TestDir0, list_to_atom(Mod), UserInclude, COpts);
-run_make(Targets, TestDir0, Mod, UserInclude) ->
+run_make(Targets, TestDir0, Mod, UserInclude, COpts) ->
case locate_test_dir(TestDir0, Mod) of
{ok,TestDir} ->
%% send a start_make notification which may suspend
@@ -2689,7 +2690,7 @@ run_make(Targets, TestDir0, Mod, UserInclude) ->
XmerlInclude = get_dir(xmerl, "include"),
ErlFlags = UserInclude ++ [{i,CtInclude},
{i,XmerlInclude},
- debug_info],
+ debug_info] ++ COpts,
Result =
if Mod == all ; Targets == helpmods ->
case (catch ct_make:all([noexec|ErlFlags])) of
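
The hunks above thread an extra list of compiler options through run_make/5 so
that test suites are compiled with nowarn_export_all while help modules are
not. As a minimal sketch of what the added option does in an ordinary compile
(the suite name and include path below are illustrative, not from the patch):

    %% Without nowarn_export_all, a module containing -compile(export_all)
    %% is compiled with a warning; the added option silences that warning.
    compile:file("my_SUITE.erl",
                 [{i, "/otp/lib/common_test/include"},  %% illustrative path
                  debug_info,
                  nowarn_export_all]).
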
diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c
index e4eb2bc3b6..0a2a235209 100644
--- a/lib/crypto/c_src/crypto.c
+++ b/lib/crypto/c_src/crypto.c
@@ -3068,202 +3068,187 @@ static ERL_NIF_TERM rsa_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF
static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (PrivKey|undefined, DHParams=[P,G], Mpint, Len|0) */
- DH* dh_params = NULL;
- int pub_len, prv_len;
- unsigned char *pub_ptr, *prv_ptr;
- ERL_NIF_TERM ret, ret_pub, ret_prv, head, tail;
+ DH *dh_params = NULL;
int mpint; /* 0 or 4 */
- BIGNUM *priv_key_in = NULL;
- BIGNUM *dh_p = NULL, *dh_g = NULL;
- unsigned long len = 0;
-#ifdef HAS_EVP_PKEY_CTX
- EVP_PKEY_CTX *ctx = NULL;
- EVP_PKEY *dhkey = NULL,
- *params = NULL;
-#endif
- if (!(get_bn_from_bin(env, argv[0], &priv_key_in)
- || argv[0] == atom_undefined)
- || !enif_get_list_cell(env, argv[1], &head, &tail)
- || !get_bn_from_bin(env, head, &dh_p)
- || !enif_get_list_cell(env, tail, &head, &tail)
- || !get_bn_from_bin(env, head, &dh_g)
- || !enif_is_empty_list(env, tail)
- || !enif_get_int(env, argv[2], &mpint) || (mpint & ~4)
- || !enif_get_ulong(env, argv[3], &len)
-
- /* Load dh_params with values to use by the generator.
- Mem mgmnt transfered from dh_p etc to dh_params */
- || !(dh_params = DH_new())
- || (priv_key_in && !DH_set0_key(dh_params, NULL, priv_key_in))
- || !DH_set0_pqg(dh_params, dh_p, NULL, dh_g)
- ) {
- if (priv_key_in) BN_free(priv_key_in);
- if (dh_p) BN_free(dh_p);
- if (dh_g) BN_free(dh_g);
- if (dh_params) DH_free(dh_params);
- return enif_make_badarg(env);
- }
-
- if (len) {
- if (len < BN_num_bits(dh_p))
- DH_set_length(dh_params, len);
- else {
+ {
+ ERL_NIF_TERM head, tail;
+ BIGNUM
+ *dh_p = NULL,
+ *dh_g = NULL,
+ *priv_key_in = NULL;
+ unsigned long
+ len = 0;
+
+ if (!(get_bn_from_bin(env, argv[0], &priv_key_in)
+ || argv[0] == atom_undefined)
+ || !enif_get_list_cell(env, argv[1], &head, &tail)
+ || !get_bn_from_bin(env, head, &dh_p)
+ || !enif_get_list_cell(env, tail, &head, &tail)
+ || !get_bn_from_bin(env, head, &dh_g)
+ || !enif_is_empty_list(env, tail)
+ || !enif_get_int(env, argv[2], &mpint) || (mpint & ~4)
+ || !enif_get_ulong(env, argv[3], &len)
+
+ /* Load dh_params with values to use by the generator.
+ Mem mgmnt transfered from dh_p etc to dh_params */
+ || !(dh_params = DH_new())
+ || (priv_key_in && !DH_set0_key(dh_params, NULL, priv_key_in))
+ || !DH_set0_pqg(dh_params, dh_p, NULL, dh_g)
+ ) {
if (priv_key_in) BN_free(priv_key_in);
if (dh_p) BN_free(dh_p);
if (dh_g) BN_free(dh_g);
if (dh_params) DH_free(dh_params);
return enif_make_badarg(env);
}
+
+ if (len) {
+ if (len < BN_num_bits(dh_p))
+ DH_set_length(dh_params, len);
+ else {
+ if (priv_key_in) BN_free(priv_key_in);
+ if (dh_p) BN_free(dh_p);
+ if (dh_g) BN_free(dh_g);
+ if (dh_params) DH_free(dh_params);
+ return enif_make_badarg(env);
+ }
+ }
}
#ifdef HAS_EVP_PKEY_CTX
- if ((dhkey = EVP_PKEY_new())
- && (params = EVP_PKEY_new())
- && EVP_PKEY_set1_DH(params, dh_params) /* set the key referenced by params to dh_params.
- dh_params (and params) must be freed */
- && (ctx = EVP_PKEY_CTX_new(params, NULL))
- && EVP_PKEY_keygen_init(ctx)
- && EVP_PKEY_keygen(ctx, &dhkey) /* "performs a key generation operation, the
- generated key is written to ppkey." (=last arg) */
- && (dh_params = EVP_PKEY_get1_DH(dhkey)) /* return the referenced key. dh_params and dhkey must be freed */
- ) {
+ {
+ EVP_PKEY_CTX *ctx;
+ EVP_PKEY *dhkey, *params;
+ int success;
+
+ params = EVP_PKEY_new();
+ success = EVP_PKEY_set1_DH(params, dh_params); /* set the key referenced by params to dh_params... */
+ DH_free(dh_params); /* ...dh_params (and params) must be freed */
+ if (!success) return atom_error;
+
+ ctx = EVP_PKEY_CTX_new(params, NULL);
+ EVP_PKEY_free(params);
+ if (!ctx) {
+ return atom_error;
+ }
+
+ if (!EVP_PKEY_keygen_init(ctx)) {
+ /* EVP_PKEY_CTX_free(ctx); */
+ return atom_error;
+ }
+
+ dhkey = EVP_PKEY_new();
+ if (!EVP_PKEY_keygen(ctx, &dhkey)) { /* "performs a key generation operation, the ... */
+ /*... generated key is written to ppkey." (=last arg) */
+ /* EVP_PKEY_CTX_free(ctx); */
+ /* EVP_PKEY_free(dhkey); */
+ return atom_error;
+ }
+
+ dh_params = EVP_PKEY_get1_DH(dhkey); /* return the referenced key. dh_params and dhkey must be freed */
+ EVP_PKEY_free(dhkey);
+ if (!dh_params) {
+ /* EVP_PKEY_CTX_free(ctx); */
+ return atom_error;
+ }
+ EVP_PKEY_CTX_free(ctx);
+ }
#else
- if (DH_generate_key(dh_params)) {
-#endif
+ if (!DH_generate_key(dh_params)) return atom_error;
+#endif
+ {
+ unsigned char *pub_ptr, *prv_ptr;
+ int pub_len, prv_len;
+ ERL_NIF_TERM ret_pub, ret_prv;
const BIGNUM *pub_key_gen, *priv_key_gen;
-
- DH_get0_key(dh_params,
- &pub_key_gen, &priv_key_gen); /* Get pub_key_gen and priv_key_gen.
- "The values point to the internal representation of
- the public key and private key values. This memory
+
+ DH_get0_key(dh_params,
+ &pub_key_gen, &priv_key_gen); /* Get pub_key_gen and priv_key_gen.
+ "The values point to the internal representation of
+ the public key and private key values. This memory
should not be freed directly." says man */
- pub_len = BN_num_bytes(pub_key_gen);
- prv_len = BN_num_bytes(priv_key_gen);
- pub_ptr = enif_make_new_binary(env, pub_len+mpint, &ret_pub);
- prv_ptr = enif_make_new_binary(env, prv_len+mpint, &ret_prv);
- if (mpint) {
- put_int32(pub_ptr, pub_len); pub_ptr += 4;
- put_int32(prv_ptr, prv_len); prv_ptr += 4;
- }
- BN_bn2bin(pub_key_gen, pub_ptr);
- BN_bn2bin(priv_key_gen, prv_ptr);
- ERL_VALGRIND_MAKE_MEM_DEFINED(pub_ptr, pub_len);
- ERL_VALGRIND_MAKE_MEM_DEFINED(prv_ptr, prv_len);
- ret = enif_make_tuple2(env, ret_pub, ret_prv);
- }
- else {
- ret = atom_error;
- }
+ pub_len = BN_num_bytes(pub_key_gen);
+ prv_len = BN_num_bytes(priv_key_gen);
+ pub_ptr = enif_make_new_binary(env, pub_len+mpint, &ret_pub);
+ prv_ptr = enif_make_new_binary(env, prv_len+mpint, &ret_prv);
+ if (mpint) {
+ put_int32(pub_ptr, pub_len); pub_ptr += 4;
+ put_int32(prv_ptr, prv_len); prv_ptr += 4;
+ }
+ BN_bn2bin(pub_key_gen, pub_ptr);
+ BN_bn2bin(priv_key_gen, prv_ptr);
+ ERL_VALGRIND_MAKE_MEM_DEFINED(pub_ptr, pub_len);
+ ERL_VALGRIND_MAKE_MEM_DEFINED(prv_ptr, prv_len);
- DH_free(dh_params);
-#ifdef HAS_EVP_PKEY_CTX
- if (ctx) EVP_PKEY_CTX_free(ctx);
- if (dhkey) EVP_PKEY_free(dhkey);
- if (params) EVP_PKEY_free(params);
-#endif
- return ret;
+ DH_free(dh_params);
+
+ return enif_make_tuple2(env, ret_pub, ret_prv);
+ }
}
static ERL_NIF_TERM dh_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (OthersPublicKey, MyPrivateKey, DHParams=[P,G]) */
- BIGNUM *dummy_pub_key = NULL,
- *priv_key = NULL,
- *other_pub_key = NULL,
+ BIGNUM *other_pub_key = NULL,
*dh_p = NULL,
*dh_g = NULL;
- ErlNifBinary ret_bin;
- ERL_NIF_TERM ret, head, tail;
- DH *dh_priv = DH_new(), *dh_pub = DH_new();
-#ifdef HAS_EVP_PKEY_CTX
- EVP_PKEY_CTX *ctx = NULL;
- EVP_PKEY *my_priv_key = NULL, *peer_pub_key = NULL;
- size_t skeylen;
-#else
- int i;
-#endif
+ DH *dh_priv = DH_new();
- if (!get_bn_from_bin(env, argv[0], &other_pub_key)
- || !get_bn_from_bin(env, argv[1], &priv_key)
- || !enif_get_list_cell(env, argv[2], &head, &tail)
- || !get_bn_from_bin(env, head, &dh_p)
- || !enif_get_list_cell(env, tail, &head, &tail)
- || !get_bn_from_bin(env, head, &dh_g)
- || !enif_is_empty_list(env, tail)
-
- /* Note: DH_set0_key() does not allow setting only the
- * private key, although DH_compute_key() does not use the
- * public key. Work around this limitation by setting
- * the public key to a copy of the private key.
- */
- || !(dummy_pub_key = BN_dup(priv_key))
- || !DH_set0_key(dh_priv, dummy_pub_key, priv_key)
- || !DH_set0_pqg(dh_priv, dh_p, NULL, dh_g)
- ) {
- if (dh_p) BN_free(dh_p);
- if (dh_g) BN_free(dh_g);
- if (other_pub_key) BN_free(other_pub_key);
- if (dummy_pub_key) BN_free(dummy_pub_key);
- if (priv_key) BN_free(priv_key);
- return enif_make_badarg(env);
- }
-
-#ifdef HAS_EVP_PKEY_CTX
- if (!(my_priv_key = EVP_PKEY_new())
- || !EVP_PKEY_set1_DH(my_priv_key, dh_priv) /* set the key referenced by my_priv_key to dh_priv.
- dh_priv (and my_priv_key) must be freed by us*/
-
- || !(peer_pub_key = EVP_PKEY_new())
- || !DH_set0_key(dh_pub, other_pub_key, NULL)
- || !DH_set0_pqg(dh_pub, dh_p, NULL, dh_g)
- || !EVP_PKEY_set1_DH(peer_pub_key, dh_pub)
-
- || !(ctx = EVP_PKEY_CTX_new(my_priv_key, NULL))
- || (EVP_PKEY_derive_init(ctx) <= 0)
- || (EVP_PKEY_derive_set_peer(ctx, peer_pub_key) <= 0)
- || (EVP_PKEY_derive(ctx, NULL, &skeylen) <= 0)) {
+ /* Check the arguments and get
+ my private key (dh_priv),
+ the peer's public key (other_pub_key),
+ the parameters p & q
+ */
- ret = atom_error;
- }
- else {
- enif_alloc_binary(skeylen, &ret_bin);
-
- if ((EVP_PKEY_derive(ctx, ret_bin.data, &skeylen) > 0)
- && (ret_bin.size >= skeylen)) {
- /* Derivation succeded */
- if (ret_bin.size > skeylen) enif_realloc_binary(&ret_bin, skeylen);
- ret = enif_make_binary(env, &ret_bin);
+ {
+ BIGNUM *dummy_pub_key = NULL,
+ *priv_key = NULL;
+ ERL_NIF_TERM head, tail;
+
+ if (!get_bn_from_bin(env, argv[0], &other_pub_key)
+ || !get_bn_from_bin(env, argv[1], &priv_key)
+ || !enif_get_list_cell(env, argv[2], &head, &tail)
+ || !get_bn_from_bin(env, head, &dh_p)
+ || !enif_get_list_cell(env, tail, &head, &tail)
+ || !get_bn_from_bin(env, head, &dh_g)
+ || !enif_is_empty_list(env, tail)
+
+ /* Note: DH_set0_key() does not allow setting only the
+ * private key, although DH_compute_key() does not use the
+ * public key. Work around this limitation by setting
+ * the public key to a copy of the private key.
+ */
+ || !(dummy_pub_key = BN_dup(priv_key))
+ || !DH_set0_key(dh_priv, dummy_pub_key, priv_key)
+ || !DH_set0_pqg(dh_priv, dh_p, NULL, dh_g)
+ ) {
+ if (dh_p) BN_free(dh_p);
+ if (dh_g) BN_free(dh_g);
+ if (other_pub_key) BN_free(other_pub_key);
+ if (dummy_pub_key) BN_free(dummy_pub_key);
+ if (priv_key) BN_free(priv_key);
+ return enif_make_badarg(env);
}
- else {
+ }
+ {
+ ErlNifBinary ret_bin;
+ int size;
+
+ enif_alloc_binary(DH_size(dh_priv), &ret_bin);
+ size = DH_compute_key(ret_bin.data, other_pub_key, dh_priv);
+ BN_free(other_pub_key);
+ DH_free(dh_priv);
+ if (size<=0) {
enif_release_binary(&ret_bin);
- ret = atom_error;
+ return atom_error;
}
- }
-#else
- enif_alloc_binary(DH_size(dh_priv), &ret_bin);
- i = DH_compute_key(ret_bin.data, other_pub_key, dh_priv);
- if (i > 0) {
- if (i != ret_bin.size) enif_realloc_binary(&ret_bin, i);
- ret = enif_make_binary(env, &ret_bin);
+ if (size != ret_bin.size) enif_realloc_binary(&ret_bin, size);
+ return enif_make_binary(env, &ret_bin);
}
- else {
- enif_release_binary(&ret_bin);
- ret = atom_error;
- }
-#endif
-
- if (other_pub_key) BN_free(other_pub_key);
- if (dh_priv) DH_free(dh_priv);
- if (dh_pub) DH_free(dh_pub);
-#ifdef HAS_EVP_PKEY_CTX
- if (ctx) EVP_PKEY_CTX_free(ctx);
- if (my_priv_key) EVP_PKEY_free(my_priv_key);
- /* if (peer_pub_key) EVP_PKEY_free(peer_pub_key); */
-#endif
- return ret;
}
+
static ERL_NIF_TERM srp_value_B_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (Multiplier, Verifier, Generator, Exponent, Prime) */
BIGNUM *bn_verifier = NULL;
@@ -3919,13 +3904,6 @@ static ERL_NIF_TERM evp_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_
}
return enif_make_binary(env, &key_bin);
-
-/* Importing the other side's public key from raw binary format can be done with the EVP_PKEY_new_raw_public_key() function. Man page here: */
-/* https://www.openssl.org/docs/man1.1.1/man3/EVP_PKEY_new_raw_public_key.html */
-
-
-/* You need two EVP_PKEY objects. One containing your private/public key pair (i.e. the one you generated in the EVP_PKEY_keygen() call in your question), and one containing the public key of the peer (e.g. created using EVP_PKEY_new_raw_public_key()). To generate the X25519 shared secret you then call EVP_PKEY_derive(). See the example on the man page: openssl.org/docs/man1.1.1/man3/EVP_PKEY_derive.html – Matt Caswell May 15 at 20:39 */
-
#else
return atom_notsup;
#endif
@@ -3951,20 +3929,6 @@ static ERL_NIF_TERM evp_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF
if (!EVP_PKEY_keygen_init(ctx)) return enif_make_atom(env,"EVP_PKEY_keygen_init failed");
if (!EVP_PKEY_keygen(ctx, &pkey)) return enif_make_atom(env,"EVP_PKEY_keygen failed");
- /*
- int EVP_PKEY_get_raw_private_key(const EVP_PKEY *pkey, unsigned char *priv, size_t *len)
- int EVP_PKEY_get_raw_public_key(const EVP_PKEY *pkey, unsigned char *pub, size_t *len)
-
- +EVP_PKEY_get_raw_private_key() fills the buffer provided by B<priv> with raw
- +private key data. The number of bytes written is populated in B<*len>. If the
- +buffer B<priv> is NULL then B<*len> is populated with the number of bytes
- +required in the buffer. The calling application is responsible for ensuring that
- +the buffer is large enough to receive the private key data. This function only
- +works for algorithms that support raw private keys. Currently this is:
- +B<EVP_PKEY_HMAC>, B<EVP_PKEY_POLY1305>, B<EVP_PKEY_SIPHASH>, B<EVP_PKEY_X25519>,
- +B<EVP_PKEY_ED25519>, B<EVP_PKEY_X448> or B<EVP_PKEY_ED448>.
- */
-
if (!EVP_PKEY_get_raw_public_key(pkey, NULL, &key_len))
return enif_make_atom(env,"EVP_PKEY_get_raw_public_key 1 failed");
if (!EVP_PKEY_get_raw_public_key(pkey,
diff --git a/lib/diameter/bin/diameterc b/lib/diameter/bin/diameterc
index 3dbd238c19..4d415ece78 100755
--- a/lib/diameter/bin/diameterc
+++ b/lib/diameter/bin/diameterc
@@ -4,7 +4,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -78,8 +78,8 @@ compile(#argv{file = File, options = Opts, output = Out}) ->
error_msg(diameter_make:format_error(Reason), []),
1
catch
- error: Reason ->
- error_msg("ERROR: ~p~n ~p", [Reason, erlang:get_stacktrace()]),
+ error: Reason: Stack ->
+ error_msg("ERROR: ~p~n ~p", [Reason, Stack]),
2
end.
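
The same conversion from erlang:get_stacktrace/0 to the error:Reason:Stack
catch syntax recurs throughout the diameter changes below. A minimal
self-contained sketch of the newer form (risky/0 is a hypothetical function
used only for illustration):

    try
        risky()
    catch
        error:Reason:Stack ->      %% stacktrace bound in the clause head
            io:format("ERROR: ~p~n ~p~n", [Reason, Stack]),
            {error, Reason}
    end.
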
diff --git a/lib/diameter/src/Makefile b/lib/diameter/src/Makefile
index 3af856f63e..f78a163591 100644
--- a/lib/diameter/src/Makefile
+++ b/lib/diameter/src/Makefile
@@ -149,7 +149,7 @@ gen/$(DICT_YRL).erl: compiler/$(DICT_YRL).yrl
$(ERLC) -Werror -o $(@D) $<
# Generate the app file.
-$(APP_TARGET): $(APP_SRC) ../vsn.mk modules.mk app.sed
+$(APP_TARGET): $(APP_SRC) ../vsn.mk modules.mk
$(gen_verbose) \
M=`echo $(notdir $(APP_MODULES)) | tr ' ' ,`; \
C=`echo $(COMPILER_MODULES) | tr ' ' ,`; \
@@ -160,8 +160,7 @@ $(APP_TARGET): $(APP_SRC) ../vsn.mk modules.mk app.sed
-e "s;%COMPILER%;$$C;" \
-e "s;%INFO%;$$I;" \
-e "s;%REGISTERED%;$$R;" \
- $< \
- | sed -f app.sed > $@
+ $< > $@
$(APPUP_TARGET): $(APPUP_SRC) ../vsn.mk
$(vsn_verbose) \
diff --git a/lib/diameter/src/app.sed b/lib/diameter/src/app.sed
deleted file mode 100644
index dd3806f5f1..0000000000
--- a/lib/diameter/src/app.sed
+++ /dev/null
@@ -1,41 +0,0 @@
-#
-# %CopyrightBegin%
-#
-# Copyright Ericsson AB 2014-2016. All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-#
-# %CopyrightEnd%
-
-#
-# Generate runtime_dependencies from applications to avoid having to
-# specify the same application more than once.
-#
-
-/{runtime_dependencies,/b v
-/{[-a-z]*, "[0-9.]*"}/!b
-/{vsn,/b
-
-/%%/!H
-s/{\([^,]*\)[^}]*}/\1/g
-s/%%/%,/
-b
-
-:v
-
-p
-x
-s/\n//
-s/%//g
-s/\n */ /g
-s/{\([^,]*\), "\([^"]*"\)}/"\1-\2/g
diff --git a/lib/diameter/src/base/diameter_codec.erl b/lib/diameter/src/base/diameter_codec.erl
index 2dd2c906a2..5c0a24cdf6 100644
--- a/lib/diameter/src/base/diameter_codec.erl
+++ b/lib/diameter/src/base/diameter_codec.erl
@@ -92,8 +92,8 @@ encode(Mod, Opts, #diameter_packet{} = Pkt) ->
%% count encode errors.
?LOG(encode_error, {Reason, Stack, H}),
exit({?MODULE, encode, T});
- error: Reason ->
- T = {Reason, diameter_lib:get_stacktrace()},
+ error: Reason: Stack ->
+ T = {Reason, diameter_lib:stacktrace(Stack)},
?LOG(encode_error, T),
exit({?MODULE, encode, T})
end;
@@ -134,8 +134,8 @@ enc(_, Opts, #diameter_packet{msg = [#diameter_header{} = Hdr | As]}
Eid:32,
Bin/binary>>}
catch
- error: Reason ->
- exit({Reason, diameter_lib:get_stacktrace(), Hdr})
+ error: Reason: Stack ->
+ exit({Reason, diameter_lib:stacktrace(Stack), Hdr})
end;
enc(Mod, Opts, #diameter_packet{header = Hdr0, msg = Msg} = Pkt) ->
@@ -179,14 +179,14 @@ enc(Mod, Opts, #diameter_packet{header = Hdr0, msg = Msg} = Pkt) ->
Eid:32,
Bin/binary>>}
catch
- error: Reason ->
+ error: Reason: Stack ->
Hdr = Hdr0#diameter_header{cmd_code = Code,
application_id = Aid,
is_request = RB,
is_proxiable = PB,
is_error = EB,
is_retransmitted = TB},
- exit({Reason, diameter_lib:get_stacktrace(), Hdr})
+ exit({Reason, diameter_lib:stacktrace(Stack), Hdr})
end.
%% values/1
diff --git a/lib/diameter/src/base/diameter_config.erl b/lib/diameter/src/base/diameter_config.erl
index 90a9282349..36ae4c2276 100644
--- a/lib/diameter/src/base/diameter_config.erl
+++ b/lib/diameter/src/base/diameter_config.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -633,8 +633,8 @@ opt(service, {K, F})
Nodes ->
is_list(Nodes) orelse {error, Nodes}
catch
- E:R ->
- {error, {E, R, ?STACK}}
+ E:R:Stack ->
+ {error, {E, R, Stack}}
end;
opt(service, {sequence, {H,N}}) ->
@@ -651,8 +651,8 @@ opt(service = S, {sequence = K, F}) ->
V ->
{error, V}
catch
- E:R ->
- {error, {E, R, ?STACK}}
+ E:R:Stack ->
+ {error, {E, R, Stack}}
end;
opt(transport, {transport_module, M}) ->
@@ -932,8 +932,8 @@ cb(M,F) ->
try M:F() of
V -> V
catch
- E: Reason ->
- ?THROW({callback, E, Reason, ?STACK})
+ E: Reason: Stack ->
+ ?THROW({callback, E, Reason, Stack})
end.
%% call/1
diff --git a/lib/diameter/src/base/diameter_gen.erl b/lib/diameter/src/base/diameter_gen.erl
index 93ebe57685..d110a3015e 100644
--- a/lib/diameter/src/base/diameter_gen.erl
+++ b/lib/diameter/src/base/diameter_gen.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -76,8 +76,7 @@ encode_avps(Name, Vals, #{module := Mod} = Opts) ->
?LINE,
{Reason, Name, Vals, Mod}),
erlang:error(list_to_tuple(Reason ++ [Name]));
- error: Reason ->
- Stack = erlang:get_stacktrace(),
+ error: Reason: Stack ->
diameter_lib:log({encode, failure},
?MODULE,
?LINE,
@@ -555,8 +554,8 @@ dec(Data, Name, AvpName, Type, Mod, Dict, Fmt, Failed, Opts, Avp) ->
catch
throw: {?MODULE, T} ->
decode_error(Failed, Fmt, T, Avp);
- error: Reason ->
- decode_error(Failed, Reason, Name, Mod, Opts, Avp)
+ error: Reason: Stack ->
+ decode_error(Failed, Reason, Stack, Name, Mod, Opts, Avp)
end.
%% dec_AVP/7
@@ -623,19 +622,19 @@ set(none, Avp, _Name) ->
set(_, Avp, Rec) ->
Avp#diameter_avp{value = Rec}.
-%% decode_error/6
+%% decode_error/7
%%
%% Error when decoding a non-grouped AVP.
-decode_error(true, _, _, _, _, Avp) ->
+decode_error(true, _, _, _, _, _, Avp) ->
Avp;
-decode_error(false, Reason, Name, Mod, Opts, Avp) ->
- Stack = diameter_lib:get_stacktrace(),
+decode_error(false, Reason, Stack, Name, Mod, Opts, Avp) ->
+ Z = diameter_lib:stacktrace(Stack),
diameter_lib:log(decode_error,
?MODULE,
?LINE,
- {Reason, Name, Avp#diameter_avp.name, Mod, Stack}),
+ {Reason, Name, Avp#diameter_avp.name, Mod, Z}),
case Reason of
{'DIAMETER', 5014 = RC, _} ->
%% Length error communicated from diameter_types or a
diff --git a/lib/diameter/src/base/diameter_internal.hrl b/lib/diameter/src/base/diameter_internal.hrl
index a0f4a8567d..4a678643c2 100644
--- a/lib/diameter/src/base/diameter_internal.hrl
+++ b/lib/diameter/src/base/diameter_internal.hrl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -35,9 +35,6 @@
%% A corresponding error when failure is the best option.
-define(ERROR(T), erlang:error({T, ?MODULE, ?LINE})).
-%% Failure reports always get a stack trace.
--define(STACK, erlang:get_stacktrace()).
-
%% Warning report for unexpected messages in various processes.
-define(UNEXPECTED(F,A),
diameter_lib:warning_report(unexpected, {?MODULE, F, A})).
diff --git a/lib/diameter/src/base/diameter_lib.erl b/lib/diameter/src/base/diameter_lib.erl
index 1c1ea42cb5..edd9d5a4ce 100644
--- a/lib/diameter/src/base/diameter_lib.erl
+++ b/lib/diameter/src/base/diameter_lib.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -33,7 +33,7 @@
time/1,
eval/1,
eval_name/1,
- get_stacktrace/0,
+ stacktrace/1,
ipaddr/1,
spawn_opts/2,
wait/1,
@@ -42,16 +42,13 @@
log/4]).
%% ---------------------------------------------------------------------------
-%% # get_stacktrace/0
+%% # stacktrace/1
%% ---------------------------------------------------------------------------
%% Return a stacktrace with a leading, potentially large, argument
-%% list replaced by an arity. Trace on stacktrace/0 to see the
+%% list replaced by an arity. Trace on stacktrace/1 to see the
%% original.
-get_stacktrace() ->
- stacktrace(erlang:get_stacktrace()).
-
stacktrace([{M,F,A,L} | T]) when is_list(A) ->
[{M, F, length(A), L} | T];
stacktrace(L) ->
@@ -268,8 +265,8 @@ ipaddr(Addr) ->
try
ip(Addr)
catch
- error: _ ->
- erlang:error({invalid_address, erlang:get_stacktrace()})
+ error: _: Stack ->
+ erlang:error({invalid_address, Stack})
end.
%% Already a tuple: ensure non-negative integers of the right size.
diff --git a/lib/diameter/src/base/diameter_service.erl b/lib/diameter/src/base/diameter_service.erl
index cbe66ef27a..77d184cfc7 100644
--- a/lib/diameter/src/base/diameter_service.erl
+++ b/lib/diameter/src/base/diameter_service.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -808,8 +808,8 @@ remotes(F) ->
error_report(invalid_return, share_peers, F),
[]
catch
- E:R ->
- ?LOG(failure, {E, R, F, diameter_lib:get_stacktrace()}),
+ E:R:S ->
+ ?LOG(failure, {E, R, F, diameter_lib:stacktrace(S)}),
error_report(failure, share_peers, F),
[]
end.
@@ -1146,11 +1146,11 @@ peer_cb(App, F, A) ->
mod_state(App#diameter_app.alias, ModS),
true
catch
- E:R ->
+ E:R:S ->
%% Don't include arguments since a #diameter_caps{} strings
%% from the peer, which could be anything (especially, large).
[Mod|X] = App#diameter_app.module,
- ?LOG(failure, {E, R, Mod, F, diameter_lib:get_stacktrace()}),
+ ?LOG(failure, {E, R, Mod, F, diameter_lib:stacktrace(S)}),
error_report(failure, F, {Mod, F, A ++ X}),
false
end.
@@ -1376,9 +1376,9 @@ cm([#diameter_app{alias = Alias} = App], Req, From, Svc) ->
?LOG(invalid_return, {ModX, handle_call, Args, T}),
invalid
catch
- E: Reason ->
+ E: Reason: S ->
ModX = App#diameter_app.module,
- Stack = diameter_lib:get_stacktrace(),
+ Stack = diameter_lib:stacktrace(S),
?LOG(failure, {E, Reason, ModX, handle_call, Stack}),
failure
end;
@@ -1585,10 +1585,10 @@ pick_peer(Local,
?LOG(invalid_return, {ModX, pick_peer, T}),
false
catch
- E: Reason when M ->
+ E: Reason: Stack when M ->
ModX = App#diameter_app.module,
- Stack = diameter_lib:get_stacktrace(),
- ?LOG(failure, {E, Reason, ModX, pick_peer, Stack}),
+ Z = diameter_lib:stacktrace(Stack),
+ ?LOG(failure, {E, Reason, ModX, pick_peer, Z}),
false
end.
diff --git a/lib/diameter/src/compiler/diameter_make.erl b/lib/diameter/src/compiler/diameter_make.erl
index eae40dbafd..03cfc03edc 100644
--- a/lib/diameter/src/compiler/diameter_make.erl
+++ b/lib/diameter/src/compiler/diameter_make.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -271,6 +271,6 @@ make(File, Opts, Dict, Mode) ->
try
diameter_codegen:from_dict(File, Dict, Opts, Mode)
catch
- error: Reason ->
- erlang:error({Reason, Mode, erlang:get_stacktrace()})
+ error: Reason: Stack ->
+ erlang:error({Reason, Mode, Stack})
end.
diff --git a/lib/diameter/src/diameter.app.src b/lib/diameter/src/diameter.app.src
index 9a6e47006b..18202f033e 100644
--- a/lib/diameter/src/diameter.app.src
+++ b/lib/diameter/src/diameter.app.src
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -28,14 +28,21 @@
]},
{registered, [%REGISTERED%]},
{applications, [
- {stdlib, "2.4"}, {kernel, "3.2"}%, {erts, "6.4"}
- %% {syntax-tools, "1.6,18"}
- %% {runtime-tools, "1.8.16"}
- %, {ssl, "6.0"}
+ stdlib,
+ kernel
+ %, ssl
+ %, syntax-tools
+ %, runtime-tools
]},
{env, []},
{mod, {diameter_app, []}},
{runtime_dependencies, [
+ "erts-10.0",
+ "stdlib-2.4",
+ "kernel-3.2",
+ "ssl-9.0"
+ %, "syntax-tools-1.6.18"
+ %, "runtime-tools-1.8.16"
]}
%%
%% Note that ssl is only required if configured on TCP transports,
diff --git a/lib/diameter/src/transport/diameter_tcp.erl b/lib/diameter/src/transport/diameter_tcp.erl
index a8639baa11..681d273d07 100644
--- a/lib/diameter/src/transport/diameter_tcp.erl
+++ b/lib/diameter/src/transport/diameter_tcp.erl
@@ -716,7 +716,7 @@ tls_handshake(_, false, S) ->
tls(connect, Sock, Opts) ->
ssl:connect(Sock, Opts);
tls(accept, Sock, Opts) ->
- ssl:ssl_accept(Sock, Opts).
+ ssl:handshake(Sock, Opts). %% assume no handshake option
%% recv/2
%%
@@ -839,7 +839,7 @@ start_fragment_timer(#transport{timeout = Tmo} = S) ->
accept(ssl, LSock) ->
case ssl:transport_accept(LSock) of
{ok, Sock} ->
- {ssl:ssl_accept(Sock), Sock};
+ ssl:handshake(Sock);
{error, _} = No ->
No
end;
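
For context on the two hunks above: ssl:handshake/1,2 returns the negotiated
socket, so the {ssl:ssl_accept(Sock), Sock} pairing is no longer needed. A
hedged sketch of the accept side (LSock is assumed to be an ssl listen
socket):

    case ssl:transport_accept(LSock) of
        {ok, Sock} ->
            ssl:handshake(Sock);   %% {ok, SslSocket} | {error, Reason}
        {error, _} = No ->
            No
    end.
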
diff --git a/lib/diameter/test/diameter_app_SUITE.erl b/lib/diameter/test/diameter_app_SUITE.erl
index 71256020f5..ef4a28d3f4 100644
--- a/lib/diameter/test/diameter_app_SUITE.erl
+++ b/lib/diameter/test/diameter_app_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2015. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -208,9 +208,9 @@ xref(Config) ->
CTmods = CTmods -- Mods,
%% Ensure that runtime modules only call other runtime modules, or
- %% applications declared as in runtime_dependencies in the app
- %% file. Note that the declared application versions are ignored
- %% since we only know what we can see now.
+ %% applications declared in runtime_dependencies in the app file.
+ %% The declared application versions are ignored since we only
+ %% know what we see now.
[] = lists:filter(fun(M) -> not lists:member(app(M), Deps) end,
RTdeps -- Mods).
@@ -261,8 +261,12 @@ app(Mod) ->
case code:which(Mod) of
preloaded ->
"erts";
+ Reason when is_atom(Reason) ->
+ error({Reason, Mod});
Path ->
- unversion(lists:nth(3, lists:reverse(filename:split(Path))))
+ %% match to identify an unexpectedly short path
+ {_, _, [_,_,_|_] = Split} = {Mod, Path, filename:split(Path)},
+ unversion(lists:nth(3, lists:reverse(Split)))
end.
add_application(XRef, App) ->
diff --git a/lib/diameter/test/diameter_traffic_SUITE.erl b/lib/diameter/test/diameter_traffic_SUITE.erl
index c224f9a27e..434aef01dd 100644
--- a/lib/diameter/test/diameter_traffic_SUITE.erl
+++ b/lib/diameter/test/diameter_traffic_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -1924,8 +1924,8 @@ compile_and_load() ->
{module, nas4005} = code:load_binary(nas4005, "nas4005", Bin),
true
catch
- E:R ->
- {E, R, erlang:get_stacktrace()}
+ E:R:Stack ->
+ {E, R, Stack}
end.
here() ->
diff --git a/lib/diameter/test/diameter_watchdog_SUITE.erl b/lib/diameter/test/diameter_watchdog_SUITE.erl
index 39c4f051a5..f3f168e671 100644
--- a/lib/diameter/test/diameter_watchdog_SUITE.erl
+++ b/lib/diameter/test/diameter_watchdog_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2010-2017. All Rights Reserved.
+%% Copyright Ericsson AB 2010-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -638,10 +638,9 @@ run1([F|A]) ->
apply(?MODULE, F, A),
ok
catch
- E:R ->
- S = erlang:get_stacktrace(),
- ?WARN("~p", [{A, E, R, S}]),
- S
+ E:R:Stack ->
+ ?WARN("~p", [{A, E, R, Stack}]),
+ Stack
end.
%% jitter/2
diff --git a/lib/hipe/doc/src/hipe_app.xml b/lib/hipe/doc/src/hipe_app.xml
index fc42ecd97d..4d40db086c 100644
--- a/lib/hipe/doc/src/hipe_app.xml
+++ b/lib/hipe/doc/src/hipe_app.xml
@@ -35,6 +35,14 @@
<app>HiPE</app>
<appsummary>The HiPE Application</appsummary>
<description>
+ <note>
+ <p>
+ HiPE and execution of HiPE compiled code only have limited support by
+ the OTP team at Ericsson. The OTP team only does limited maintenance
+ of HiPE and does not actively develop HiPE. HiPE is mainly supported
+ by the HiPE team at Uppsala University.
+ </p>
+ </note>
<p>
The normal way to native-compile an Erlang module using HiPE is to include the atom native
in the Erlang compiler options, as in:</p>
@@ -108,7 +116,7 @@
queue when the reference was created will be bypassed, as they
cannot possibly contain the reference. HiPE currently has an
optimization similar this, but it is not guaranteed to
- bypass all messages. In the worst case scenario it, cannot
+ bypass all messages. In the worst case scenario, it cannot
bypass any messages at all.
</p>
<p>
@@ -117,21 +125,55 @@
</p>
</item>
+ <tag>Garbage collection after BIFs</tag>
+ <item>
+ <p>
+ The condition for determining whether a garbage collection
+ is needed or not has changed in later releases. HiPE has not
+      been updated regarding this, which may cause premature garbage
+ collections after BIF calls.
+ </p>
+ </item>
+
</taglist>
</section>
<section>
<title>Stability Issues</title>
<taglist>
- <tag>Not yielding in <c>receive</c> statements</tag>
+ <tag>Not checking reduction count on function returns</tag>
<item>
- <p>HiPE will not yield in <c>receive</c> statements where
- appropriate. If a process have lots of signals in its signal
- queue and execute a HiPE compiled <c>receive</c> statement,
- the scheduler thread performing the execution may be stuck
- in the <c>receive</c> statement for a very long time. This
- can in turn cause various severe issues such as for example
- prevent the runtime system from being able to release
- memory.
+ <p>
+ BEAM checks the reduction count and schedules out the executing
+ process if needed both when calling a function and when returning
+ from a function call that was not called using a tail call.
+ HiPE only checks the reduction count when calling a function.
+ </p>
+ <p>
+ The runtime system might need to schedule out a process
+ in order to reclaim memory. If the process isn't scheduled
+ out soon after the process has entered this state, memory
+ consumption will quickly grow. Maintaining this state is also
+ quite expensive performance wise.
+ </p>
+ <p>
+ Processes executing code that performs large recursions and
+ produce data after returning from recursive calls may have to
+ be scheduled out when returning from a function call. Since
+ HiPE does not check reductions on returns, processes executing
+      such HiPE compiled code may cause huge peaks in memory
+ consumption as well as severe performance degradation.
+ </p>
+ </item>
+
+ <tag>Not bumping appropriate amount of reductions in <c>receive</c> statements</tag>
+ <item>
+ <p>
+ The process signaling improvements made in ERTS version
+ 10.0 moved potentially significant amounts of work into the
+ receive statement from other places. In order to account for
+ this work, the reduction count should be bumped on the
+ executing process. Reductions are not bumped when entering
+ the <c>receive</c> statement from HiPE compiled code.
</p>
</item>
</taglist>
diff --git a/lib/hipe/rtl/hipe_rtl_lcm.erl b/lib/hipe/rtl/hipe_rtl_lcm.erl
index af39c9a0a4..2c8cc80e56 100644
--- a/lib/hipe/rtl/hipe_rtl_lcm.erl
+++ b/lib/hipe/rtl/hipe_rtl_lcm.erl
@@ -267,14 +267,17 @@ try_insert_expr_last(CFG0, Label, Instr) ->
%% with the new code inserted second to last (assuming the last expression
%% is a branch operation).
insert_expr_last_work(_Instr, [#call{}]) ->
- %% Call instructions clobber all expressions; we musn't insert the expression
- %% before it
+ %% Call instructions clobber all expressions; we must not insert the
+ %% expression before it
not_safe;
insert_expr_last_work(Instr, [Code1]) ->
%% We insert the code next to last.
[Instr, Code1];
insert_expr_last_work(Instr, [Code|Codes]) ->
- [Code|insert_expr_last_work(Instr, Codes)].
+ case insert_expr_last_work(Instr, Codes) of
+ not_safe -> not_safe;
+ NewCodes -> [Code|NewCodes]
+ end.
%%=============================================================================
%% Inserts expression first in the block for the given label.
diff --git a/lib/hipe/test/basic_SUITE_data/basic_issues_hipe.erl b/lib/hipe/test/basic_SUITE_data/basic_issues_hipe.erl
index e71045bfe2..fc87abb54e 100644
--- a/lib/hipe/test/basic_SUITE_data/basic_issues_hipe.erl
+++ b/lib/hipe/test/basic_SUITE_data/basic_issues_hipe.erl
@@ -8,8 +8,9 @@
-export([test/0]).
-%% functions that need to be exported so that they are retained.
--export([auth/4]).
+%% functions that need to be exported so that they are retained and/or
+%% not specialized away by the compiler.
+-export([auth/4, wxSizer_replace/2, parent_class/1]).
test() ->
ok = test_dominance_trees(),
@@ -18,6 +19,7 @@ test() ->
ok = test_bif_fails(),
ok = test_find_catches(),
ok = test_heap_allocate_trim(),
+ ok = wxSizer_replace(),
ok.
%%--------------------------------------------------------------------
@@ -151,3 +153,25 @@ get_next_retry(Error, Count) ->
end.
pair(A, B) -> {A, B}.
+
+%%--------------------------------------------------------------------
+%% Date: June 11, 2018
+%%
+%% Stripped down test case (from `wxSizer') that crashed the lazy code
+%% motion pass of the HiPE compiler in a pre-release of Erlang/OTP 21.
+%% A similar crash existed in `ssl_correction'.
+%%--------------------------------------------------------------------
+
+wxSizer_replace() ->
+ wxSizer_replace(?MODULE, ?MODULE).
+
+-define(CLASS(Type, Class), ((Type) =:= Class) orelse (Type):parent_class(Class)).
+
+wxSizer_replace(OldwinT, NewwinT) -> % this function was the culprit
+ ?CLASS(OldwinT, ?MODULE),
+ ?CLASS(NewwinT, ?MODULE),
+ ok.
+
+parent_class(wxWindow) -> true;
+parent_class(wxEvtHandler) -> true;
+parent_class(_Class) -> erlang:error({badtype, ?MODULE}).
diff --git a/lib/kernel/doc/src/application.xml b/lib/kernel/doc/src/application.xml
index 886286b76d..be914aee87 100644
--- a/lib/kernel/doc/src/application.xml
+++ b/lib/kernel/doc/src/application.xml
@@ -318,8 +318,13 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
<c>{error,{not_started,App}}</c> is returned, where <c>App</c>
is the name of the missing application.</p>
<p>The application controller then creates an <em>application master</em>
- for the application. The application master is
- the group leader of all the processes in the application.
+        for the application. The application master becomes the
+        group leader of all the processes in the application. I/O is
+        still forwarded to the previous group leader; the group
+        leadership is only used as a way to identify the processes
+        that belong to the application, for example to find the
+        application master from any process in the application, or,
+        reciprocally, to let the application master kill them all
+        when it terminates.</p>
+ <p>
The application master starts the application by calling
the application callback function <c>Module:start/2</c> as
defined by the application specification key <c>mod</c>.</p>
@@ -608,4 +613,3 @@ Nodes = [cp1@cave, {cp2@cave, cp3@cave}]</code>
<seealso marker="app">app(4)</seealso></p>
</section>
</erlref>
-
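
If an example helps: since the application master is the group leader of
every process in the application, the application that a process belongs to
can be looked up via its group leader, for instance with
application:get_application/1 (Pid is assumed to be a process running under
some application):

    %% {ok, AppName} if Pid belongs to a running application, otherwise
    %% undefined; the lookup is based on the group leader of Pid.
    application:get_application(Pid).
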
diff --git a/lib/kernel/doc/src/config.xml b/lib/kernel/doc/src/config.xml
index 8850c1736b..3f01170508 100644
--- a/lib/kernel/doc/src/config.xml
+++ b/lib/kernel/doc/src/config.xml
@@ -86,8 +86,13 @@
<tag><c>File = string()</c></tag>
<item>Name of another <c>.config</c> file.
Extension <c>.config</c> can be omitted. It is
- recommended to use absolute paths. A relative path is
- relative the current working directory of the emulator.</item>
+      recommended to use absolute paths. If a relative path is used,
+      <c>File</c> is searched for, first, relative to the <c>sys.config</c>
+      directory, and then relative to the current working directory of
+      the emulator, for backward compatibility. This allows a
+      <c>sys.config</c> file to point out other <c>.config</c> files in a
+      release, or in a node started manually using <c>-config ...</c>,
+      with the same result whatever the current working directory is.
+ </item>
</taglist>
<p>When traversing the contents of <c>sys.config</c> and a filename
is encountered, its contents are read and merged with the result
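
A small illustration of the lookup order described above (file and directory
names are made up): with the following releases/1.0/sys.config, "other" is
resolved first as releases/1.0/other.config, falling back to the current
working directory of the emulator only if that file is not found:

    [{kernel, [{logger_level, info}]},
     "other"].
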
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
index 6e1eab8a8e..208193ee42 100644
--- a/lib/kernel/doc/src/logger_chapter.xml
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -104,6 +104,7 @@
defined.</p>
</section>
<section>
+ <marker id="logger_api"/>
<title>Logger API</title>
<p>The API for logging consists of a set
of <seealso marker="logger#macros">macros</seealso>, and a set
@@ -1090,184 +1091,210 @@ do_log(Fd, LogEvent, #{formatter := {FModule, FConfig}}) ->
<section>
<marker id="overload_protection"/>
<title>Protecting the Handler from Overload</title>
- <p>In order for the built-in handlers to survive, and stay responsive,
- during periods of high load (i.e. when huge numbers of incoming
- log requests must be handled), a mechanism for overload protection
- has been implemented in the
- <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>
- and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
- </seealso> handler. The mechanism, used by both handlers, works
- as follows:</p>
+ <p>The default handlers, <seealso marker="logger_std_h">
+ <c>logger_std_h</c></seealso> and <seealso marker="logger_disk_log_h">
+ <c>logger_disk_log_h</c></seealso>, feature an overload protection
+ mechanism, which makes it possible for the handlers to survive,
+ and stay responsive, during periods of high load (when huge
+ numbers of incoming log requests must be handled).
+ The mechanism works as follows:</p>
<section>
<title>Message Queue Length</title>
<p>The handler process keeps track of the length of its message
- queue and reacts in different ways depending on the current status.
- The purpose is to keep the handler in, or (as quickly as possible),
- get the handler into, a state where it can keep up with the pace
- of incoming log requests. The memory usage of the handler must never
- keep growing larger and larger, since that would eventually cause the
- handler to crash. Three thresholds with associated actions have been
- defined:</p>
+ queue and takes some form of action when the current length exceeds a
+ configurable threshold. The purpose is to keep the handler in, or to
+ as quickly as possible get the handler into, a state where it can
+ keep up with the pace of incoming log events. The memory use of the
+ handler must never grow larger and larger, since that will eventually
+ cause the handler to crash. These three thresholds, with associated
+ actions, exist:</p>
<taglist>
- <tag><c>toggle_sync_qlen</c></tag>
+ <tag><c>sync_mode_qlen</c></tag>
<item>
- <p>The default value of this level is <c>10</c> messages,
- and as long as the length of the message queue is lower, all log
- requests are handled asynchronously. This simply means that the
- process sending the log request (by calling a log function in the
- Logger API) does not wait for a response from the handler but
- continues executing immediately after the request (i.e. it will not
- be affected by the time it takes the handler to print to the log
- device). If the message queue grows larger than this value, however,
- the handler starts handling the log requests synchronously instead,
- meaning the process sending the request will have to wait for a
- response. When the handler manages to reduce the message queue to a
- level below the <c>toggle_sync_qlen</c> threshold, asynchronous
+ <p>As long as the length of the message queue is lower than this
+ value, all log events are handled asynchronously. This means that
+ the client process sending the log event, by calling a log function
+ in the <seealso marker="logger_chapter#logger_api">Logger API</seealso>,
+ does not wait for a response from the handler but continues
+ executing immediately after the event is sent. It is not affected
+ by the time it takes the handler to print the event to the log
+ device. If the message queue grows larger than this value,
+ the handler starts handling log events synchronously instead,
+ meaning that the client process sending the event must wait for a
+ response. When the handler reduces the message queue to a
+ level below the <c>sync_mode_qlen</c> threshold, asynchronous
operation is resumed. The switch from asynchronous to synchronous
- mode will force the logging tempo of few busy senders to slow down,
- but cannot protect the handler sufficiently in situations of many
- concurrent senders.</p>
+ mode can slow down the logging tempo of one, or a few, busy senders,
+ but cannot protect the handler sufficiently in a situation of many
+ busy concurrent senders.</p>
+ <p>Defaults to <c>10</c> messages.</p>
</item>
- <tag><c>drop_new_reqs_qlen</c></tag>
+ <tag><c>drop_mode_qlen</c></tag>
<item>
- <p>When the message queue has grown larger than this threshold, which
- defaults to <c>200</c> messages, the handler switches to a mode in
- which it drops any new requests being made. Dropping a message in
- this state means that the log function never actually sends a message
- to the handler. The log call simply returns without an action. When
- the length of the message queue has been reduced to a level below this
- threshold, synchronous or asynchronous request handling mode is
- resumed.</p>
+ <p>When the message queue grows larger than this threshold, the
+ handler switches to a mode in which it drops all new events that
+ senders want to log. Dropping an event in this mode means that the
+ call to the log function never results in a message being sent to
+ the handler, but the function returns without taking any action.
+ The handler keeps logging the events that are already in its message
+ queue, and when the length of the message queue is reduced to a level
+ below the threshold, synchronous or asynchronous mode is resumed.
+ Notice that when the handler activates or deactivates drop mode,
+ information about it is printed in the log.</p>
+ <p>Defaults to <c>200</c> messages.</p>
</item>
- <tag><c>flush_reqs_qlen</c></tag>
+ <tag><c>flush_qlen</c></tag>
<item>
- <p>Above this threshold, which defaults to <c>1000</c> messages, a
- flush operation takes place, in which all messages buffered in the
- process mailbox get deleted without any logging actually taking
- place. (Processes waiting for a response from a synchronous log request
- will receive a reply indicating that the request has been dropped).</p>
+ <p>If the length of the message queue grows larger than this threshold,
+ a flush (delete) operation takes place. To flush events, the handler
+ discards the messages in the message queue by receiving them in a
+ loop without logging. Client processes waiting for a response from a
+ synchronous log request receive a reply from the handler indicating
+ that the request is dropped. The handler process increases its
+ priority during the flush loop to make sure that no new events
+ are received during the operation. Notice that after the flush operation
+ is performed, the handler prints information in the log about how many
+ events have been deleted.</p>
+ <p>Defaults to <c>1000</c> messages.</p>
</item>
</taglist>
<p>For the overload protection algorithm to work properly, it is
required that:</p>
- <p><c>toggle_sync_qlen =&lt; drop_new_reqs_qlen =&lt; flush_reqs_qlen</c></p>
+ <p><c>sync_mode_qlen =&lt; drop_mode_qlen =&lt; flush_qlen</c></p>
<p>and that:</p>
- <p><c>drop_new_reqs_qlen &gt; 1</c></p>
+ <p><c>drop_mode_qlen &gt; 1</c></p>
- <p>If <c>toggle_sync_qlen</c> is set to <c>0</c>, the handler will handle all
- requests synchronously. Setting the value of <c>toggle_sync_qlen</c> to the same
- as <c>drop_new_reqs_qlen</c>, disables the synchronous mode. Likewise, setting
- the value of <c>drop_new_reqs_qlen</c> to the same as <c>flush_reqs_qlen</c>,
- disables the drop mode.</p>
+ <p>To disable certain modes, do the following:</p>
+ <list>
+ <item>If <c>sync_mode_qlen</c> is set to <c>0</c>, all log events are handled
+ synchronously. That is, asynchronous logging is disabled.</item>
+ <item>If <c>sync_mode_qlen</c> is set to the same value as
+ <c>drop_mode_qlen</c>, synchronous mode is disabled. That is, the handler
+ always runs in asynchronous mode, unless dropping or flushing is invoked.</item>
+ <item>If <c>drop_mode_qlen</c> is set to the same value as <c>flush_qlen</c>,
+ drop mode is disabled and can never occur.</item>
+ </list>
<p>During high load scenarios, the length of the handler message queue
rarely grows in a linear and predictable way. Instead, whenever the
- handler process gets scheduled in, it can have an almost arbitrary number
- of messages waiting in the mailbox. It's for this reason that the overload
- protection mechanism is focused on acting quickly and quite drastically
- (such as immediately dropping or flushing messages) as soon as a large
- queue length is detected. </p>
-
- <p>The thresholds listed above may be modified by the user if, e.g, a handler
- shouldn't drop or flush messages unless the message queue length grows
- extremely large. (The handler must be allowed to use large amounts of memory
- under such circumstances however). Another example of when the user might want
- to change the settings is if, for performance reasons, the logging processes must
- never get blocked by synchronous log requests, while dropping or flushing requests
- is perfectly acceptable (since it doesn't affect the performance of the
- loggers).</p>
+ handler process is scheduled in, it can have an almost arbitrary number
+ of messages waiting in the message queue. It is for this reason that the overload
+ protection mechanism is focused on acting quickly, and quite drastically,
+ such as immediately dropping or flushing messages, when a large queue length
+ is detected.</p>
+
+ <p>The values of the previously listed thresholds can be specified by the user.
+ This way, a handler can be configured to, for example, not drop or flush
+ messages unless the message queue length of the handler process grows extremely
+ large. Notice that large amounts of memory can be required for the node under such
+ circumstances. Another example of user configuration is when, for performance
+ reasons, the client processes must never be blocked by synchronous log requests.
+ It is possible, perhaps, that dropping or flushing events is still acceptable, since
+ it does not affect the performance of the client processes sending the log events.</p>
<p>A configuration example:</p>
<code type="none">
logger:add_handler(my_standard_h, logger_std_h,
- #{config =>
- #{type => {file,"./system_info.log"},
- toggle_sync_qlen => 100,
- drop_new_reqs_qlen => 1000,
- flush_reqs_qlen => 2000}}).
+ #{config => #{type => {file,"./system_info.log"},
+ sync_mode_qlen => 100,
+ drop_mode_qlen => 1000,
+ flush_qlen => 2000}}).
</code>
</section>
<section>
<title>Controlling Bursts of Log Requests</title>
- <p>A potential problem with large bursts of log requests, is that log files
- may get full or wrapped too quickly (in the latter case overwriting
- previously logged data that could be of great importance). For this reason,
- both built-in handlers offer the possibility to set a maximum level of how
- many requests to process with a certain time frame. With this burst control
- feature enabled, the handler will take care of bursts of log requests
- without choking log files, or the terminal, with massive amounts of
- printouts. These are the configuration parameters:</p>
-
+ <p>Large bursts of log events - many events received by the handler
+ under a short period of time - can potentially cause problems, such as:</p>
+ <list>
+ <item>Log files grow very large, very quickly.</item>
+ <item>Circular logs wrap too quickly so that important data is overwritten.</item>
+ <item>Write buffers grow large, which slows down file sync operations.</item>
+ </list>
+
+ <p>For this reason, both built-in handlers offer the possibility to specify the
+ maximum number of events to be handled within a certain time frame.
+ With this burst control feature enabled, the handler can avoid choking the log with
+ massive amounts of printouts. The configuration parameters are:</p>
<taglist>
- <tag><c>enable_burst_limit</c></tag>
+ <tag><c>burst_limit_enable</c></tag>
<item>
- <p>This is set to <c>true</c> by default. The value <c>false</c>
- disables the burst control feature.</p>
+ <p>Value <c>true</c> enables burst control and <c>false</c> disables it.</p>
+ <p>Defaults to <c>true</c>.</p>
</item>
- <tag><c>burst_limit_size</c></tag>
+ <tag><c>burst_limit_max_count</c></tag>
<item>
- <p>This is how many requests should be processed within the
- <c>burst_window_time</c> time frame. After this maximum has been
- reached, successive requests will be dropped until the end of the
- time frame. The default value is <c>500</c> messages.</p>
+ <p>This is the maximum number of events to handle within a
+ <c>burst_limit_window_time</c> time frame. After the limit is
+ reached, successive events are dropped until the end of the time frame.</p>
+ <p>Defaults to <c>500</c> events.</p>
</item>
- <tag><c>burst_window_time</c></tag>
+ <tag><c>burst_limit_window_time</c></tag>
<item>
- <p>The default window is <c>1000</c> milliseconds long.</p>
+ <p>See the previous description of <c>burst_limit_max_count</c>.</p>
+ <p>Defaults to <c>1000</c> milliseconds.</p>
</item>
</taglist>
<p>A configuration example:</p>
<code type="none">
logger:add_handler(my_disk_log_h, logger_disk_log_h,
- #{disk_log_opts =>
- #{file => "./my_disk_log"},
- config =>
- #{burst_limit_size => 10,
- burst_window_time => 500}}).
+ #{config => #{file => "./my_disk_log",
+ burst_limit_enable => true,
+ burst_limit_max_count => 20,
+ burst_limit_window_time => 500}}).
</code>
</section>
<section>
- <title>Terminating a Large Handler</title>
- <p>A handler process may grow large even if it can manage peaks of high load
- without crashing. The overload protection mechanism includes user configurable
- levels for a maximum allowed message queue length and maximum allowed memory
- usage. This feature is disabled by default, but can be switched on by means
- of the following configuration parameters:</p>
-
+ <title>Terminating an Overloaded Handler</title>
+ <p>Even if a handler can successfully manage peaks of high load
+ without crashing, it can still build up a large message queue, or use a
+ large amount of memory. The overload protection mechanism includes an
+ automatic termination and restart feature for the purpose of guaranteeing
+ that a handler does not grow out of bounds. The feature is configured
+ with the following parameters:</p>
<taglist>
- <tag><c>enable_kill_overloaded</c></tag>
+ <tag><c>overload_kill_enable</c></tag>
<item>
- <p>This is set to <c>false</c> by default. The value <c>true</c>
- enables the feature.</p>
+ <p>Value <c>true</c> enables the feature and <c>false</c> disables it.</p>
+ <p>Defaults to <c>false</c>.</p>
</item>
- <tag><c>handler_overloaded_qlen</c></tag>
+ <tag><c>overload_kill_qlen</c></tag>
<item>
- <p>This is the maximum allowed queue length. If the mailbox grows larger
- than this, the handler process gets terminated.</p>
+ <p>This is the maximum allowed queue length. If the message queue grows
+ larger than this, the handler process is terminated.</p>
+ <p>Defaults to <c>20000</c> messages.</p>
</item>
- <tag><c>handler_overloaded_mem</c></tag>
+ <tag><c>overload_kill_mem_size</c></tag>
<item>
- <p>This is the maximum allowed memory usage of the handler process. If
- the handler grows any larger, the process gets terminated.</p>
+ <p>This is the maximum memory size that the handler process is allowed to use.
+ If the handler grows larger than this, the process is terminated.</p>
+ <p>Defaults to <c>3000000</c> bytes.</p>
</item>
- <tag><c>handler_restart_after</c></tag>
+ <tag><c>overload_kill_restart_after</c></tag>
<item>
- <p>If the handler gets terminated because of its queue length or
- memory usage, it can get automatically restarted again after a
- configurable delay time. The time is specified in milliseconds
- and <c>5000</c> is the default value. The value <c>never</c> can
- also be set, which prevents a restart.</p>
+ <p>If the handler is terminated, it restarts automatically after a
+ delay specified in milliseconds. The value <c>infinity</c> prevents
+ restarts.</p>
+ <p>Defaults to <c>5000</c> milliseconds.</p>
</item>
</taglist>
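+ <p>A configuration example (the handler name and the parameter values are
+ only for illustration):</p>
+ <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+                   #{config => #{overload_kill_enable => true,
+                                 overload_kill_qlen => 10000,
+                                 overload_kill_mem_size => 3000000,
+                                 overload_kill_restart_after => 10000}}).
+ </code>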
+ <p>If the handler process is terminated because of overload, it prints
+ information about it in the log. It also prints information when a
+ restart has taken place and the handler is back in action.</p>
+ <note>
+ <p>The sizes of the log events affect the memory needs of the handler.
+ For information about how to limit the size of log events, see the
+ <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>
+ manual page.</p>
+ </note>
</section>
</section>
diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml
index 63c29cb010..98439983cf 100644
--- a/lib/kernel/doc/src/logger_disk_log_h.xml
+++ b/lib/kernel/doc/src/logger_disk_log_h.xml
@@ -33,106 +33,112 @@
<file>logger_disk_log_h.xml</file>
</header>
<module>logger_disk_log_h</module>
- <modulesummary>A disk_log based handler for the Logger.</modulesummary>
+ <modulesummary>A disk_log based handler for Logger</modulesummary>
<description>
<p>This is a handler for Logger that offers circular
(wrapped) logs by using <seealso marker="disk_log"><c>disk_log</c></seealso>.
- Multiple instances
- of this handler can be added to Logger, and each instance prints to
- its own disk_log file, created with the name and settings specified in
- the handler configuration.</p>
+ Multiple instances of this handler can be added to Logger, and each instance
+ prints to its own disk log file, created with the name and settings specified
+ in the handler configuration.</p>
<p>The default standard handler,
<seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, can be
- replaced by a disk_log handler at start up of the Kernel application.
+ replaced by a disk_log handler at startup of the Kernel application.
See an example of this below.</p>
- <p>The handler has an overload protection mechanism that will keep the handler
- process and the Kernel application alive during a high load of log
- requests. How this feature works, and how to modify the configuration,
- is described in the
+ <p>The handler has an overload protection mechanism that keeps the handler
+ process and the Kernel application alive during high loads of log
+ events. How overload protection works, and how to configure it, is
+ described in the
<seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
</seealso>.</p>
<p>To add a new instance of the disk_log handler, use
<seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c>
- </seealso>. The handler configuration argument is a map which may contain
+ </seealso>. The handler configuration argument is a map which can contain
general configuration parameters, as documented in the
<seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c>
- </seealso>, as well as handler specific parameters.</p>
- <p>The settings for the disk_log log file should be specified with the
- key <c>disk_log_opts</c>. These settings are a subset of the disk_log
- data type
- <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>.</p>
- <p>Parameters in the <c>disk_log_opts</c> map:</p>
+ </seealso>, and handler specific parameters. The specific data
+ is stored in a sub map with the key <c>config</c>, and can contain the
+ following parameters:</p>
<taglist>
<tag><c>file</c></tag>
- <item>This is the full name of the disk_log log file.</item>
+ <item>
+ <p>This is the full name of the disk log file. The option
+ corresponds to the <c>name</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+ </item>
<tag><c>type</c></tag>
- <item>This is the disk_log type, <c>wrap</c> or <c>halt</c>. The
- default value is <c>wrap</c>.</item>
+ <item>
+ <p>This is the disk log type, <c>wrap</c> or <c>halt</c>. The option
+ corresponds to the <c>type</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+ <p>Defaults to <c>wrap</c>.</p>
+ </item>
<tag><c>max_no_files</c></tag>
- <item>This is the maximum number of files that disk_log will use
- for its circular logging. The default value is <c>10</c>. (The setting
- has no effect on a halt log).</item>
+ <item>
+ <p>This is the maximum number of files that disk_log uses
+ for its circular logging. The option
+ corresponds to the <c>MaxNoFiles</c> element in the <c>size</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+ <p>Defaults to <c>10</c>.</p>
+ <p>The setting has no effect on a halt log.</p>
+ </item>
<tag><c>max_no_bytes</c></tag>
- <item>This is the maximum number of bytes that will be written to
- a log file before disk_log proceeds with the next file in order (or
- generates an error in case of a full halt log). The default value for
- a wrap log is <c>1048576</c> bytes, and <c>infinity</c> for a halt
- log.</item>
- </taglist>
- <p>Specific configuration for the handler (represented as a sub map)
- is specified with the key <c>config</c>. It may contain the
- following parameter:</p>
- <taglist>
+ <item>
+ <p>This is the maximum number of bytes that is written to
+ a log file before disk_log proceeds with the next file in order, or
+ generates an error in case of a full halt log. The option
+ corresponds to the <c>MaxNoBytes</c> element in the <c>size</c> property in the
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>
+ datatype.</p>
+ <p>Defaults to <c>1048576</c> bytes for a wrap log, and
+ <c>infinity</c> for a halt log.</p>
+ </item>
<tag><c>filesync_repeat_interval</c></tag>
<item>
- <p>This value (in milliseconds) specifies how often the handler will
- do a disk_log sync operation in order to make sure that buffered data
- gets written to disk. The handler will repeatedly attempt this
- operation, but only perform it if something has actually been logged
- since the last sync. The default value is <c>5000</c> milliseconds.
- If <c>no_repeat</c> is set as value, the repeated sync operation is
- disabled. The user can also call the
- <seealso marker="logger_disk_log_h#sync-1"><c>sync/1</c>
- </seealso> function to perform a disk_log sync.</p></item>
+ <p>This value, in milliseconds, specifies how often the handler does
+ a disk_log sync operation to write buffered data to disk. The handler attempts
+ the operation repeatedly, but only performs a new sync if something has
+ actually been logged.</p>
+ <p>Defaults to <c>5000</c> milliseconds.</p>
+ <p>If <c>no_repeat</c> is set as value, the repeated sync operation
+ is disabled. The user can also call the
+ <seealso marker="logger_disk_log_h#filesync-1"><c>filesync/1</c>
+ </seealso> function to perform a disk_log sync.</p>
+ </item>
</taglist>
- <p>There are a number of other configuration parameters available, that are
- to be used for customizing the overload protection behaviour. The same
- parameters are used both in the standard handler and the disk_log handler,
- and are documented in the
+ <p>Other configuration parameters exist, to be used for customizing
+ the overload protection behaviour. The same parameters are used both in the
+ standard handler and the disk_log handler, and are documented in the
<seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
</seealso>.</p>
- <p>Note that when changing the configuration of the handler in runtime, by
- calling
- <seealso marker="logger#set_handler_config-2"><c>logger:set_handler_config/2
- or logger:set_handler_config/3</c></seealso>, the <c>disk_log_opts</c>
- settings may not be modified.</p>
+ <p>Notice that when changing the configuration of the handler in runtime, the
+ disk_log options (<c>file</c>, <c>type</c>, <c>max_no_files</c>,
+ <c>max_no_bytes</c>) must not be modified.</p>
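+ <p>Other parameters can be changed in runtime. For example, to change the
+ log level of a handler instance (the handler name is only an illustration):</p>
+ <code type="none">
+logger:set_handler_config(my_disk_log_h, level, error).
+ </code>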
<p>Example of adding a disk_log handler:</p>
<code type="none">
logger:add_handler(my_disk_log_h, logger_disk_log_h,
- #{level => error,
- filter_default => log,
- disk_log_opts =>
- #{file => "./my_disk_log",
- type => wrap,
- max_no_files => 4,
- max_no_bytes => 10000},
- config =>
- #{filesync_repeat_interval => 1000}}).
+ #{config => #{file => "./my_disk_log",
+ type => wrap,
+ max_no_files => 4,
+ max_no_bytes => 10000,
+ filesync_repeat_interval => 1000}}).
</code>
- <p>In order to use the disk_log handler instead of the default standard
+ <p>To use the disk_log handler instead of the default standard
handler when starting an Erlang node, change the Kernel default logger to
- use disk_log. Example:</p>
+ use <c>logger_disk_log_h</c>. Example:</p>
<code type="none">
erl -kernel logger '[{handler,default,logger_disk_log_h,
- #{disk_log_opts => #{file => "./system_disk_log"}}}]'
+ #{config => #{file => "./system_disk_log"}}}]'
</code>
</description>
<funcs>
<func>
- <name name="sync" arity="1" clause_i="1"/>
+ <name name="filesync" arity="1" clause_i="1"/>
<fsummary>Writes buffered data to disk.</fsummary>
<desc>
<p>Write buffered data to disk.</p>
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
index 89e11389c5..95b4baf160 100644
--- a/lib/kernel/doc/src/logger_std_h.xml
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -33,92 +33,84 @@
<file>logger_std_h.xml</file>
</header>
<module>logger_std_h</module>
- <modulesummary>Default handler for Logger.</modulesummary>
+ <modulesummary>Standard handler for Logger.</modulesummary>
<description>
- <p>This is the default handler for Logger.
+ <p>This is the standard handler for Logger.
Multiple instances of this handler can be added to
- Logger, and each instance will print logs to <c>standard_io</c>,
- <c>standard_error</c> or to file. The default instance that starts
- with Kernel is named <c>default</c> - which is the name to be used
- for reconfiguration.</p>
- <p>The handler has an overload protection mechanism that will keep the handler
- process and the Kernel application alive during a high load of log
- requests. How this feature works, and how to modify the configuration,
- is described in the
+ Logger, and each instance prints logs to <c>standard_io</c>,
+ <c>standard_error</c>, or to file.</p>
+ <p>The handler has an overload protection mechanism that keeps the handler
+ process and the Kernel application alive during high loads of log
+ events. How overload protection works, and how to configure it, is
+ described in the
<seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
</seealso>.</p>
<p>To add a new instance of the standard handler, use
<seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c>
- </seealso>. The handler configuration argument is a map which may contain
- general configuration parameters, as documented in the
+ </seealso>. The handler configuration argument is a map which can contain
+ general configuration parameters, as documented in the
<seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c>
- </seealso>, as well as handler specific parameters. The specific parameters
- are stored in a sub map with the key <c>config</c>. The following
- keys and values may be specified:</p>
+ </seealso>, and handler specific parameters. The specific data
+ is stored in a sub map with the key <c>config</c>, and can contain the
+ following parameters:</p>
<taglist>
<tag><marker id="type"/><c>type</c></tag>
<item>
- <p>This will have the value <c>standard_io</c>, <c>standard_error</c>,
- <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>,
- where <c>standard_io</c> is the default value for type. It's recommended
- to not specify <c>LogFileOpts</c> if not absolutely necessary. The
- default options used by the handler to open a file for logging are:
- <c>raw</c>, <c>append</c> and <c>delayed_write</c>. The standard
- handler does not have support for circular logging. Use the
- <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
- </seealso> handler for this.</p></item>
+ <p>This has the value <c>standard_io</c>, <c>standard_error</c>,
+ <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>.</p>
+ <p> Defaults to <c>standard_io</c>.</p>
+ <p>It is recommended not to specify <c>LogFileOpts</c> unless absolutely
+ necessary. The default options used by the handler to open a file for logging are
+ <c>raw</c>, <c>append</c>, and <c>delayed_write</c>. Notice that the standard
+ handler does not have support for circular logging. Use the disk_log handler,
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>,
+ for this.</p>
+ </item>
<tag><c>filesync_repeat_interval</c></tag>
<item>
- <p>This value (in milliseconds) specifies how often the handler will
- do a file sync operation in order to make sure that buffered data gets
- written to disk. The handler will repeatedly attempt this
- operation, but only perform it if something has actually been logged
- since the last sync. The default value is <c>5000</c> milliseconds.
- If <c>no_repeat</c> is set as value, the repeated file sync operation
- is disabled, and it will be the operating system settings that determine
- how quickly or slowly data gets written to disk. The user can also call
- the <seealso marker="logger_std_h#sync-1"><c>sync/1</c></seealso>
- function to perform a file sync.</p></item>
+ <p>This value, in milliseconds, specifies how often the handler does
+ a file sync operation to write buffered data to disk. The handler attempts
+ the operation repeatedly, but only performs a new sync if something has
+ actually been logged.</p>
+ <p>Defaults to <c>5000</c> milliseconds.</p>
+ <p>If <c>no_repeat</c> is set as value, the repeated file sync operation
+ is disabled, and it is the operating system settings that determine
+ how quickly or slowly data is written to disk. The user can also call
+ the <seealso marker="logger_std_h#filesync-1"><c>filesync/1</c></seealso>
+ function to perform a file sync.</p>
+ </item>
</taglist>
- <p>There are a number of other configuration parameters available, that are
- to be used for customizing the overload protection behaviour. The same
- parameters are used both in the standard handler and the disk_log handler,
- and are documented in the
+ <p>Other configuration parameters exist, to be used for customizing
+ the overload protection behaviour. The same parameters are used both in the
+ standard handler and the disk_log handler, and are documented in the
<seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
</seealso>.</p>
- <p>Note that when changing the configuration of the handler in runtime, by
- calling
- <seealso marker="logger#set_handler_config-2"><c>logger:set_handler_config/2</c>
- </seealso>, or
- <seealso marker="logger#set_handler_config-3"><c>logger:set_handler_config/3</c>
- </seealso>,
- the <c>type</c> parameter may not be modified.</p>
+ <p>Notice that when changing the configuration of the handler in runtime,
+ the <c>type</c> parameter must not be modified.</p>
<p>Example of adding a standard handler:</p>
<code type="none">
logger:add_handler(my_standard_h, logger_std_h,
- #{level => info,
- filter_default => log,
- config =>
- #{type => {file,"./system_info.log"},
- filesync_repeat_interval => 1000}}).
+ #{config => #{type => {file,"./system_info.log"},
+ filesync_repeat_interval => 1000}}).
</code>
- <p>In order to configure the default handler (that starts initially with
- the Kernel application) to log to file instead of <c>standard_io</c>,
- change the Kernel default logger to use a file. Example:</p>
+ <p>To make the default handler, which is started initially by
+ the Kernel application, log to file instead of <c>standard_io</c>,
+ change the Kernel default logger configuration. Example:</p>
<code type="none">
erl -kernel logger '[{handler,default,logger_std_h,
#{config => #{type => {file,"./log.log"}}}}]'
</code>
<p>An example of how to replace the standard handler with a disk_log handler
- at start up can be found in the manual of
- <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>.</p>
+ at startup is found in the
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>
+ manual.</p>
</description>
<funcs>
<func>
- <name name="sync" arity="1" clause_i="1"/>
+ <name name="filesync" arity="1" clause_i="1"/>
<fsummary>Writes buffered data to disk.</fsummary>
<desc>
<p>Write buffered data to disk.</p>
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
index 0dad6ae935..a074d2e74b 100644
--- a/lib/kernel/src/application_controller.erl
+++ b/lib/kernel/src/application_controller.erl
@@ -1814,8 +1814,9 @@ check_conf() ->
%% Therefore read and merge contents.
if
BFName =:= "sys" ->
+ DName = filename:dirname(FName),
{ok, SysEnv, Errors} =
- check_conf_sys(NewEnv),
+ check_conf_sys(NewEnv, [], [], DName),
%% Report first error, if any, and
%% terminate
@@ -1837,20 +1838,31 @@ check_conf() ->
end.
check_conf_sys(Env) ->
- check_conf_sys(Env, [], []).
+ check_conf_sys(Env, [], [], []).
-check_conf_sys([File|T], SysEnv, Errors) when is_list(File) ->
+check_conf_sys([File|T], SysEnv, Errors, DName) when is_list(File),is_list(DName) ->
BFName = filename:basename(File, ".config"),
FName = filename:join(filename:dirname(File), BFName ++ ".config"),
- case load_file(FName) of
+ LName = case filename:pathtype(FName) of
+ relative when (DName =/= []) ->
+ % Check if the path is relative to the sys.config dir,
+ % otherwise use legacy mode, i.e. relative to cwd.
+ RName = filename:join(DName, FName),
+ case erl_prim_loader:read_file_info(RName) of
+ {ok, _} -> RName ;
+ error -> FName
+ end;
+ _ -> FName
+ end,
+ case load_file(LName) of
{ok, NewEnv} ->
- check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors);
+ check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors, DName);
{error, {Line, _Mod, Str}} ->
- check_conf_sys(T, SysEnv, [{error, {FName, Line, Str}}|Errors])
+ check_conf_sys(T, SysEnv, [{error, {LName, Line, Str}}|Errors], DName)
end;
-check_conf_sys([Tuple|T], SysEnv, Errors) ->
- check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors);
-check_conf_sys([], SysEnv, Errors) ->
+check_conf_sys([Tuple|T], SysEnv, Errors, DName) ->
+ check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors, DName);
+check_conf_sys([], SysEnv, Errors, _) ->
{ok, SysEnv, lists:reverse(Errors)}.
load_file(File) ->
diff --git a/lib/kernel/src/application_master.erl b/lib/kernel/src/application_master.erl
index 5da2b0b06c..06991b45e1 100644
--- a/lib/kernel/src/application_master.erl
+++ b/lib/kernel/src/application_master.erl
@@ -118,6 +118,10 @@ init(Parent, Starter, ApplData, Type) ->
link(Parent),
process_flag(trap_exit, true),
OldGleader = group_leader(),
+ %% We become the group leader, but forward all I/O to OldGleader.
+ %% This is just a way to identify processes that belong to the
+ %% application. Used for example to find ourselves from any
+ %% process, or, reciprocally, to kill them all when we terminate.
group_leader(self(), self()),
%% Insert ourselves as master for the process. This ensures that
%% the processes in the application can use get_env/1 at startup.
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
index 394c7b7320..cf8ea658e3 100644
--- a/lib/kernel/src/logger_disk_log_h.erl
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -26,7 +26,7 @@
-include("logger_h_common.hrl").
%%% API
--export([start_link/3, info/1, sync/1, reset/1]).
+-export([start_link/3, info/1, filesync/1, reset/1]).
%% gen_server callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -58,19 +58,19 @@ start_link(Name, Config, HandlerState) ->
%%%-----------------------------------------------------------------
%%%
--spec sync(Name) -> ok | {error,Reason} when
+-spec filesync(Name) -> ok | {error,Reason} when
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-sync(Name) when is_atom(Name) ->
+filesync(Name) when is_atom(Name) ->
try
gen_server:call(?name_to_reg_name(?MODULE,Name),
disk_log_sync, ?DEFAULT_CALL_TIMEOUT)
catch
_:{timeout,_} -> {error,handler_busy}
end;
-sync(Name) ->
- {error,{badarg,{sync,[Name]}}}.
+filesync(Name) ->
+ {error,{badarg,{filesync,[Name]}}}.
%%%-----------------------------------------------------------------
%%%
@@ -708,7 +708,7 @@ disk_log_sync(Name, State) ->
ok;
_ ->
LogOpts = maps:get(log_opts, State),
- logger_h_common:error_notify({Name,sync,
+ logger_h_common:error_notify({Name,filesync,
LogOpts,
SyncError})
end,
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
index d556938f02..f9f762405c 100644
--- a/lib/kernel/src/logger_h_common.erl
+++ b/lib/kernel/src/logger_h_common.erl
@@ -115,7 +115,7 @@ check_common_config({overload_kill_qlen,N}) when is_integer(N) ->
check_common_config({overload_kill_mem_size,N}) when is_integer(N) ->
valid;
check_common_config({overload_kill_restart_after,NorA}) when is_integer(NorA);
- NorA == never ->
+ NorA == infinity ->
valid;
check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA);
@@ -261,7 +261,7 @@ flush_log_events(Limit, Limit) ->
Limit;
flush_log_events(N, Limit) ->
%% flush log events but leave other events, such as
- %% file/disk_log_sync, info and change_config, so that these
+ %% filesync, info and change_config, so that these
%% have a chance to be processed even under heavy load
receive
{'$gen_cast',{log,_}} ->
diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl
index ad80b51109..e0a7b6e3ca 100644
--- a/lib/kernel/src/logger_h_common.hrl
+++ b/lib/kernel/src/logger_h_common.hrl
@@ -41,10 +41,10 @@
-define(OVERLOAD_KILL_MEM_SIZE, 3000000).
%% This is the default time that the handler will wait before
-%% restarting and accepting new requests. The value 'never'
+%% restarting and accepting new requests. The value 'infinity'
%% disables restarts.
-define(OVERLOAD_KILL_RESTART_AFTER, 5000).
-%%-define(OVERLOAD_KILL_RESTART_AFTER, never).
+%%-define(OVERLOAD_KILL_RESTART_AFTER, infinity).
%% The handler sends asynchronous write requests to the process
%% controlling the i/o device, but every once in this interval
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
index 801d05853a..2a37076dda 100644
--- a/lib/kernel/src/logger_std_h.erl
+++ b/lib/kernel/src/logger_std_h.erl
@@ -28,7 +28,7 @@
-include_lib("kernel/include/file.hrl").
%% API
--export([start_link/3, info/1, sync/1, reset/1]).
+-export([start_link/3, info/1, filesync/1, reset/1]).
%% gen_server and proc_lib callbacks
-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
@@ -60,19 +60,19 @@ start_link(Name, Config, HandlerState) ->
%%%-----------------------------------------------------------------
%%%
--spec sync(Name) -> ok | {error,Reason} when
+-spec filesync(Name) -> ok | {error,Reason} when
Name :: atom(),
Reason :: handler_busy | {badarg,term()}.
-sync(Name) when is_atom(Name) ->
+filesync(Name) when is_atom(Name) ->
try
gen_server:call(?name_to_reg_name(?MODULE,Name),
filesync, ?DEFAULT_CALL_TIMEOUT)
catch
_:{timeout,_} -> {error,handler_busy}
end;
-sync(Name) ->
- {error,{badarg,{sync,[Name]}}}.
+filesync(Name) ->
+ {error,{badarg,{filesync,[Name]}}}.
%%%-----------------------------------------------------------------
%%%
@@ -820,7 +820,7 @@ sync_dev(Fd, DevName, PrevSyncResult, HandlerName) ->
%% don't report same error twice
PrevSyncResult;
Error ->
- logger_h_common:error_notify({HandlerName,sync,DevName,Error}),
+ logger_h_common:error_notify({HandlerName,filesync,DevName,Error}),
Error
end.
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index 988f26280f..3d07d6d70d 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -37,7 +37,8 @@
-export([config_change/1, persistent_env/1,
distr_changed_tc1/1, distr_changed_tc2/1,
ensure_started/1, ensure_all_started/1,
- shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1]).
+ shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1,
+ config_relative_paths/1]).
-define(TESTCASE, testcase_name).
-define(testcase, proplists:get_value(?TESTCASE, Config)).
@@ -55,7 +56,7 @@ all() ->
script_start, nodedown_start, permit_false_start_local,
permit_false_start_dist, get_key, get_env, ensure_all_started,
{group, distr_changed}, config_change, shutdown_func, shutdown_timeout,
- shutdown_deadlock,
+ shutdown_deadlock, config_relative_paths,
persistent_env].
groups() ->
@@ -2075,6 +2076,42 @@ shutdown_deadlock(Config) when is_list(Config) ->
%%-----------------------------------------------------------------
+%% Relative paths in sys.config
+%%-----------------------------------------------------------------
+config_relative_paths(Config) ->
+ Dir = ?config(priv_dir,Config),
+ SubDir = filename:join(Dir,"subdir"),
+ Sys = filename:join(SubDir,"sys.config"),
+ ok = filelib:ensure_dir(Sys),
+ ok = file:write_file(Sys,"[\"../up.config\",\"current\"].\n"),
+
+ Up = filename:join(Dir,"up.config"),
+ ok = file:write_file(Up,"[{app1,[{key1,value}]}].\n"),
+
+ {ok,Cwd} = file:get_cwd(),
+ Current1 = filename:join(Cwd,"current.config"),
+ ok = file:write_file(Current1,"[{app1,[{key2,value1}]}].\n"),
+
+ N1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ {ok,Node1} = start_node(N1,filename:rootname(Sys)),
+ ok = rpc:call(Node1, application, load, [app1()]),
+ {ok, value} = rpc:call(Node1, application, get_env,[app1,key1]),
+ {ok, value1} = rpc:call(Node1, application, get_env,[app1,key2]),
+
+ Current2 = filename:join(SubDir,"current.config"),
+ ok = file:write_file(Current2,"[{app1,[{key2,value2}]}].\n"),
+
+ N2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_2"])),
+ {ok, Node2} = start_node(N2,filename:rootname(Sys)),
+ ok = rpc:call(Node2, application, load, [app1()]),
+ {ok, value} = rpc:call(Node2, application, get_env,[app1,key1]),
+ {ok, value2} = rpc:call(Node2, application, get_env,[app1,key2]),
+
+ stop_node_nice([Node1,Node2]),
+
+ ok.
+
+%%-----------------------------------------------------------------
%% Utility functions
%%-----------------------------------------------------------------
app0() ->
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
index bb2b5eed57..7e5b574869 100644
--- a/lib/kernel/test/logger_disk_log_h_SUITE.erl
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -45,10 +45,6 @@
-define(log_no(File,N), lists:concat([File,".",N])).
-define(domain,#{domain=>[?MODULE]}).
--define(SYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500;
- true -> ?FILESYNC_REPEAT_INTERVAL + 500
- end).
-
suite() ->
[{timetrap,{seconds,30}},
{ct_hooks,[logger_test_lib]}].
@@ -69,9 +65,10 @@ end_per_group(_Group, _Config) ->
init_per_testcase(TestHooksCase, Config) when
TestHooksCase == write_failure;
TestHooksCase == sync_failure ->
- if ?TEST_HOOKS_TAB == undefined ->
+ case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of
+ true ->
{skip,"Define the TEST_HOOKS macro to run this test"};
- true ->
+ false ->
ct:print("********** ~w **********", [TestHooksCase]),
Config
end;
@@ -143,7 +140,7 @@ create_log(Config) ->
formatter=>{?MODULE,self()}},
#{file=>LogFile1}),
logger:notice("hello", ?domain),
- logger_disk_log_h:sync(Name1),
+ logger_disk_log_h:filesync(Name1),
ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000),
@@ -156,7 +153,7 @@ create_log(Config) ->
formatter=>{?MODULE,self()}},
#{file=>LogFile2}),
logger:notice("dummy", ?domain),
- logger_disk_log_h:sync(Name2),
+ logger_disk_log_h:filesync(Name2),
ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]),
try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
@@ -177,7 +174,7 @@ open_existing_log(Config) ->
formatter=>{?MODULE,self()}},
#{file=>LogFile1}),
logger:notice("one", ?domain),
- logger_disk_log_h:sync(HName),
+ logger_disk_log_h:filesync(HName),
ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000),
logger:notice("two", ?domain),
@@ -191,7 +188,7 @@ open_existing_log(Config) ->
formatter=>{?MODULE,self()}},
#{file=>LogFile1}),
logger:notice("three", ?domain),
- logger_disk_log_h:sync(HName),
+ logger_disk_log_h:filesync(HName),
try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000),
remove_and_stop(HName),
try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000).
@@ -216,22 +213,22 @@ disk_log_opts(Config) ->
{WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1),
Get(size,WInfo1),Get(current_file,WInfo1)},
logger:notice("123", ?domain),
- logger_disk_log_h:sync(WName),
+ logger_disk_log_h:filesync(WName),
timer:sleep(500),
1 = Get(current_file, disk_log:info(WName)),
logger:notice("45", ?domain),
- logger_disk_log_h:sync(WName),
+ logger_disk_log_h:filesync(WName),
timer:sleep(500),
1 = Get(current_file, disk_log:info(WName)),
logger:notice("6", ?domain),
- logger_disk_log_h:sync(WName),
+ logger_disk_log_h:filesync(WName),
timer:sleep(500),
2 = Get(current_file, disk_log:info(WName)),
logger:notice("7890", ?domain),
- logger_disk_log_h:sync(WName),
+ logger_disk_log_h:filesync(WName),
timer:sleep(500),
2 = Get(current_file, disk_log:info(WName)),
@@ -249,7 +246,7 @@ disk_log_opts(Config) ->
{HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1),
Get(size,HInfo1)},
logger:notice("12345", ?domain),
- logger_disk_log_h:sync(HName1),
+ logger_disk_log_h:filesync(HName1),
timer:sleep(500),
1 = Get(no_written_items, disk_log:info(HName1)),
@@ -426,8 +423,8 @@ config_fail(cleanup,_Config) ->
logger:remove_handler(?MODULE).
bad_input(_Config) ->
- {error,{badarg,{sync,["BadType"]}}} =
- logger_disk_log_h:sync("BadType"),
+ {error,{badarg,{filesync,["BadType"]}}} =
+ logger_disk_log_h:filesync("BadType"),
{error,{badarg,{info,["BadType"]}}} = logger_disk_log_h:info("BadType"),
{error,{badarg,{reset,["BadType"]}}} = logger_disk_log_h:reset("BadType").
@@ -475,7 +472,7 @@ reconfig(Config) ->
overload_kill_enable => true,
overload_kill_qlen => 100000,
overload_kill_mem_size => 10000000,
- overload_kill_restart_after => never,
+ overload_kill_restart_after => infinity,
filesync_repeat_interval => no_repeat},
ok = logger:set_handler_config(?MODULE, config, HConfig1),
#{id := ?MODULE,
@@ -488,7 +485,7 @@ reconfig(Config) ->
overload_kill_enable := true,
overload_kill_qlen := 100000,
overload_kill_mem_size := 10000000,
- overload_kill_restart_after := never,
+ overload_kill_restart_after := infinity,
filesync_repeat_interval := no_repeat} =
logger_disk_log_h:info(?MODULE),
@@ -545,7 +542,7 @@ sync(Config) ->
logger:notice("second", ?domain),
logger:notice("third", ?domain),
%% do explicit sync
- logger_disk_log_h:sync(?MODULE),
+ logger_disk_log_h:filesync(?MODULE),
check_tracer(100),
%% check that if there's no repeated disk_log_sync active,
@@ -758,9 +755,15 @@ write_failure(Config) ->
ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]),
ok = log_on_remote_node(Node, "Logged1"),
- rpc:call(Node, logger_disk_log_h, sync, [?STANDARD_HANDLER]),
+ rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
?check_no_log,
- try_read_file(Log, {ok,<<"Logged1\n">>}, ?SYNC_REP_INT),
+
+ SyncRepInt = case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
+ true -> 5500;
+ false -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end,
+
+ try_read_file(Log, {ok,<<"Logged1\n">>}, SyncRepInt),
rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
@@ -778,9 +781,9 @@ write_failure(Config) ->
rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
ok = log_on_remote_node(Node, "Logged2"),
- rpc:call(Node, logger_disk_log_h, sync, [?STANDARD_HANDLER]),
+ rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
?check_no_log,
- try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?SYNC_REP_INT),
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, SyncRepInt),
ok.
write_failure(cleanup, _Config) ->
Nodes = nodes(),
@@ -814,7 +817,7 @@ sync_failure(Config) ->
rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
- ?check({error,{?STANDARD_HANDLER,sync,LogOpts,{error,no_such_log}}}),
+ ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,{error,no_such_log}}}),
ok = log_on_remote_node(Node, "No second error printout"),
?check_no_log,
@@ -822,7 +825,7 @@ sync_failure(Config) ->
rpc:call(Node, ?MODULE, set_result,
[disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
- ?check({error,{?STANDARD_HANDLER,sync,LogOpts,
+ ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,
{error,{blocked_log,?STANDARD_HANDLER}}}}),
rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
@@ -1143,7 +1146,7 @@ restart_after(Config) ->
NewHConfig1 =
HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
- overload_kill_restart_after=>never}},
+ overload_kill_restart_after=>infinity}},
ok = logger:set_handler_config(?MODULE, NewHConfig1),
MRef1 = erlang:monitor(process, whereis(h_proc_name())),
%% kill handler
@@ -1200,7 +1203,7 @@ handler_requests_under_load(Config) ->
flush_qlen => 2000,
burst_limit_enable => false}},
ok = logger:set_handler_config(?MODULE, NewHConfig),
- Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{sync,[]},
+ Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
{info,[]},
{reset,[]},
{change_config,[]}])
@@ -1435,10 +1438,10 @@ wait_until_written(File, Sz) ->
{ok,#file_info{size = Sz}} ->
timer:sleep(1000),
case file:read_file_info(File) of
- {ok,#file_info{size = Sz1}} ->
+ {ok,#file_info{size = Sz}} ->
ok;
- {ok,#file_info{size = Sz2}} ->
- wait_until_written(File, Sz2)
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
end;
{ok,#file_info{size = Sz1}} ->
wait_until_written(File, Sz1)
diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl
index 3a8560bbf5..04a4364947 100644
--- a/lib/kernel/test/logger_env_var_SUITE.erl
+++ b/lib/kernel/test/logger_env_var_SUITE.erl
@@ -531,7 +531,7 @@ logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) ->
file,% dest
0,% progress in std logger
error), % level
- ok = rpc:call(Node,logger_std_h,sync,[info]),
+ ok = rpc:call(Node,logger_std_h,filesync,[info]),
{ok, Bin} = file:read_file(LogInfo),
ct:log("Log content:~n~s",[Bin]),
match(Bin,<<"info:">>,NumProgress,info,info),
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index 76f8a35406..ca54458ac1 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -45,10 +45,6 @@
-define(bin(Msg), list_to_binary(Msg++"\n")).
-define(domain,#{domain=>[?MODULE]}).
--define(FILESYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500;
- true -> ?FILESYNC_REPEAT_INTERVAL + 500
- end).
-
suite() ->
[{timetrap,{seconds,30}},
{ct_hooks,[logger_test_lib]}].
@@ -73,9 +69,10 @@ end_per_group(_Group, _Config) ->
init_per_testcase(TestHooksCase, Config) when
TestHooksCase == write_failure;
TestHooksCase == sync_failure ->
- if ?TEST_HOOKS_TAB == undefined ->
+ case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of
+ true ->
{skip,"Define the TEST_HOOKS macro to run this test"};
- true ->
+ false ->
ct:print("********** ~w **********", [TestHooksCase]),
Config
end;
@@ -187,13 +184,13 @@ add_remove_instance_file(Log, Type) ->
logger:notice(M1=?msg,?domain),
?check(M1),
B1 = ?bin(M1),
- try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT),
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
ok = logger:remove_handler(?MODULE),
timer:sleep(500),
undefined = whereis(h_proc_name()),
logger:notice(?msg,?domain),
?check_no_log,
- try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT),
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
ok.
default_formatter(_Config) ->
@@ -411,7 +408,7 @@ crash_std_h(cleanup) ->
[test_server:stop_node(Node) || Node <- Nodes].
sync_and_read(Node,disk_log,Log) ->
- rpc:call(Node,logger_disk_log_h,sync,[?STANDARD_HANDLER]),
+ rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]),
case file:read_file(Log ++ ".1") of
{ok,<<>>} ->
timer:sleep(5000),
@@ -420,7 +417,7 @@ sync_and_read(Node,disk_log,Log) ->
Ok
end;
sync_and_read(Node,file,Log) ->
- rpc:call(Node,logger_std_h,sync,[?STANDARD_HANDLER]),
+ rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
case file:read_file(Log) of
{ok,<<>>} ->
timer:sleep(5000),
@@ -430,7 +427,7 @@ sync_and_read(Node,file,Log) ->
end.
bad_input(_Config) ->
- {error,{badarg,{sync,["BadType"]}}} = logger_std_h:sync("BadType"),
+ {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType"),
{error,{badarg,{info,["BadType"]}}} = logger_std_h:info("BadType"),
{error,{badarg,{reset,["BadType"]}}} = logger_std_h:reset("BadType").
@@ -473,7 +470,7 @@ reconfig(Config) ->
overload_kill_enable => true,
overload_kill_qlen => 100000,
overload_kill_mem_size => 10000000,
- overload_kill_restart_after => never,
+ overload_kill_restart_after => infinity,
filesync_repeat_interval => no_repeat}),
#{id := ?MODULE,
type := standard_io,
@@ -487,7 +484,7 @@ reconfig(Config) ->
overload_kill_enable := true,
overload_kill_qlen := 100000,
overload_kill_mem_size := 10000000,
- overload_kill_restart_after := never,
+ overload_kill_restart_after := infinity,
filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE),
ok.
@@ -517,7 +514,7 @@ file_opts(Config) ->
logger:notice(M1=?msg,?domain),
?check(M1),
B1 = ?bin(M1),
- try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT),
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
ok.
file_opts(cleanup, _Config) ->
logger:remove_handler(?MODULE).
@@ -544,7 +541,7 @@ sync(Config) ->
logger:notice("first", ?domain),
%% wait for automatic filesync
- check_tracer(?FILESYNC_REP_INT*2),
+ check_tracer(filesync_rep_int()*2),
%% check that explicit filesync is only done once
start_tracer([{logger_std_h, write_to_dev, 5},
@@ -557,9 +554,9 @@ sync(Config) ->
]),
logger:notice("second", ?domain),
%% do explicit sync
- logger_std_h:sync(?MODULE),
+ logger_std_h:filesync(?MODULE),
%% a second sync should be ignored
- logger_std_h:sync(?MODULE),
+ logger_std_h:filesync(?MODULE),
check_tracer(100),
%% check that if there's no repeated filesync active,
@@ -618,9 +615,9 @@ write_failure(Config) ->
rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
ok = log_on_remote_node(Node, "Logged1"),
- rpc:call(Node, logger_std_h, sync, [?STANDARD_HANDLER]),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
?check_no_log,
- try_read_file(Log, {ok,<<"Logged1\n">>}, ?FILESYNC_REP_INT),
+ try_read_file(Log, {ok,<<"Logged1\n">>}, filesync_rep_int()),
rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
@@ -636,9 +633,9 @@ write_failure(Config) ->
rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
ok = log_on_remote_node(Node, "Logged2"),
- rpc:call(Node, logger_std_h, sync, [?STANDARD_HANDLER]),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
?check_no_log,
- try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?FILESYNC_REP_INT),
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, filesync_rep_int()),
ok.
write_failure(cleanup, _Config) ->
Nodes = nodes(),
@@ -667,14 +664,14 @@ sync_failure(Config) ->
rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
- ?check({error,{?STANDARD_HANDLER,sync,Log,{error,terminated}}}),
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}),
ok = log_on_remote_node(Node, "No second error printout"),
?check_no_log,
rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
- ?check({error,{?STANDARD_HANDLER,sync,Log,{error,eacces}}}),
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}),
rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
ok = log_on_remote_node(Node, "Logged2"),
@@ -1069,7 +1066,7 @@ restart_after(Config) ->
NewHConfig1 =
HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
- overload_kill_restart_after=>never}},
+ overload_kill_restart_after=>infinity}},
ok = logger:set_handler_config(?MODULE, NewHConfig1),
MRef1 = erlang:monitor(process, whereis(h_proc_name())),
%% kill handler
@@ -1127,7 +1124,7 @@ handler_requests_under_load(Config) ->
flush_qlen => 2000,
burst_limit_enable => false}},
ok = logger:set_handler_config(?MODULE, NewHConfig),
- Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{sync,[]},
+ Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
{info,[]},
{reset,[]},
{change_config,[]}])
@@ -1211,10 +1208,10 @@ wait_until_written(File, Sz) ->
{ok,#file_info{size = Sz}} ->
timer:sleep(1000),
case file:read_file_info(File) of
- {ok,#file_info{size = Sz1}} ->
+ {ok,#file_info{size = Sz}} ->
ok;
- {ok,#file_info{size = Sz2}} ->
- wait_until_written(File, Sz2)
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
end;
{ok,#file_info{size = Sz1}} ->
wait_until_written(File, Sz1)
@@ -1334,7 +1331,7 @@ add_remove_instance_nofile(Type) ->
logger:notice(M1=?msg,?domain),
?check(M1),
%% check that sync doesn't do damage even if not relevant
- ok = logger_std_h:sync(?MODULE),
+ ok = logger_std_h:filesync(?MODULE),
ok = logger:remove_handler(?MODULE),
timer:sleep(500),
undefined = whereis(h_proc_name()),
@@ -1583,7 +1580,7 @@ wait_for_process_up(Name,T) ->
N = (T div 500) + 1,
wait_for_process_up1(Name,N).
-wait_for_process_up1(Name,0) ->
+wait_for_process_up1(_Name,0) ->
error;
wait_for_process_up1(Name,N) ->
timer:sleep(500),
@@ -1595,3 +1592,9 @@ wait_for_process_up1(Name,N) ->
%% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]),
wait_for_process_up1(Name,N-1)
end.
+
+filesync_rep_int() ->
+ case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
+ true -> 5500;
+ false -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end.
diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl
index 9097453c10..81eb9ce5eb 100644
--- a/lib/kernel/test/logger_test_lib.erl
+++ b/lib/kernel/test/logger_test_lib.erl
@@ -52,10 +52,10 @@ log(Node, M, F, A) ->
rpc:call(Node, M, F, A ++ [MD]).
sync_and_read(Node,disk_log,Log) ->
- rpc:call(Node,logger_disk_log_h,sync,[?STANDARD_HANDLER]),
+ rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]),
file:read_file(Log ++ ".1");
sync_and_read(Node, file,Log) ->
- ok = rpc:call(Node,logger_std_h,sync,[?STANDARD_HANDLER]),
+ ok = rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
file:read_file(Log).
diff --git a/lib/public_key/doc/src/public_key.xml b/lib/public_key/doc/src/public_key.xml
index 5d57109140..524869df16 100644
--- a/lib/public_key/doc/src/public_key.xml
+++ b/lib/public_key/doc/src/public_key.xml
@@ -969,35 +969,76 @@ fun(#'DistributionPoint'{}, #'CertificateList'{},
</type>
<desc>
<p>This function checks that the <i>Presented Identifier</i> (e.g hostname) in a peer certificate
- is in agreement with the <i>Reference Identifier</i> that the client expects to be connected to.
+ is in agreement with at least one of the <i>Reference Identifiers</i> that the client expects to be connected to.
The function is intended to be added as an extra client check of the peer certificate when performing
<seealso marker="public_key:public_key#pkix_path_validation-3">public_key:pkix_path_validation/3</seealso>
</p>
<p>See <url href="https://tools.ietf.org/html/rfc6125">RFC 6125</url>
for detailed information about hostname verification.
- The <seealso marker="using_public_key#verify_hostname">User's Manual</seealso>
+ The <seealso marker="using_public_key#verify_hostname">User's Guide</seealso>
and
<seealso marker="using_public_key#verify_hostname_examples">code examples</seealso>
describe this function in more detail.
</p>
<p>The <c>{OtherRefId,term()}</c> is defined by the user and is passed to the <c>match_fun</c>, if defined.
- If that term is a binary, it will be converted to a string.
+ If the term in <c>OtherRefId</c> is a binary, it will be converted to a string.
</p>
<p>The <c>ip</c> Reference ID takes an <seealso marker="inet:inet#type-ip_address">inet:ip_address()</seealso>
or an ip address in string format (E.g "10.0.1.1" or "1234::5678:9012") as second element.
</p>
- <p>See <seealso marker="#pkix_verify_hostname_match_fun-1">pkix_verify_hostname_match_fun/1</seealso> for a
- function that return a fun suitable for this option.
- </p>
+ <p>The options are:</p>
+ <taglist>
+ <tag><c>match_fun</c></tag>
+ <item>
+ The <c>fun/2</c> in this option replaces the default host name matching rules. The fun should return a
+ boolean telling whether the Reference ID and Presented ID match or not. The fun can also return a third
+ value, the atom <c>default</c>, if the default matching rules shall apply.
+ This makes it possible to augment the tests with a special case:
+ <code>
+fun(....) -> true; % My special case
+ (_, _) -> default % all others fall back to the inherited tests
+end
+ </code>
+ <br/>See <seealso marker="#pkix_verify_hostname_match_fun-1">pkix_verify_hostname_match_fun/1</seealso> for a
+ function that takes a protocol name as argument and returns a <c>fun/2</c> suitable for this option and
+ <seealso marker="using_public_key#redefining_match_op">Re-defining the match operation</seealso>
+ in the User's Guide for an example.
+ </item>
+
+ <tag><c>fail_callback</c></tag>
+ <item>If the matching fails, there could be circumstances when the certificate should be accepted anyway. Think for
+ example of a web browser where you choose to accept an outdated certificate. This option enables implementation
+ of such a function. This <c>fun/1</c> is called when no <c>ReferenceID</c> matches. The return value of the fun
+ (a <c>boolean()</c>) decides the outcome. If <c>true</c>, the certificate is accepted, otherwise
+ it is rejected. See
+ <seealso marker="using_public_key#-pinning--a-certificate">"Pinning" a Certificate</seealso>
+ in the User's Guide, and the usage sketch after this list.
+ </item>
+
+ <tag><c>fqdn_fun</c></tag>
+ <item>This option augments the host name extraction from URIs and other Reference IDs. A Reference ID could, for
+ example, be a very special URI that is not standardised. The fun takes a Reference ID as argument and returns one of:
+ <list>
+ <item>the hostname</item>
+ <item>the atom <c>default</c>: the default host name extraction function will be used</item>
+ <item>the atom <c>undefined</c>: a host name could not be extracted, and pkix_verify_hostname/3
+ then returns <c>false</c>.</item>
+ </list>
+ <br/>For an example, see
+ <seealso marker="using_public_key#hostname_extraction">Hostname extraction</seealso>
+ in the User's Guide.
+ </item>
+ </taglist>
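+ <p>A minimal usage sketch of the <c>fail_callback</c> option (the
+ certificate variable and the reference identifier are only for illustration):</p>
+ <code>
+public_key:pkix_verify_hostname(
+    OtpCert,                        % a decoded peer certificate
+    [{dns_id,"example.com"}],
+    [{fail_callback,
+      fun(_PeerCert) ->
+              false % reject; return true to accept the certificate anyway
+      end}]).
+ </code>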
+
</desc>
</func>
<func>
- <name>pkix_verify_hostname_match_fun(Alg) -> fun(RefId | FQDN::string(), PresentedID) -> boolean() | default</name>
+ <name>pkix_verify_hostname_match_fun(Protocol) -> fun(RefId | FQDN::string(), PresentedID) -> boolean() | default</name>
<fsummary>Returns a fun that is intended as argument to the match_fun option in pkix_verify_hostname/3.
</fsummary>
<type>
- <v>Alg = https</v>
+ <v>Protocol = https</v>
<d>The algorithm for which the fun should implement the special matching rules</d>
<v>RefId</v>
<d>See <seealso marker="#pkix_verify_hostname-3">pkix_verify_hostname/3</seealso>.</d>
diff --git a/lib/public_key/doc/src/using_public_key.xml b/lib/public_key/doc/src/using_public_key.xml
index 417d479da3..b936b7fcb5 100644
--- a/lib/public_key/doc/src/using_public_key.xml
+++ b/lib/public_key/doc/src/using_public_key.xml
@@ -570,6 +570,7 @@ true = public_key:verify(Digest, none, Signature, PublicKey),</code>
<c>fqdn_fun</c> and <c>match_fun</c>.
</p>
<section>
+ <marker id="hostname_extraction"></marker>
<title>Hostname extraction</title>
<p>The <c>fqdn_fun</c> extracts hostnames (Fully Qualified Domain Names) from uri_id
or other ReferenceIDs that are not pre-defined in the public_key function.
@@ -595,7 +596,8 @@ true = public_key:verify(Digest, none, Signature, PublicKey),</code>
</code>
</section>
<section>
- <title>Re-defining the match operations</title>
+ <marker id="redefining_match_op"></marker>
+ <title>Re-defining the match operation</title>
<p>The default matching handles dns_id and uri_id. In an uri_id the value is tested for
equality with a value from the <c>Subject Alternate Name</c>. If some other kind of matching
is needed, use the <c>match_fun</c> option.
diff --git a/lib/snmp/doc/src/notes.xml b/lib/snmp/doc/src/notes.xml
index 8d48cb911d..f64e0cca97 100644
--- a/lib/snmp/doc/src/notes.xml
+++ b/lib/snmp/doc/src/notes.xml
@@ -34,7 +34,23 @@
</header>
- <section><title>SNMP 5.2.10</title>
+ <section><title>SNMP 5.2.11</title>
+
+ <section><title>Fixed Bugs and Malfunctions</title>
+ <list>
+ <item>
+ <p>
+ The SNMP MIB compiler now allows using a
+ TEXTUAL-CONVENTION type before defining it.</p>
+ <p>
+ Own Id: OTP-14196 Aux Id: ERIERL-161 </p>
+ </item>
+ </list>
+ </section>
+
+</section>
+
+<section><title>SNMP 5.2.10</title>
<section><title>Fixed Bugs and Malfunctions</title>
<list>
diff --git a/lib/snmp/vsn.mk b/lib/snmp/vsn.mk
index 2c97683625..96123f02f5 100644
--- a/lib/snmp/vsn.mk
+++ b/lib/snmp/vsn.mk
@@ -19,6 +19,6 @@
# %CopyrightEnd%
APPLICATION = snmp
-SNMP_VSN = 5.2.10
+SNMP_VSN = 5.2.11
PRE_VSN =
APP_VSN = "$(APPLICATION)-$(SNMP_VSN)$(PRE_VSN)"
diff --git a/lib/ssl/doc/src/ssl.xml b/lib/ssl/doc/src/ssl.xml
index adf4fb9ba4..19436b2100 100644
--- a/lib/ssl/doc/src/ssl.xml
+++ b/lib/ssl/doc/src/ssl.xml
@@ -896,16 +896,7 @@ fun(srp, Username :: string(), UserState :: term()) ->
<v>Type = erlang | openssl | all</v>
</type>
<desc>
- <p>Returns a list of supported cipher suites.
- This function will become deprecated in OTP 21, and replaced
- by <seealso marker="#cipher_suites-2">ssl:cipher-suites/2</seealso>
- <c>cipher_suites()</c> is equivalent to <c>cipher_suites(erlang).</c>
- Type <c>openssl</c> is provided for backwards compatibility with the
- old SSL, which used OpenSSL. <c>cipher_suites(all)</c> returns
- all available cipher suites. The cipher suites not present
- in <c>cipher_suites(erlang)</c> but included in
- <c>cipher_suites(all)</c> are not used unless explicitly configured
- by the user.</p>
+ <p>Deprecated in OTP 21, use <seealso marker="#cipher_suites-2">ssl:cipher_suites/2</seealso> instead.</p>
</desc>
</func>
@@ -948,7 +939,7 @@ fun(srp, Username :: string(), UserState :: term()) ->
<func>
<name>connect(Socket, SslOptions) -> </name>
- <name>connect(Socket, SslOptions, Timeout) -> {ok, TLSSocket} | {ok, TLSSocket, Ext}
+ <name>connect(Socket, SslOptions, Timeout) -> {ok, SslSocket} | {ok, SslSocket, Ext}
| {error, Reason}</name>
<fsummary>Upgrades a <c>gen_tcp</c>, or
equivalent, connected socket to an TLS socket.</fsummary>
@@ -956,7 +947,7 @@ fun(srp, Username :: string(), UserState :: term()) ->
<v>Socket = socket()</v>
<v>SslOptions = [{handshake, hello| full} | ssl_option()]</v>
<v>Timeout = integer() | infinity</v>
- <v>TLSSocket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
<v>Ext = hello_extensions()</v>
<v>Reason = term()</v>
</type>
@@ -973,8 +964,8 @@ fun(srp, Username :: string(), UserState :: term()) ->
<p> If the option <c>{handshake, hello}</c> is used the
handshake is paused after receiving the server hello message
- and the success response is <c>{ok, TLSSocket, Ext}</c>
- instead of <c>{ok, TLSSocket}</c>. Thereafter the handshake is continued or
+ and the success response is <c>{ok, SslSocket, Ext}</c>
+ instead of <c>{ok, SslSocket}</c>. Thereafter the handshake is continued or
canceled by calling <seealso marker="#handshake_continue-3">
<c>handshake_continue/3</c></seealso> or <seealso
marker="#handshake_cancel-1"><c>handshake_cancel/1</c></seealso>.
@@ -986,7 +977,7 @@ fun(srp, Username :: string(), UserState :: term()) ->
<func>
<name>connect(Host, Port, Options) -></name>
<name>connect(Host, Port, Options, Timeout) ->
- {ok, SslSocket}| {ok, TLSSocket, Ext} | {error, Reason}</name>
+ {ok, SslSocket}| {ok, SslSocket, Ext} | {error, Reason}</name>
<fsummary>Opens an TLS/DTLS connection to <c>Host</c>, <c>Port</c>.</fsummary>
<type>
<v>Host = host()</v>
@@ -1017,8 +1008,8 @@ fun(srp, Username :: string(), UserState :: term()) ->
<p> If the option <c>{handshake, hello}</c> is used the
handshake is paused after receiving the server hello message
- and the success response is <c>{ok, TLSSocket, Ext}</c>
- instead of <c>{ok, TLSSocket}</c>. Thereafter the handshake is continued or
+ and the success response is <c>{ok, SslSocket, Ext}</c>
+ instead of <c>{ok, SslSocket}</c>. Thereafter the handshake is continued or
canceled by calling <seealso marker="#handshake_continue-3">
<c>handshake_continue/3</c></seealso> or <seealso
marker="#handshake_cancel-1"><c>handshake_cancel/1</c></seealso>.
@@ -1074,6 +1065,7 @@ fun(srp, Username :: string(), UserState :: term()) ->
<fsummary>Returns all the connection information.
</fsummary>
<type>
+ <v>SslSocket = sslsocket()</v>
<v>Item = protocol | cipher_suite | sni_hostname | ecc | session_id | atom()</v>
<d>Meaningful atoms, not specified above, are the ssl option names.</d>
<v>Result = [{Item::atom(), Value::term()}]</v>
@@ -1091,6 +1083,7 @@ fun(srp, Username :: string(), UserState :: term()) ->
<fsummary>Returns the requested connection information.
</fsummary>
<type>
+ <v>SslSocket = sslsocket()</v>
<v>Items = [Item]</v>
<v>Item = protocol | cipher_suite | sni_hostname | ecc | session_id | client_random
| server_random | master_secret | atom()</v>
@@ -1133,7 +1126,7 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
<func>
- <name>getopts(Socket, OptionNames) ->
+ <name>getopts(SslSocket, OptionNames) ->
{ok, [socketoption()]} | {error, Reason}</name>
<fsummary>Gets the values of the specified options.</fsummary>
<type>
@@ -1147,13 +1140,13 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
<func>
- <name>getstat(Socket) ->
+ <name>getstat(SslSocket) ->
{ok, OptionValues} | {error, inet:posix()}</name>
- <name>getstat(Socket, OptionNames) ->
+ <name>getstat(SslSocket, OptionNames) ->
{ok, OptionValues} | {error, inet:posix()}</name>
<fsummary>Get one or more statistic options for a socket</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
<v>OptionNames = [atom()]</v>
<v>OptionValues = [{inet:stat_option(), integer()}]</v>
</type>
@@ -1164,28 +1157,27 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
<func>
- <name>handshake(Socket) -> </name>
- <name>handshake(Socket, Timeout) -> {ok, Socket} | {error, Reason}</name>
+ <name>handshake(HsSocket) -> </name>
+ <name>handshake(HsSocket, Timeout) -> {ok, SslSocket} | {error, Reason}</name>
<fsummary>Performs server-side SSL/TLS handshake.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>HsSocket = SslSocket = sslsocket()</v>
<v>Timeout = integer()</v>
<v>Reason = term()</v>
</type>
<desc>
<p>Performs the SSL/TLS/DTLS server-side handshake.</p>
- <p><c>Socket</c> is a socket as returned by
- <seealso marker="#transport_accept-2">ssl:transport_accept/[1,2]</seealso>.
- </p>
+ <p>Returns a new TLS/DTLS socket if the handshake is successful.</p>
</desc>
</func>
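      A minimal server-side sketch of the accept-and-handshake flow described
      above; the port number and certificate files are placeholders:
      <code type="erl">
{ok, ListenSocket} =
    ssl:listen(9999, [binary, {active, false}, {reuseaddr, true},
                      {certfile, "cert.pem"}, {keyfile, "key.pem"}]),
%% Accept the TCP connection ...
{ok, HsSocket} = ssl:transport_accept(ListenSocket),
%% ... and run the TLS/DTLS server-side handshake on it.
{ok, SslSocket} = ssl:handshake(HsSocket, 10000).</code>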
<func>
<name>handshake(Socket, SslOptions) -> </name>
- <name>handshake(Socket, SslOptions, Timeout) -> {ok, Socket} | {ok, Socket, Ext} | {error, Reason}</name>
+ <name>handshake(Socket, SslOptions, Timeout) -> {ok, SslSocket} | {ok, SslSocket, Ext} | {error, Reason}</name>
<fsummary>Performs server-side SSL/TLS/DTLS handshake.</fsummary>
<type>
<v>Socket = socket() | sslsocket() </v>
+ <v>SslSocket = sslsocket() </v>
<v>Ext = hello_extensions()</v>
	<v>SslOptions = [{handshake, hello | full} | ssl_option()]</v>
<v>Timeout = integer()</v>
@@ -1194,22 +1186,23 @@ fun(srp, Username :: string(), UserState :: term()) ->
<desc>
	<p>If <c>Socket</c> is an ordinary <c>socket()</c>: upgrades a <c>gen_tcp</c>,
or equivalent, socket to an SSL socket, that is, performs
- the SSL/TLS server-side handshake and returns the SSL socket.</p>
+ the SSL/TLS server-side handshake and returns a TLS socket.</p>
- <warning><p>The Socket shall be in passive mode ({active,
- false}) before calling this function or the handshake can fail
- due to a race condition.</p></warning>
+	  <warning><p>The <c>Socket</c> shall be in passive mode ({active,
+	  false}) before calling this function, or else its behavior
+	  is undefined.
+	  </p></warning>
<p>If <c>Socket</c> is an <c>sslsocket()</c>: provides extra SSL/TLS/DTLS
options to those specified in
<seealso marker="#listen-2">ssl:listen/2 </seealso> and then performs
- the SSL/TLS/DTLS handshake.</p>
-
+ the SSL/TLS/DTLS handshake. Returns a new TLS/DTLS socket if the handshake is successful.</p>
+
<p>
If option <c>{handshake, hello}</c> is specified the handshake is
paused after receiving the client hello message and the
- sucess response is <c>{ok, TLSSocket, Ext}</c> instead of <c>{ok,
- TLSSocket}</c>. Thereafter the handshake is continued or
+ success response is <c>{ok, SslSocket, Ext}</c> instead of <c>{ok,
+ SslSocket}</c>. Thereafter the handshake is continued or
canceled by calling <seealso marker="#handshake_continue-3">
<c>handshake_continue/3</c></seealso> or <seealso
marker="#handshake_cancel-1"><c>handshake_cancel/1</c></seealso>.
@@ -1218,10 +1211,10 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
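      For illustration, a sketch of upgrading an accepted gen_tcp socket as
      described above; note that the socket stays in passive mode until the
      handshake returns (port and file names are placeholders):
      <code type="erl">
{ok, Listen} = gen_tcp:listen(9999, [binary, {active, false}, {reuseaddr, true}]),
{ok, TcpSocket} = gen_tcp:accept(Listen),
%% The TCP socket must remain in {active, false} until the handshake is done.
{ok, SslSocket} =
    ssl:handshake(TcpSocket, [{certfile, "cert.pem"},
                              {keyfile, "key.pem"}], 10000),
ok = ssl:setopts(SslSocket, [{active, once}]).</code>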
<func>
- <name>handshake_cancel(Socket) -> ok </name>
+ <name>handshake_cancel(SslSocket) -> ok </name>
<fsummary>Cancel handshake with a fatal alert</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
</type>
<desc>
<p>Cancel the handshake with a fatal <c>USER_CANCELED</c> alert.</p>
@@ -1229,10 +1222,11 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
<func>
- <name>handshake_continue(Socket, SSLOptions, Timeout) -> {ok, Socket} | {error, Reason}</name>
+      <name>handshake_continue(HsSocket, SslOptions) -> {ok, SslSocket} | {error, Reason}</name>
+      <name>handshake_continue(HsSocket, SslOptions, Timeout) -> {ok, SslSocket} | {error, Reason}</name>
<fsummary>Continue the SSL/TLS handshake.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>HsSocket = SslSocket = sslsocket()</v>
<v>SslOptions = [ssl_option()]</v>
<v>Timeout = integer()</v>
<v>Reason = term()</v>
@@ -1257,10 +1251,10 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
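      A sketch of the paused server-side handshake completed with
      handshake_continue/3; the <c>TcpSocket</c> variable and the
      opts_for_client_hello/1 helper are hypothetical:
      <code type="erl">
%% Pause after the client hello ...
{ok, HsSocket, Exts} =
    ssl:handshake(TcpSocket, [{handshake, hello},
                              {certfile, "cert.pem"},
                              {keyfile, "key.pem"}], 10000),
%% ... choose extra options, for example based on the SNI extension,
%% and finish the handshake.
ExtraOpts = opts_for_client_hello(Exts),   %% hypothetical
{ok, SslSocket} = ssl:handshake_continue(HsSocket, ExtraOpts, 10000).</code>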
<func>
- <name>negotiated_protocol(Socket) -> {ok, Protocol} | {error, protocol_not_negotiated}</name>
+ <name>negotiated_protocol(SslSocket) -> {ok, Protocol} | {error, protocol_not_negotiated}</name>
<fsummary>Returns the protocol negotiated through ALPN or NPN extensions.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
<v>Protocol = binary()</v>
</type>
<desc>
@@ -1271,10 +1265,10 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
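      As an illustration, assuming ALPN protocols were advertised when connecting:
      <code type="erl">
{ok, SslSocket} =
    ssl:connect("example.com", 443,
                [binary, {active, false},
                 {alpn_advertised_protocols, [&lt;&lt;"h2">>, &lt;&lt;"http/1.1">>]}]),
case ssl:negotiated_protocol(SslSocket) of
    {ok, Protocol} ->
        Protocol;                          %% for example &lt;&lt;"h2">>
    {error, protocol_not_negotiated} ->
        no_alpn
end.</code>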
<func>
- <name>peercert(Socket) -> {ok, Cert} | {error, Reason}</name>
+ <name>peercert(SslSocket) -> {ok, Cert} | {error, Reason}</name>
<fsummary>Returns the peer certificate.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
<v>Cert = binary()</v>
</type>
<desc>
@@ -1285,11 +1279,11 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
<func>
- <name>peername(Socket) -> {ok, {Address, Port}} |
+ <name>peername(SslSocket) -> {ok, {Address, Port}} |
{error, Reason}</name>
<fsummary>Returns the peer address and port.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
<v>Address = ipaddress()</v>
<v>Port = integer()</v>
</type>
@@ -1335,12 +1329,12 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
<func>
- <name>recv(Socket, Length) -> </name>
- <name>recv(Socket, Length, Timeout) -> {ok, Data} | {error,
+ <name>recv(SslSocket, Length) -> </name>
+ <name>recv(SslSocket, Length, Timeout) -> {ok, Data} | {error,
Reason}</name>
<fsummary>Receives data on a socket.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
<v>Length = integer()</v>
<v>Timeout = integer()</v>
<v>Data = [char()] | binary()</v>
@@ -1362,10 +1356,10 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
<func>
- <name>renegotiate(Socket) -> ok | {error, Reason}</name>
+ <name>renegotiate(SslSocket) -> ok | {error, Reason}</name>
<fsummary>Initiates a new handshake.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
</type>
<desc><p>Initiates a new handshake. A notable return value is
<c>{error, renegotiation_rejected}</c> indicating that the peer
@@ -1375,10 +1369,10 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
<func>
- <name>send(Socket, Data) -> ok | {error, Reason}</name>
+ <name>send(SslSocket, Data) -> ok | {error, Reason}</name>
<fsummary>Writes data to a socket.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
<v>Data = iodata()</v>
</type>
<desc>
@@ -1389,10 +1383,10 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
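      A small sketch combining send/2 and recv/2,3 on a connected socket in
      passive mode:
      <code type="erl">
ok = ssl:send(SslSocket, &lt;&lt;"ping">>),
%% Length 0 means "whatever is available"; the timeout is in milliseconds.
case ssl:recv(SslSocket, 0, 5000) of
    {ok, Data}      -> {reply, Data};
    {error, Reason} -> {error, Reason}
end.</code>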
<func>
- <name>setopts(Socket, Options) -> ok | {error, Reason}</name>
+ <name>setopts(SslSocket, Options) -> ok | {error, Reason}</name>
<fsummary>Sets socket options.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
	<v>Options = [socketoption()]</v>
</type>
<desc>
@@ -1402,10 +1396,10 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
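      A sketch of the common <c>{active, once}</c> pattern using setopts/2;
      handle/1 is a hypothetical application callback:
      <code type="erl">
ok = ssl:setopts(SslSocket, [{active, once}]),
receive
    {ssl, SslSocket, Data} ->
        handle(Data),                      %% hypothetical
        ok = ssl:setopts(SslSocket, [{active, once}]);
    {ssl_closed, SslSocket} ->
        ok;
    {ssl_error, SslSocket, Reason} ->
        {error, Reason}
end.</code>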
<func>
- <name>shutdown(Socket, How) -> ok | {error, Reason}</name>
+ <name>shutdown(SslSocket, How) -> ok | {error, Reason}</name>
<fsummary>Immediately closes a socket.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
<v>How = read | write | read_write</v>
<v>Reason = reason()</v>
</type>
@@ -1420,19 +1414,16 @@ fun(srp, Username :: string(), UserState :: term()) ->
</func>
<func>
- <name>ssl_accept(Socket) -> </name>
- <name>ssl_accept(Socket, Timeout) -> ok | {error, Reason}</name>
+ <name>ssl_accept(SslSocket) -> </name>
+ <name>ssl_accept(SslSocket, Timeout) -> ok | {error, Reason}</name>
<fsummary>Performs server-side SSL/TLS handshake.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
<v>Timeout = integer()</v>
<v>Reason = term()</v>
</type>
<desc>
- <p>Performs the SSL/TLS/DTLS server-side handshake.</p>
- <p><c>Socket</c> is a socket as returned by
- <seealso marker="#transport_accept-2">ssl:transport_accept/[1,2]</seealso>
- </p>
+      <p>Deprecated in OTP 21; use <seealso marker="#handshake-1">ssl:handshake/[1,2]</seealso> instead.</p>
</desc>
</func>
@@ -1447,29 +1438,16 @@ fun(srp, Username :: string(), UserState :: term()) ->
<v>Reason = term()</v>
</type>
<desc>
- <p>If <c>Socket</c> is a <c>socket()</c>: upgrades a <c>gen_tcp</c>,
- or equivalent, socket to an SSL socket, that is, performs
- the SSL/TLS server-side handshake and returns the SSL socket.</p>
-
- <warning><p>The listen socket is to be in mode <c>{active, false}</c>
- before telling the client that the server is ready to upgrade
- by calling this function, else the upgrade succeeds or does not
- succeed depending on timing.</p></warning>
-
- <p>If <c>Socket</c> is an <c>sslsocket()</c>: provides extra SSL/TLS/DTLS
- options to those specified in
- <seealso marker="#listen-2">ssl:listen/2 </seealso> and then performs
- the SSL/TLS/DTLS handshake.
- </p>
+      <p>Deprecated in OTP 21; use <seealso marker="#handshake-3">ssl:handshake/[2,3]</seealso> instead.</p>
</desc>
</func>
<func>
- <name>sockname(Socket) -> {ok, {Address, Port}} |
+ <name>sockname(SslSocket) -> {ok, {Address, Port}} |
{error, Reason}</name>
<fsummary>Returns the local address and port.</fsummary>
<type>
- <v>Socket = sslsocket()</v>
+ <v>SslSocket = sslsocket()</v>
<v>Address = ipaddress()</v>
<v>Port = integer()</v>
</type>
@@ -1515,11 +1493,11 @@ fun(srp, Username :: string(), UserState :: term()) ->
<func>
<name>transport_accept(ListenSocket) -></name>
<name>transport_accept(ListenSocket, Timeout) ->
- {ok, NewSocket} | {error, Reason}</name>
+ {ok, SslSocket} | {error, Reason}</name>
<fsummary>Accepts an incoming connection and
      prepares for <c>handshake</c>.</fsummary>
<type>
- <v>ListenSocket = NewSocket = sslsocket()</v>
+ <v>ListenSocket = SslSocket = sslsocket()</v>
<v>Timeout = integer()</v>
<v>Reason = reason()</v>
</type>
diff --git a/lib/ssl/doc/src/using_ssl.xml b/lib/ssl/doc/src/using_ssl.xml
index 3ef33df719..b2d649042b 100644
--- a/lib/ssl/doc/src/using_ssl.xml
+++ b/lib/ssl/doc/src/using_ssl.xml
@@ -66,7 +66,7 @@ ssl:listen(9999, [{certfile, "cert.pem"}, {keyfile, "key.pem"},{reuseaddr, true}
{ok,{sslsocket, [...]}}</code>
<p><em>Step 3:</em> Do a transport accept on the TLS listen socket:</p>
- <code type="erl">3 server> {ok, Socket} = ssl:transport_accept(ListenSocket).
+ <code type="erl">3 server> {ok, TLSTransportSocket} = ssl:transport_accept(ListenSocket).
{ok,{sslsocket, [...]}}</code>
<p><em>Step 4:</em> Start the client side: </p>
@@ -77,7 +77,7 @@ ok</code>
{ok,{sslsocket, [...]}}</code>
<p><em>Step 5:</em> Do the TLS handshake:</p>
- <code type="erl">4 server> ok = ssl:ssl_accept(Socket).
+ <code type="erl">4 server> {ok, Socket} = ssl:handshake(TLSTransportSocket).
{ok,{sslsocket,[...]}}</code>
<p><em>Step 6:</em> Send a message over TLS:</p>
@@ -126,7 +126,7 @@ ok</code>
ok</code>
<p><em>Step 6:</em> Do the TLS handshake:</p>
- <code type="erl">5 server> {ok, TLSSocket} = ssl:ssl_accept(Socket, [{cacertfile, "cacerts.pem"},
+ <code type="erl">5 server> {ok, TLSSocket} = ssl:handshake(Socket, [{cacertfile, "cacerts.pem"},
{certfile, "cert.pem"}, {keyfile, "key.pem"}]).
{ok,{sslsocket,[...]}}</code>
diff --git a/lib/ssl/src/ssl.erl b/lib/ssl/src/ssl.erl
index eb5b351dd3..f5e5336f81 100644
--- a/lib/ssl/src/ssl.erl
+++ b/lib/ssl/src/ssl.erl
@@ -40,7 +40,7 @@
%% Socket handling
-export([connect/3, connect/2, connect/4,
listen/2, transport_accept/1, transport_accept/2,
- handshake/1, handshake/2, handshake/3,
+ handshake/1, handshake/2, handshake/3, handshake_continue/2,
handshake_continue/3, handshake_cancel/1,
ssl_accept/1, ssl_accept/2, ssl_accept/3,
controlling_process/2, peername/1, peercert/1, sockname/1,
@@ -259,6 +259,16 @@ handshake(Socket, SslOptions, Timeout) when is_port(Socket),
Error = {error, _Reason} -> Error
end.
+
+%%--------------------------------------------------------------------
+-spec handshake_continue(#sslsocket{}, [ssl_option()]) ->
+ {ok, #sslsocket{}} | {error, reason()}.
+%%
+%%
+%% Description: Continues the handshake, possibly with newly supplied options.
+%%--------------------------------------------------------------------
+handshake_continue(Socket, SSLOptions) ->
+ handshake_continue(Socket, SSLOptions, infinity).
%%--------------------------------------------------------------------
-spec handshake_continue(#sslsocket{}, [ssl_option()], timeout()) ->
{ok, #sslsocket{}} | {error, reason()}.
diff --git a/lib/ssl/test/ssl_test_lib.erl b/lib/ssl/test/ssl_test_lib.erl
index 1e88ca15de..8532b9ac0f 100644
--- a/lib/ssl/test/ssl_test_lib.erl
+++ b/lib/ssl/test/ssl_test_lib.erl
@@ -309,7 +309,7 @@ client_cont_loop(Node, Host, Port, Pid, Transport, Options, ContOpts, Opts) ->
case rpc:call(Node, Transport, connect, [Host, Port, Options]) of
{ok, Socket0, _} ->
ct:log("~p:~p~nClient: handshake_continue(~p, ~p, infinity) ~n", [?MODULE, ?LINE, Socket0, ContOpts]),
- case rpc:call(Node, Transport, handshake_continue, [Socket0, ContOpts, infinity]) of
+ case rpc:call(Node, Transport, handshake_continue, [Socket0, ContOpts]) of
{ok, Socket} ->
Pid ! {connected, Socket},
{Module, Function, Args} = proplists:get_value(mfa, Opts),
@@ -1170,13 +1170,13 @@ rsa_suites(CounterPart) ->
lists:member(cipher_atom(Cipher), Ciphers);
({ecdhe_rsa, Cipher, _}) when ECC == true ->
lists:member(cipher_atom(Cipher), Ciphers);
+ ({ecdhe_rsa, Cipher, _,_}) when ECC == true ->
+ lists:member(cipher_atom(Cipher), Ciphers);
({rsa, Cipher, _, _}) ->
lists:member(cipher_atom(Cipher), Ciphers);
({dhe_rsa, Cipher, _,_}) ->
lists:member(cipher_atom(Cipher), Ciphers);
- ({ecdhe_rsa, Cipher, _,_}) when ECC == true ->
- lists:member(cipher_atom(Cipher), Ciphers);
- (_) ->
+ (_) ->
false
end,
common_ciphers(CounterPart)).
@@ -1530,7 +1530,7 @@ is_sane_ecc(crypto) ->
true
end;
is_sane_ecc(_) ->
- true.
+ sufficient_crypto_support(cipher_ec).
is_fips(openssl) ->
VersionStr = os:cmd("openssl version"),
@@ -1601,11 +1601,7 @@ openssl_sane_dtls() ->
false;
"OpenSSL 1.0.2k-freebsd" ++ _ ->
false;
- "OpenSSL 1.0.2d" ++ _ ->
- false;
- "OpenSSL 1.0.2n" ++ _ ->
- false;
- "OpenSSL 1.0.2m" ++ _ ->
+ "OpenSSL 1.0.2" ++ _ ->
false;
"OpenSSL 1.0.0" ++ _ ->
false;
diff --git a/lib/stdlib/src/gen_statem.erl b/lib/stdlib/src/gen_statem.erl
index 19d22f24f0..faa43fbc1e 100644
--- a/lib/stdlib/src/gen_statem.erl
+++ b/lib/stdlib/src/gen_statem.erl
@@ -1448,13 +1448,13 @@ loop_event_done(
[?sys_debug(
Debug_0,
{S#state.name,State},
- {postpone,Event_0,State}),
+ {postpone,Event_0,NextState}),
Event_0|P_0];
false ->
[?sys_debug(
Debug_0,
{S#state.name,State},
- {consume,Event_0,State})|P_0]
+ {consume,Event_0,NextState})|P_0]
end,
{Events_2,P_2,Timers_2} =
%% Move all postponed events to queue,