49 files changed, 1279 insertions(+), 796 deletions(-)
diff --git a/bootstrap/lib/kernel/ebin/application_controller.beam b/bootstrap/lib/kernel/ebin/application_controller.beam Binary files differindex 4cc77ec55c..ed876f4d6a 100644 --- a/bootstrap/lib/kernel/ebin/application_controller.beam +++ b/bootstrap/lib/kernel/ebin/application_controller.beam diff --git a/bootstrap/lib/kernel/ebin/logger_server.beam b/bootstrap/lib/kernel/ebin/logger_server.beam Binary files differindex 72a14acce3..4885c0992a 100644 --- a/bootstrap/lib/kernel/ebin/logger_server.beam +++ b/bootstrap/lib/kernel/ebin/logger_server.beam diff --git a/bootstrap/lib/kernel/ebin/net_kernel.beam b/bootstrap/lib/kernel/ebin/net_kernel.beam Binary files differindex 03627a3d53..0a565d542d 100644 --- a/bootstrap/lib/kernel/ebin/net_kernel.beam +++ b/bootstrap/lib/kernel/ebin/net_kernel.beam diff --git a/bootstrap/lib/stdlib/ebin/gen_statem.beam b/bootstrap/lib/stdlib/ebin/gen_statem.beam Binary files differindex ebdb70b93c..1c1b93a036 100644 --- a/bootstrap/lib/stdlib/ebin/gen_statem.beam +++ b/bootstrap/lib/stdlib/ebin/gen_statem.beam diff --git a/bootstrap/lib/stdlib/ebin/io_lib_pretty.beam b/bootstrap/lib/stdlib/ebin/io_lib_pretty.beam Binary files differindex de468aacca..dc91c318c5 100644 --- a/bootstrap/lib/stdlib/ebin/io_lib_pretty.beam +++ b/bootstrap/lib/stdlib/ebin/io_lib_pretty.beam diff --git a/bootstrap/lib/stdlib/ebin/maps.beam b/bootstrap/lib/stdlib/ebin/maps.beam Binary files differindex 85e997bb1c..ec0ebce58a 100644 --- a/bootstrap/lib/stdlib/ebin/maps.beam +++ b/bootstrap/lib/stdlib/ebin/maps.beam diff --git a/bootstrap/lib/stdlib/ebin/otp_internal.beam b/bootstrap/lib/stdlib/ebin/otp_internal.beam Binary files differindex ab99005d7a..c42b18f6cf 100644 --- a/bootstrap/lib/stdlib/ebin/otp_internal.beam +++ b/bootstrap/lib/stdlib/ebin/otp_internal.beam diff --git a/bootstrap/lib/stdlib/ebin/uri_string.beam b/bootstrap/lib/stdlib/ebin/uri_string.beam Binary files differindex e14164a823..36aec511d8 100644 --- a/bootstrap/lib/stdlib/ebin/uri_string.beam +++ b/bootstrap/lib/stdlib/ebin/uri_string.beam diff --git a/erts/doc/src/notes.xml b/erts/doc/src/notes.xml index f7f86084a9..b9de00a8af 100644 --- a/erts/doc/src/notes.xml +++ b/erts/doc/src/notes.xml @@ -31,6 +31,26 @@ </header> <p>This document describes the changes made to the ERTS application.</p> +<section><title>Erts 9.3.3</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + Fixed bug in <c>ets</c> that could cause VM crash if + process A terminates after fixating a table and process B + deletes the table at "the same time". 
The table fixation + could be done with <c>ets:safe_fixtable</c> or if process + A terminates in the middle of a long running + <c>select</c> or <c>match</c> call.</p> + <p> + Own Id: OTP-15109</p> + </item> + </list> + </section> + +</section> + <section><title>Erts 9.3.2</title> <section><title>Fixed Bugs and Malfunctions</title> diff --git a/erts/vsn.mk b/erts/vsn.mk index 687c62343e..9222b74f81 100644 --- a/erts/vsn.mk +++ b/erts/vsn.mk @@ -18,7 +18,7 @@ # %CopyrightEnd% # -VSN = 9.3.2 +VSN = 9.3.3 # Port number 4365 in 4.2 # Port number 4366 in 4.3 diff --git a/lib/common_test/src/ct_run.erl b/lib/common_test/src/ct_run.erl index 223aa06e64..c9d406f1fd 100644 --- a/lib/common_test/src/ct_run.erl +++ b/lib/common_test/src/ct_run.erl @@ -1909,7 +1909,8 @@ auto_compile(TestSuites) -> SuiteMakeErrors = lists:flatmap(fun({TestDir,Suite} = TS) -> case run_make(suites, TestDir, - Suite, UserInclude) of + Suite, UserInclude, + [nowarn_export_all]) of {error,{make_failed,Bad}} -> [{TS,Bad}]; {error,_} -> @@ -1927,7 +1928,7 @@ auto_compile(TestSuites) -> case lists:member(Dir, Done) of false -> Failed1 = - case run_make(helpmods, Dir, Suite, UserInclude) of + case run_make(helpmods, Dir, Suite, UserInclude, []) of {error,{make_failed,BadMods}} -> [{{Dir,all},BadMods}|Failed]; {error,_} -> @@ -2669,12 +2670,12 @@ get_name(Dir) -> run_make(TestDir, Mod, UserInclude) -> - run_make(suites, TestDir, Mod, UserInclude). + run_make(suites, TestDir, Mod, UserInclude, [nowarn_export_all]). -run_make(Targets, TestDir0, Mod, UserInclude) when is_list(Mod) -> - run_make(Targets, TestDir0, list_to_atom(Mod), UserInclude); +run_make(Targets, TestDir0, Mod, UserInclude, COpts) when is_list(Mod) -> + run_make(Targets, TestDir0, list_to_atom(Mod), UserInclude, COpts); -run_make(Targets, TestDir0, Mod, UserInclude) -> +run_make(Targets, TestDir0, Mod, UserInclude, COpts) -> case locate_test_dir(TestDir0, Mod) of {ok,TestDir} -> %% send a start_make notification which may suspend @@ -2689,7 +2690,7 @@ run_make(Targets, TestDir0, Mod, UserInclude) -> XmerlInclude = get_dir(xmerl, "include"), ErlFlags = UserInclude ++ [{i,CtInclude}, {i,XmerlInclude}, - debug_info], + debug_info] ++ COpts, Result = if Mod == all ; Targets == helpmods -> case (catch ct_make:all([noexec|ErlFlags])) of diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c index ef7830262f..0a2a235209 100644 --- a/lib/crypto/c_src/crypto.c +++ b/lib/crypto/c_src/crypto.c @@ -3068,202 +3068,187 @@ static ERL_NIF_TERM rsa_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {/* (PrivKey|undefined, DHParams=[P,G], Mpint, Len|0) */ - DH* dh_params = NULL; - int pub_len, prv_len; - unsigned char *pub_ptr, *prv_ptr; - ERL_NIF_TERM ret, ret_pub, ret_prv, head, tail; + DH *dh_params = NULL; int mpint; /* 0 or 4 */ - BIGNUM *priv_key_in = NULL; - BIGNUM *dh_p = NULL, *dh_g = NULL; - unsigned long len = 0; -#ifdef HAS_EVP_PKEY_CTX - EVP_PKEY_CTX *ctx = NULL; - EVP_PKEY *dhkey = NULL, - *params = NULL; -#endif - if (!(get_bn_from_bin(env, argv[0], &priv_key_in) - || argv[0] == atom_undefined) - || !enif_get_list_cell(env, argv[1], &head, &tail) - || !get_bn_from_bin(env, head, &dh_p) - || !enif_get_list_cell(env, tail, &head, &tail) - || !get_bn_from_bin(env, head, &dh_g) - || !enif_is_empty_list(env, tail) - || !enif_get_int(env, argv[2], &mpint) || (mpint & ~4) - || !enif_get_ulong(env, argv[3], &len) - - /* Load dh_params with values to use by the 
generator. - Mem mgmnt transfered from dh_p etc to dh_params */ - || !(dh_params = DH_new()) - || (priv_key_in && !DH_set0_key(dh_params, NULL, priv_key_in)) - || !DH_set0_pqg(dh_params, dh_p, NULL, dh_g) - ) { - if (priv_key_in) BN_free(priv_key_in); - if (dh_p) BN_free(dh_p); - if (dh_g) BN_free(dh_g); - if (dh_params) DH_free(dh_params); - return enif_make_badarg(env); - } - - if (len) { - if (len < BN_num_bits(dh_p)) - DH_set_length(dh_params, len); - else { + { + ERL_NIF_TERM head, tail; + BIGNUM + *dh_p = NULL, + *dh_g = NULL, + *priv_key_in = NULL; + unsigned long + len = 0; + + if (!(get_bn_from_bin(env, argv[0], &priv_key_in) + || argv[0] == atom_undefined) + || !enif_get_list_cell(env, argv[1], &head, &tail) + || !get_bn_from_bin(env, head, &dh_p) + || !enif_get_list_cell(env, tail, &head, &tail) + || !get_bn_from_bin(env, head, &dh_g) + || !enif_is_empty_list(env, tail) + || !enif_get_int(env, argv[2], &mpint) || (mpint & ~4) + || !enif_get_ulong(env, argv[3], &len) + + /* Load dh_params with values to use by the generator. + Mem mgmnt transfered from dh_p etc to dh_params */ + || !(dh_params = DH_new()) + || (priv_key_in && !DH_set0_key(dh_params, NULL, priv_key_in)) + || !DH_set0_pqg(dh_params, dh_p, NULL, dh_g) + ) { if (priv_key_in) BN_free(priv_key_in); if (dh_p) BN_free(dh_p); if (dh_g) BN_free(dh_g); if (dh_params) DH_free(dh_params); return enif_make_badarg(env); } + + if (len) { + if (len < BN_num_bits(dh_p)) + DH_set_length(dh_params, len); + else { + if (priv_key_in) BN_free(priv_key_in); + if (dh_p) BN_free(dh_p); + if (dh_g) BN_free(dh_g); + if (dh_params) DH_free(dh_params); + return enif_make_badarg(env); + } + } } #ifdef HAS_EVP_PKEY_CTX - if ((dhkey = EVP_PKEY_new()) - && (params = EVP_PKEY_new()) - && EVP_PKEY_set1_DH(params, dh_params) /* set the key referenced by params to dh_params. - dh_params (and params) must be freed */ - && (ctx = EVP_PKEY_CTX_new(params, NULL)) - && EVP_PKEY_keygen_init(ctx) - && EVP_PKEY_keygen(ctx, &dhkey) /* "performs a key generation operation, the - generated key is written to ppkey." (=last arg) */ - && (dh_params = EVP_PKEY_get1_DH(dhkey)) /* return the referenced key. dh_params and dhkey must be freed */ - ) { + { + EVP_PKEY_CTX *ctx; + EVP_PKEY *dhkey, *params; + int success; + + params = EVP_PKEY_new(); + success = EVP_PKEY_set1_DH(params, dh_params); /* set the key referenced by params to dh_params... */ + DH_free(dh_params); /* ...dh_params (and params) must be freed */ + if (!success) return atom_error; + + ctx = EVP_PKEY_CTX_new(params, NULL); + EVP_PKEY_free(params); + if (!ctx) { + return atom_error; + } + + if (!EVP_PKEY_keygen_init(ctx)) { + /* EVP_PKEY_CTX_free(ctx); */ + return atom_error; + } + + dhkey = EVP_PKEY_new(); + if (!EVP_PKEY_keygen(ctx, &dhkey)) { /* "performs a key generation operation, the ... */ + /*... generated key is written to ppkey." (=last arg) */ + /* EVP_PKEY_CTX_free(ctx); */ + /* EVP_PKEY_free(dhkey); */ + return atom_error; + } + + dh_params = EVP_PKEY_get1_DH(dhkey); /* return the referenced key. 
dh_params and dhkey must be freed */ + EVP_PKEY_free(dhkey); + if (!dh_params) { + /* EVP_PKEY_CTX_free(ctx); */ + return atom_error; + } + EVP_PKEY_CTX_free(ctx); + } #else - if (DH_generate_key(dh_params)) { -#endif + if (!DH_generate_key(dh_params)) return atom_error; +#endif + { + unsigned char *pub_ptr, *prv_ptr; + int pub_len, prv_len; + ERL_NIF_TERM ret_pub, ret_prv; const BIGNUM *pub_key_gen, *priv_key_gen; - - DH_get0_key(dh_params, - &pub_key_gen, &priv_key_gen); /* Get pub_key_gen and priv_key_gen. - "The values point to the internal representation of - the public key and private key values. This memory + + DH_get0_key(dh_params, + &pub_key_gen, &priv_key_gen); /* Get pub_key_gen and priv_key_gen. + "The values point to the internal representation of + the public key and private key values. This memory should not be freed directly." says man */ - pub_len = BN_num_bytes(pub_key_gen); - prv_len = BN_num_bytes(priv_key_gen); - pub_ptr = enif_make_new_binary(env, pub_len+mpint, &ret_pub); - prv_ptr = enif_make_new_binary(env, prv_len+mpint, &ret_prv); - if (mpint) { - put_int32(pub_ptr, pub_len); pub_ptr += 4; - put_int32(prv_ptr, prv_len); prv_ptr += 4; - } - BN_bn2bin(pub_key_gen, pub_ptr); - BN_bn2bin(priv_key_gen, prv_ptr); - ERL_VALGRIND_MAKE_MEM_DEFINED(pub_ptr, pub_len); - ERL_VALGRIND_MAKE_MEM_DEFINED(prv_ptr, prv_len); - ret = enif_make_tuple2(env, ret_pub, ret_prv); - } - else { - ret = atom_error; - } + pub_len = BN_num_bytes(pub_key_gen); + prv_len = BN_num_bytes(priv_key_gen); + pub_ptr = enif_make_new_binary(env, pub_len+mpint, &ret_pub); + prv_ptr = enif_make_new_binary(env, prv_len+mpint, &ret_prv); + if (mpint) { + put_int32(pub_ptr, pub_len); pub_ptr += 4; + put_int32(prv_ptr, prv_len); prv_ptr += 4; + } + BN_bn2bin(pub_key_gen, pub_ptr); + BN_bn2bin(priv_key_gen, prv_ptr); + ERL_VALGRIND_MAKE_MEM_DEFINED(pub_ptr, pub_len); + ERL_VALGRIND_MAKE_MEM_DEFINED(prv_ptr, prv_len); - DH_free(dh_params); -#ifdef HAS_EVP_PKEY_CTX - if (ctx) EVP_PKEY_CTX_free(ctx); - if (dhkey) EVP_PKEY_free(dhkey); - if (params) EVP_PKEY_free(params); -#endif - return ret; + DH_free(dh_params); + + return enif_make_tuple2(env, ret_pub, ret_prv); + } } static ERL_NIF_TERM dh_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {/* (OthersPublicKey, MyPrivateKey, DHParams=[P,G]) */ - BIGNUM *dummy_pub_key = NULL, - *priv_key = NULL, - *other_pub_key = NULL, + BIGNUM *other_pub_key = NULL, *dh_p = NULL, *dh_g = NULL; - ErlNifBinary ret_bin; - ERL_NIF_TERM ret, head, tail; - DH *dh_priv = DH_new(), *dh_pub = DH_new(); -#ifdef HAS_EVP_PKEY_CTX - EVP_PKEY_CTX *ctx = NULL; - EVP_PKEY *my_priv_key = NULL, *peer_pub_key = NULL; - size_t skeylen; -#else - int i; -#endif - - if (!get_bn_from_bin(env, argv[0], &other_pub_key) - || !get_bn_from_bin(env, argv[1], &priv_key) - || !enif_get_list_cell(env, argv[2], &head, &tail) - || !get_bn_from_bin(env, head, &dh_p) - || !enif_get_list_cell(env, tail, &head, &tail) - || !get_bn_from_bin(env, head, &dh_g) - || !enif_is_empty_list(env, tail) - - /* Note: DH_set0_key() does not allow setting only the - * private key, although DH_compute_key() does not use the - * public key. Work around this limitation by setting - * the public key to a copy of the private key. 
- */ - || !(dummy_pub_key = BN_dup(priv_key)) - || !DH_set0_key(dh_priv, dummy_pub_key, priv_key) - || !DH_set0_pqg(dh_priv, dh_p, NULL, dh_g) - ) { - if (dh_p) BN_free(dh_p); - if (dh_g) BN_free(dh_g); - if (other_pub_key) BN_free(other_pub_key); - if (dummy_pub_key) BN_free(dummy_pub_key); - if (priv_key) BN_free(priv_key); - return enif_make_badarg(env); - } + DH *dh_priv = DH_new(); -#ifdef HAS_EVP_PKEY_CTX - if (!(my_priv_key = EVP_PKEY_new()) - || !EVP_PKEY_set1_DH(my_priv_key, dh_priv) /* set the key referenced by my_priv_key to dh_priv. - dh_priv (and my_priv_key) must be freed by us*/ - - || !(peer_pub_key = EVP_PKEY_new()) - || !DH_set0_key(dh_pub, other_pub_key, NULL) - || !DH_set0_pqg(dh_pub, dh_p, NULL, dh_g) - || !EVP_PKEY_set1_DH(peer_pub_key, dh_pub) - - || !(ctx = EVP_PKEY_CTX_new(my_priv_key, NULL)) - || (EVP_PKEY_derive_init(ctx) <= 0) - || (EVP_PKEY_derive_set_peer(ctx, peer_pub_key) <= 0) - || (EVP_PKEY_derive(ctx, NULL, &skeylen) <= 0)) { + /* Check the arguments and get + my private key (dh_priv), + the peer's public key (other_pub_key), + the parameters p & q + */ - ret = atom_error; - } - else { - enif_alloc_binary(skeylen, &ret_bin); - - if ((EVP_PKEY_derive(ctx, ret_bin.data, &skeylen) > 0) - && (ret_bin.size >= skeylen)) { - /* Derivation succeded */ - if (ret_bin.size > skeylen) enif_realloc_binary(&ret_bin, skeylen); - ret = enif_make_binary(env, &ret_bin); + { + BIGNUM *dummy_pub_key = NULL, + *priv_key = NULL; + ERL_NIF_TERM head, tail; + + if (!get_bn_from_bin(env, argv[0], &other_pub_key) + || !get_bn_from_bin(env, argv[1], &priv_key) + || !enif_get_list_cell(env, argv[2], &head, &tail) + || !get_bn_from_bin(env, head, &dh_p) + || !enif_get_list_cell(env, tail, &head, &tail) + || !get_bn_from_bin(env, head, &dh_g) + || !enif_is_empty_list(env, tail) + + /* Note: DH_set0_key() does not allow setting only the + * private key, although DH_compute_key() does not use the + * public key. Work around this limitation by setting + * the public key to a copy of the private key. 
+ */ + || !(dummy_pub_key = BN_dup(priv_key)) + || !DH_set0_key(dh_priv, dummy_pub_key, priv_key) + || !DH_set0_pqg(dh_priv, dh_p, NULL, dh_g) + ) { + if (dh_p) BN_free(dh_p); + if (dh_g) BN_free(dh_g); + if (other_pub_key) BN_free(other_pub_key); + if (dummy_pub_key) BN_free(dummy_pub_key); + if (priv_key) BN_free(priv_key); + return enif_make_badarg(env); } - else { + } + { + ErlNifBinary ret_bin; + int size; + + enif_alloc_binary(DH_size(dh_priv), &ret_bin); + size = DH_compute_key(ret_bin.data, other_pub_key, dh_priv); + BN_free(other_pub_key); + DH_free(dh_priv); + if (size<=0) { enif_release_binary(&ret_bin); - ret = atom_error; + return atom_error; } - } -#else - enif_alloc_binary(DH_size(dh_priv), &ret_bin); - i = DH_compute_key(ret_bin.data, other_pub_key, dh_priv); - if (i > 0) { - if (i != ret_bin.size) enif_realloc_binary(&ret_bin, i); - ret = enif_make_binary(env, &ret_bin); + if (size != ret_bin.size) enif_realloc_binary(&ret_bin, size); + return enif_make_binary(env, &ret_bin); } - else { - enif_release_binary(&ret_bin); - ret = atom_error; - } -#endif - - if (other_pub_key) BN_free(other_pub_key); - if (dh_priv) DH_free(dh_priv); - if (dh_pub) DH_free(dh_pub); -#ifdef HAS_EVP_PKEY_CTX - if (ctx) EVP_PKEY_CTX_free(ctx); - if (my_priv_key) EVP_PKEY_free(my_priv_key); - /* if (peer_pub_key) EVP_PKEY_free(peer_pub_key); */ -#endif - return ret; } + static ERL_NIF_TERM srp_value_B_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[]) {/* (Multiplier, Verifier, Generator, Exponent, Prime) */ BIGNUM *bn_verifier = NULL; diff --git a/lib/hipe/rtl/hipe_rtl_lcm.erl b/lib/hipe/rtl/hipe_rtl_lcm.erl index af39c9a0a4..2c8cc80e56 100644 --- a/lib/hipe/rtl/hipe_rtl_lcm.erl +++ b/lib/hipe/rtl/hipe_rtl_lcm.erl @@ -267,14 +267,17 @@ try_insert_expr_last(CFG0, Label, Instr) -> %% with the new code inserted second to last (assuming the last expression %% is a branch operation). insert_expr_last_work(_Instr, [#call{}]) -> - %% Call instructions clobber all expressions; we musn't insert the expression - %% before it + %% Call instructions clobber all expressions; we must not insert the + %% expression before it not_safe; insert_expr_last_work(Instr, [Code1]) -> %% We insert the code next to last. [Instr, Code1]; insert_expr_last_work(Instr, [Code|Codes]) -> - [Code|insert_expr_last_work(Instr, Codes)]. + case insert_expr_last_work(Instr, Codes) of + not_safe -> not_safe; + NewCodes -> [Code|NewCodes] + end. %%============================================================================= %% Inserts expression first in the block for the given label. diff --git a/lib/hipe/test/basic_SUITE_data/basic_issues_hipe.erl b/lib/hipe/test/basic_SUITE_data/basic_issues_hipe.erl index e71045bfe2..fc87abb54e 100644 --- a/lib/hipe/test/basic_SUITE_data/basic_issues_hipe.erl +++ b/lib/hipe/test/basic_SUITE_data/basic_issues_hipe.erl @@ -8,8 +8,9 @@ -export([test/0]). -%% functions that need to be exported so that they are retained. --export([auth/4]). +%% functions that need to be exported so that they are retained and/or +%% not specialized away by the compiler. +-export([auth/4, wxSizer_replace/2, parent_class/1]). test() -> ok = test_dominance_trees(), @@ -18,6 +19,7 @@ test() -> ok = test_bif_fails(), ok = test_find_catches(), ok = test_heap_allocate_trim(), + ok = wxSizer_replace(), ok. %%-------------------------------------------------------------------- @@ -151,3 +153,25 @@ get_next_retry(Error, Count) -> end. pair(A, B) -> {A, B}. 
+ +%%-------------------------------------------------------------------- +%% Date: June 11, 2018 +%% +%% Stripped down test case (from `wxSizer') that crashed the lazy code +%% motion pass of the HiPE compiler in a pre-release of Erlang/OTP 21. +%% A similar crash existed in `ssl_correction'. +%%-------------------------------------------------------------------- + +wxSizer_replace() -> + wxSizer_replace(?MODULE, ?MODULE). + +-define(CLASS(Type, Class), ((Type) =:= Class) orelse (Type):parent_class(Class)). + +wxSizer_replace(OldwinT, NewwinT) -> % this function was the culprit + ?CLASS(OldwinT, ?MODULE), + ?CLASS(NewwinT, ?MODULE), + ok. + +parent_class(wxWindow) -> true; +parent_class(wxEvtHandler) -> true; +parent_class(_Class) -> erlang:error({badtype, ?MODULE}). diff --git a/lib/kernel/doc/src/config.xml b/lib/kernel/doc/src/config.xml index 8850c1736b..3f01170508 100644 --- a/lib/kernel/doc/src/config.xml +++ b/lib/kernel/doc/src/config.xml @@ -86,8 +86,13 @@ <tag><c>File = string()</c></tag> <item>Name of another <c>.config</c> file. Extension <c>.config</c> can be omitted. It is - recommended to use absolute paths. A relative path is - relative the current working directory of the emulator.</item> + recommended to use absolute paths. If a relative path is used, + <c>File</c> is searched, first, relative from <c>sys.config</c> directory, then relative + to the current working directory of the emulator, for backward compatibility. + This allow to use a <c>sys.config</c> pointing out other <c>.config</c> files in a release + or in a node started manually using <c>-config ...</c> with same result whatever + the current working directory. + </item> </taglist> <p>When traversing the contents of <c>sys.config</c> and a filename is encountered, its contents are read and merged with the result diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml index 7f35a5d752..8940c89ab8 100644 --- a/lib/kernel/doc/src/logger.xml +++ b/lib/kernel/doc/src/logger.xml @@ -86,6 +86,14 @@ logger:error("error happened because: ~p", [Reason]). % Without macro <item>built-in filters, see <seealso marker="logger_filters">logger_filters</seealso>.</item> </list> + + <note> + <p>Since Logger is new in Erlang/OTP 21.0, we do reserve the right + to introduce changes to the Logger API and functionality in + patches following this release. These changes might or might not + be backwards compatible with the initial version.</p> + </note> + </description> <datatypes> @@ -939,7 +947,8 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)). <funcs> <func> - <name>HModule:adding_handler(Config1) -> {ok, Config2} | {error, Reason}</name> + <name>HModule:adding_handler(Config1) -> {ok, Config2} | {error, + Reason}</name> <fsummary>An instance of this handler is about to be added.</fsummary> <type> <v>Config1 = Config2 = @@ -948,9 +957,10 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)). </type> <desc> <p>This callback function is optional.</p> - <p>The function is called when an new handler is about to be - added, and the purpose is to verify the configuration and - initiate all resources needed by the handler.</p> + <p>The function is called on a temporary process when an new + handler is about to be added. 
The purpose is to verify the + configuration and initiate all resources needed by the + handler.</p> <p>The handler identity is associated with the <c>id</c> key in <c>Config1</c>.</p> <p>If everything succeeds, the callback function can add @@ -972,9 +982,9 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)). </type> <desc> <p>This callback function is optional.</p> - <p>The function is called when the configuration for a handler - is about to change, and the purpose is to verify and act on - the new configuration.</p> + <p>The function is called on a temporary process when the + configuration for a handler is about to change. The purpose + is to verify and act on the new configuration.</p> <p><c>Config1</c> is the existing configuration and <c>Config2</c> is the new configuration.</p> <p>The handler identity is associated with the <c>id</c> key @@ -999,7 +1009,8 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)). <p>This callback function is mandatory.</p> <p>The function is called when all primary filters and all handler filters for the handler in question have passed for - the given log event.</p> + the given log event. It is called on the client process, that + is, the process that issued the log event.</p> <p>The handler identity is associated with the <c>id</c> key in <c>Config</c>.</p> <p>The handler must log the event.</p> @@ -1017,9 +1028,9 @@ logger:set_process_metadata(maps:merge(logger:get_process_metadata(), Meta)). </type> <desc> <p>This callback function is optional.</p> - <p>The function is called when a handler is about to be - removed, and the purpose is to release all resources used by - the handler.</p> + <p>The function is called on a temporary process when a + handler is about to be removed. The purpose is to release + all resources used by the handler.</p> <p>The handler identity is associated with the <c>id</c> key in <c>Config</c>.</p> <p>The return value is ignored by Logger.</p> diff --git a/lib/kernel/doc/src/logger_arch.png b/lib/kernel/doc/src/logger_arch.png Binary files differindex a9b9a658b4..a3a863c511 100644 --- a/lib/kernel/doc/src/logger_arch.png +++ b/lib/kernel/doc/src/logger_arch.png diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml index f7df0a3e6e..208193ee42 100644 --- a/lib/kernel/doc/src/logger_chapter.xml +++ b/lib/kernel/doc/src/logger_chapter.xml @@ -48,6 +48,13 @@ handler, replace it by a custom handler, and install additional handlers.</p> + <note> + <p>Since Logger is new in Erlang/OTP 21.0, we do reserve the right + to introduce changes to the Logger API and functionality in + patches following this release. These changes might or might not + be backwards compatible with the initial version.</p> + </note> + <section> <title>Overview</title> <p>A <em>log event</em> consists of a <em>log level</em>, the @@ -84,11 +91,11 @@ section <seealso marker="#filters">Filters</seealso> for more details.</p> <p>If a log event passes through all primary filters and all - handler filters for a specific handler, Logger forwards the event - to the handler callback. The handler formats and prints the - event to its destination. See - section <seealso marker="#handlers">Handlers</seealso> for - more details.</p> + handler filters for a specific handler, Logger forwards the + event to the <em>handler callback</em>. The handler formats and + prints the event to its destination. 
See + section <seealso marker="#handlers">Handlers</seealso> for more + details.</p> <p>Everything up to and including the call to the handler callbacks is executed on the client process, that is, the process where the log event was issued. It is up to the handler @@ -97,6 +104,7 @@ defined.</p> </section> <section> + <marker id="logger_api"/> <title>Logger API</title> <p>The API for logging consists of a set of <seealso marker="logger#macros">macros</seealso>, and a set @@ -113,10 +121,11 @@ <marker id="log_level"/> <title>Log Level</title> <p>The log level indicates the severity of a event. In - accordance with the Syslog protocol, RFC-5424, eight log - levels can be specified. The following table lists all - possible log levels by name (atom), integer value, and - description:</p> + accordance with the Syslog protocol, + <url href="https://www.ietf.org/rfc/rfc5424.txt">RFC + 5424</url>, eight log levels can be specified. The following + table lists all possible log levels by name (atom), integer + value, and description:</p> <table align="left"> <row> @@ -337,7 +346,7 @@ logger:debug(#{got => connection_request, id => Id, state => State}, <marker id="handlers"/> <title>Handlers</title> <p>A handler is defined as a module exporting at least the - following function:</p> + following callback function:</p> <pre><seealso marker="logger#HModule:log-2">log(LogEvent, Config) -> void()</seealso></pre> @@ -934,50 +943,50 @@ error_logger:add_report_handler/1,2. </section> <section> - <title>Example: Add a handler to log debug events to file</title> + <title>Example: Add a handler to log info events to file</title> <p>When starting an Erlang node, the default behaviour is that all - log events on level info or more severe, are logged to the - terminal via the default handler. To also log debug events, you - can either change the primary log level to <c>debug</c>:</p> + log events on level <c>notice</c> or more severe, are logged to + the terminal via the default handler. To also log info events, + you can either change the primary log level to <c>info</c>:</p> <pre> -1> <input>logger:set_primary_config(level, debug).</input> +1> <input>logger:set_primary_config(level, info).</input> ok</pre> <p>or set the level for one or a few modules only:</p> <pre> -2> <input>logger:set_module_level(mymodule, debug).</input> +2> <input>logger:set_module_level(mymodule, info).</input> ok</pre> - <p>This allows debug events to pass through to the default handler, - and be printed to the terminal as well. If there are many debug + <p>This allows info events to pass through to the default handler, + and be printed to the terminal as well. If there are many info events, it can be useful to print these to a file instead.</p> - <p>First, set the log level of the default handler to <c>info</c>, - preventing it from printing debug events to the terminal:</p> + <p>First, set the log level of the default handler + to <c>notice</c>, preventing it from printing info events to the + terminal:</p> <pre> -3> <input>logger:set_handler_config(default, level, info).</input> +3> <input>logger:set_handler_config(default, level, notice).</input> ok</pre> <p>Then, add a new handler which prints to file. You can use the handler module <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, - and specify type <c>{file,File}</c>. 
The default handler level - is <c>all</c>, so you don't need to specify that:</p> + and specify type <c>{file,File}</c>.:</p> <pre> -4> <input>Config = #{config => #{type => {file,"./debug.log"}}}.</input> -#{config => #{type => {file,"./debug.log"}}} -5> <input>logger:add_handler(debugger, logger_std_h, Config).</input> +4> <input>Config = #{config => #{type => {file,"./info.log"}}, level => info}.</input> +#{config => #{type => {file,"./info.log"}},level => info} +5> <input>logger:add_handler(myhandler, logger_std_h, Config).</input> ok</pre> <p>Since <c>filter_default</c> defaults to <c>log</c>, this - handler now receives all log events. If you want debug events - only in the file, you must add a filter to stop all non-debug + handler now receives all log events. If you want info events + only in the file, you must add a filter to stop all non-info events. The built-in filter <seealso marker="logger_filters#level-2"> <c>logger_filters:level/2</c></seealso> can do this:</p> <pre> -6> <input>logger:add_handler_filter(debugger, stop_non_debug, - {fun logger_filters:level/2, {stop, neq, debug}}).</input> +6> <input>logger:add_handler_filter(myhandler, stop_non_info, + {fun logger_filters:level/2, {stop, neq, info}}).</input> ok</pre> <p>See section <seealso marker="#filters">Filters</seealso> for - more information about the filters and the <c>filter_default</c> - configuration parameter.</p> + more information about the filters and the <c>filter_default</c> + configuration parameter.</p> </section> @@ -1023,63 +1032,42 @@ ok</pre> <p>A simple handler that prints to the terminal can be implemented as follows:</p> <code> --module(myhandler). +-module(myhandler1). -export([log/2]). -log(LogEvent, #{formatter := {FModule, FConfig}) -> +log(LogEvent, #{formatter := {FModule, FConfig}}) -> io:put_chars(FModule:format(LogEvent, FConfig)). </code> - <p>A simple handler which prints to file can be implemented like - this:</p> + <p>Notice that the above handler does not have any overload + protection, and all log events are printed directly from the + client process.</p> + <p>For information and examples of overload protection, please + refer to + section <seealso marker="#overload_protection">Protecting the + Handler from Overload</seealso>, and the implementation + of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso> + and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c> + </seealso>.</p> + <p>The following is a simpler example of a handler which logs to a + file through one single process:</p> <code> --module(myhandler). +-module(myhandler2). -export([adding_handler/1, removing_handler/1, log/2]). -export([init/1, handle_call/3, handle_cast/2, terminate/2]). adding_handler(Config) -> - {ok, Fd} = file:open(File, [append, {encoding, utf8}]), - {ok, Config#{myhandler_fd => Fd}}. - -removing_handler(#{myhandler_fd := Fd}) -> - _ = file:close(Fd), - ok. + MyConfig = maps:get(config,Config,#{file => "myhandler2.log"}), + {ok, Pid} = gen_server:start(?MODULE, MyConfig, []), + {ok, Config#{config => MyConfig#{pid => Pid}}}. -log(LogEvent,#{myhandler_fd := Fd, formatter := {FModule, FConfig}}) -> - io:put_chars(Fd, FModule:format(LogEvent, FConfig)). 
- </code> - - <note> - <p>The above handlers do not have any overload - protection, and all log events are printed directly from the - client process.</p> - <p>For information and examples of overload protection, please - refer to - section <seealso marker="#overload_protection">Protecting the - Handler from Overload</seealso>, and the implementation - of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso> - and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c> - </seealso>.</p> - </note> - - <p>Below is a simpler example of a handler which logs through one - single process.</p> - <code> --module(myhandler). --export([adding_handler/1, removing_handler/1, log/2]). --export([init/1, handle_call/3, handle_cast/2, terminate/2]). - -adding_handler(Config) -> - {ok, Pid} = gen_server:start(?MODULE, Config), - {ok, Config#{myhandler_pid => Pid}}. - -removing_handler(#{myhandler_pid := Pid}) -> +removing_handler(#{config := #{pid := Pid}}) -> gen_server:stop(Pid). -log(LogEvent,#{myhandler_pid := Pid} = Config) -> +log(LogEvent,#{config := #{pid := Pid}} = Config) -> gen_server:cast(Pid, {log, LogEvent, Config}). -init(#{myhandler_file := File}) -> +init(#{file := File}) -> {ok, Fd} = file:open(File, [append, {encoding, utf8}]), {ok, #{file => File, fd => Fd}}. @@ -1090,7 +1078,7 @@ handle_cast({log, LogEvent, Config}, #{fd := Fd} = State) -> do_log(Fd, LogEvent, Config), {noreply, State}. -terminate(Reason, #{fd := Fd}) -> +terminate(_Reason, #{fd := Fd}) -> _ = file:close(Fd), ok. @@ -1103,184 +1091,210 @@ do_log(Fd, LogEvent, #{formatter := {FModule, FConfig}}) -> <section> <marker id="overload_protection"/> <title>Protecting the Handler from Overload</title> - <p>In order for the built-in handlers to survive, and stay responsive, - during periods of high load (i.e. when huge numbers of incoming - log requests must be handled), a mechanism for overload protection - has been implemented in the - <seealso marker="logger_std_h"><c>logger_std_h</c></seealso> - and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c> - </seealso> handler. The mechanism, used by both handlers, works - as follows:</p> + <p>The default handlers, <seealso marker="logger_std_h"> + <c>logger_std_h</c></seealso> and <seealso marker="logger_disk_log_h"> + <c>logger_disk_log_h</c></seealso>, feature an overload protection + mechanism, which makes it possible for the handlers to survive, + and stay responsive, during periods of high load (when huge + numbers of incoming log requests must be handled). + The mechanism works as follows:</p> <section> <title>Message Queue Length</title> <p>The handler process keeps track of the length of its message - queue and reacts in different ways depending on the current status. - The purpose is to keep the handler in, or (as quickly as possible), - get the handler into, a state where it can keep up with the pace - of incoming log requests. The memory usage of the handler must never - keep growing larger and larger, since that would eventually cause the - handler to crash. Three thresholds with associated actions have been - defined:</p> + queue and takes some form of action when the current length exceeds a + configurable threshold. The purpose is to keep the handler in, or to + as quickly as possible get the handler into, a state where it can + keep up with the pace of incoming log events. The memory use of the + handler must never grow larger and larger, since that will eventually + cause the handler to crash. 
These three thresholds, with associated + actions, exist:</p> <taglist> - <tag><c>toggle_sync_qlen</c></tag> + <tag><c>sync_mode_qlen</c></tag> <item> - <p>The default value of this level is <c>10</c> messages, - and as long as the length of the message queue is lower, all log - requests are handled asynchronously. This simply means that the - process sending the log request (by calling a log function in the - Logger API) does not wait for a response from the handler but - continues executing immediately after the request (i.e. it will not - be affected by the time it takes the handler to print to the log - device). If the message queue grows larger than this value, however, - the handler starts handling the log requests synchronously instead, - meaning the process sending the request will have to wait for a - response. When the handler manages to reduce the message queue to a - level below the <c>toggle_sync_qlen</c> threshold, asynchronous + <p>As long as the length of the message queue is lower than this + value, all log events are handled asynchronously. This means that + the client process sending the log event, by calling a log function + in the <seealso marker="logger_chapter#logger_api">Logger API</seealso>, + does not wait for a response from the handler but continues + executing immediately after the event is sent. It is not affected + by the time it takes the handler to print the event to the log + device. If the message queue grows larger than this value, + the handler starts handling log events synchronously instead, + meaning that the client process sending the event must wait for a + response. When the handler reduces the message queue to a + level below the <c>sync_mode_qlen</c> threshold, asynchronous operation is resumed. The switch from asynchronous to synchronous - mode will force the logging tempo of few busy senders to slow down, - but cannot protect the handler sufficiently in situations of many - concurrent senders.</p> + mode can slow down the logging tempo of one, or a few, busy senders, + but cannot protect the handler sufficiently in a situation of many + busy concurrent senders.</p> + <p>Defaults to <c>10</c> messages.</p> </item> - <tag><c>drop_new_reqs_qlen</c></tag> + <tag><c>drop_mode_qlen</c></tag> <item> - <p>When the message queue has grown larger than this threshold, which - defaults to <c>200</c> messages, the handler switches to a mode in - which it drops any new requests being made. Dropping a message in - this state means that the log function never actually sends a message - to the handler. The log call simply returns without an action. When - the length of the message queue has been reduced to a level below this - threshold, synchronous or asynchronous request handling mode is - resumed.</p> + <p>When the message queue grows larger than this threshold, the + handler switches to a mode in which it drops all new events that + senders want to log. Dropping an event in this mode means that the + call to the log function never results in a message being sent to + the handler, but the function returns without taking any action. + The handler keeps logging the events that are already in its message + queue, and when the length of the message queue is reduced to a level + below the threshold, synchronous or asynchronous mode is resumed. 
+ Notice that when the handler activates or deactivates drop mode, + information about it is printed in the log.</p> + <p>Defaults to <c>200</c> messages.</p> </item> - <tag><c>flush_reqs_qlen</c></tag> + <tag><c>flush_qlen</c></tag> <item> - <p>Above this threshold, which defaults to <c>1000</c> messages, a - flush operation takes place, in which all messages buffered in the - process mailbox get deleted without any logging actually taking - place. (Processes waiting for a response from a synchronous log request - will receive a reply indicating that the request has been dropped).</p> + <p>If the length of the message queue grows larger than this threshold, + a flush (delete) operation takes place. To flush events, the handler + discards the messages in the message queue by receiving them in a + loop without logging. Client processes waiting for a response from a + synchronous log request receive a reply from the handler indicating + that the request is dropped. The handler process increases its + priority during the flush loop to make sure that no new events + are received during the operation. Notice that after the flush operation + is performed, the handler prints information in the log about how many + events have been deleted.</p> + <p>Defaults to <c>1000</c> messages.</p> </item> </taglist> <p>For the overload protection algorithm to work properly, it is required that:</p> - <p><c>toggle_sync_qlen =< drop_new_reqs_qlen =< flush_reqs_qlen</c></p> + <p><c>sync_mode_qlen =< drop_mode_qlen =< flush_qlen</c></p> <p>and that:</p> - <p><c>drop_new_reqs_qlen > 1</c></p> + <p><c>drop_mode_qlen > 1</c></p> - <p>If <c>toggle_sync_qlen</c> is set to <c>0</c>, the handler will handle all - requests synchronously. Setting the value of <c>toggle_sync_qlen</c> to the same - as <c>drop_new_reqs_qlen</c>, disables the synchronous mode. Likewise, setting - the value of <c>drop_new_reqs_qlen</c> to the same as <c>flush_reqs_qlen</c>, - disables the drop mode.</p> + <p>To disable certain modes, do the following:</p> + <list> + <item>If <c>sync_mode_qlen</c> is set to <c>0</c>, all log events are handled + synchronously. That is, asynchronous logging is disabled.</item> + <item>If <c>sync_mode_qlen</c> is set to the same value as + <c>drop_mode_qlen</c>, synchronous mode is disabled. That is, the handler + always runs in asynchronous mode, unless dropping or flushing is invoked.</item> + <item>If <c>drop_mode_qlen</c> is set to the same value as <c>flush_qlen</c>, + drop mode is disabled and can never occur.</item> + </list> <p>During high load scenarios, the length of the handler message queue rarely grows in a linear and predictable way. Instead, whenever the - handler process gets scheduled in, it can have an almost arbitrary number - of messages waiting in the mailbox. It's for this reason that the overload - protection mechanism is focused on acting quickly and quite drastically - (such as immediately dropping or flushing messages) as soon as a large - queue length is detected. </p> - - <p>The thresholds listed above may be modified by the user if, e.g, a handler - shouldn't drop or flush messages unless the message queue length grows - extremely large. (The handler must be allowed to use large amounts of memory - under such circumstances however). 
Another example of when the user might want - to change the settings is if, for performance reasons, the logging processes must - never get blocked by synchronous log requests, while dropping or flushing requests - is perfectly acceptable (since it doesn't affect the performance of the - loggers).</p> + handler process is scheduled in, it can have an almost arbitrary number + of messages waiting in the message queue. It is for this reason that the overload + protection mechanism is focused on acting quickly, and quite drastically, + such as immediately dropping or flushing messages, when a large queue length + is detected.</p> + + <p>The values of the previously listed thresholds can be specified by the user. + This way, a handler can be configured to, for example, not drop or flush + messages unless the message queue length of the handler process grows extremely + large. Notice that large amounts of memory can be required for the node under such + circumstances. Another example of user configuration is when, for performance + reasons, the client processes must never be blocked by synchronous log requests. + It is possible, perhaps, that dropping or flushing events is still acceptable, since + it does not affect the performance of the client processes sending the log events.</p> <p>A configuration example:</p> <code type="none"> logger:add_handler(my_standard_h, logger_std_h, - #{config => - #{type => {file,"./system_info.log"}, - toggle_sync_qlen => 100, - drop_new_reqs_qlen => 1000, - flush_reqs_qlen => 2000}}). + #{config => #{type => {file,"./system_info.log"}, + sync_mode_qlen => 100, + drop_mode_qlen => 1000, + flush_qlen => 2000}}). </code> </section> <section> <title>Controlling Bursts of Log Requests</title> - <p>A potential problem with large bursts of log requests, is that log files - may get full or wrapped too quickly (in the latter case overwriting - previously logged data that could be of great importance). For this reason, - both built-in handlers offer the possibility to set a maximum level of how - many requests to process with a certain time frame. With this burst control - feature enabled, the handler will take care of bursts of log requests - without choking log files, or the terminal, with massive amounts of - printouts. These are the configuration parameters:</p> - + <p>Large bursts of log events - many events received by the handler + under a short period of time - can potentially cause problems, such as:</p> + <list> + <item>Log files grow very large, very quickly.</item> + <item>Circular logs wrap too quickly so that important data is overwritten.</item> + <item>Write buffers grow large, which slows down file sync operations.</item> + </list> + + <p>For this reason, both built-in handlers offer the possibility to specify the + maximum number of events to be handled within a certain time frame. + With this burst control feature enabled, the handler can avoid choking the log with + massive amounts of printouts. The configuration parameters are:</p> <taglist> - <tag><c>enable_burst_limit</c></tag> + <tag><c>burst_limit_enable</c></tag> <item> - <p>This is set to <c>true</c> by default. The value <c>false</c> - disables the burst control feature.</p> + <p>Value <c>true</c> enables burst control and <c>false</c> disables it.</p> + <p>Defaults to <c>true</c>.</p> </item> - <tag><c>burst_limit_size</c></tag> + <tag><c>burst_limit_max_count</c></tag> <item> - <p>This is how many requests should be processed within the - <c>burst_window_time</c> time frame. 
After this maximum has been - reached, successive requests will be dropped until the end of the - time frame. The default value is <c>500</c> messages.</p> + <p>This is the maximum number of events to handle within a + <c>burst_limit_window_time</c> time frame. After the limit is + reached, successive events are dropped until the end of the time frame.</p> + <p>Defaults to <c>500</c> events.</p> </item> - <tag><c>burst_window_time</c></tag> + <tag><c>burst_limit_window_time</c></tag> <item> - <p>The default window is <c>1000</c> milliseconds long.</p> + <p>See the previous description of <c>burst_limit_max_count</c>.</p> + <p>Defaults to <c>1000</c> milliseconds.</p> </item> </taglist> <p>A configuration example:</p> <code type="none"> logger:add_handler(my_disk_log_h, logger_disk_log_h, - #{disk_log_opts => - #{file => "./my_disk_log"}, - config => - #{burst_limit_size => 10, - burst_window_time => 500}}). + #{config => #{file => "./my_disk_log", + burst_limit_enable => true, + burst_limit_max_count => 20, + burst_limit_window_time => 500}}). </code> </section> <section> - <title>Terminating a Large Handler</title> - <p>A handler process may grow large even if it can manage peaks of high load - without crashing. The overload protection mechanism includes user configurable - levels for a maximum allowed message queue length and maximum allowed memory - usage. This feature is disabled by default, but can be switched on by means - of the following configuration parameters:</p> - + <title>Terminating an Overloaded Handler</title> + <p>It is possible that a handler, even if it can successfully manage peaks + of high load without crashing, can build up a large message queue, or use a + large amount of memory. The overload protection mechanism includes an + automatic termination and restart feature for the purpose of guaranteeing + that a handler does not grow out of bounds. The feature is configured + with the following parameters:</p> <taglist> - <tag><c>enable_kill_overloaded</c></tag> + <tag><c>overload_kill_enable</c></tag> <item> - <p>This is set to <c>false</c> by default. The value <c>true</c> - enables the feature.</p> + <p>Value <c>true</c> enables the feature and <c>false</c> disables it.</p> + <p>Defaults to <c>false</c>.</p> </item> - <tag><c>handler_overloaded_qlen</c></tag> + <tag><c>overload_kill_qlen</c></tag> <item> - <p>This is the maximum allowed queue length. If the mailbox grows larger - than this, the handler process gets terminated.</p> + <p>This is the maximum allowed queue length. If the message queue grows + larger than this, the handler process is terminated.</p> + <p>Defaults to <c>20000</c> messages.</p> </item> - <tag><c>handler_overloaded_mem</c></tag> + <tag><c>overload_kill_mem_size</c></tag> <item> - <p>This is the maximum allowed memory usage of the handler process. If - the handler grows any larger, the process gets terminated.</p> + <p>This is the maximum memory size that the handler process is allowed to use. + If the handler grows larger than this, the process is terminated.</p> + <p>Defaults to <c>3000000</c> bytes.</p> </item> - <tag><c>handler_restart_after</c></tag> + <tag><c>overload_kill_restart_after</c></tag> <item> - <p>If the handler gets terminated because of its queue length or - memory usage, it can get automatically restarted again after a - configurable delay time. The time is specified in milliseconds - and <c>5000</c> is the default value. 
The value <c>never</c> can - also be set, which prevents a restart.</p> + <p>If the handler is terminated, it restarts automatically after a + delay specified in milliseconds. The value <c>infinity</c> prevents + restarts.</p> + <p>Defaults to <c>5000</c> milliseconds.</p> </item> </taglist> + <p>If the handler process is terminated because of overload, it prints + information about it in the log. It also prints information about when a + restart has taken place, and the handler is back in action.</p> + <note> + <p>The sizes of the log events affect the memory needs of the handler. + For information about how to limit the size of log events, see the + <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso> + manual page.</p> + </note> </section> </section> diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml index 63c29cb010..98439983cf 100644 --- a/lib/kernel/doc/src/logger_disk_log_h.xml +++ b/lib/kernel/doc/src/logger_disk_log_h.xml @@ -33,106 +33,112 @@ <file>logger_disk_log_h.xml</file> </header> <module>logger_disk_log_h</module> - <modulesummary>A disk_log based handler for the Logger.</modulesummary> + <modulesummary>A disk_log based handler for Logger</modulesummary> <description> <p>This is a handler for Logger that offers circular (wrapped) logs by using <seealso marker="disk_log"><c>disk_log</c></seealso>. - Multiple instances - of this handler can be added to Logger, and each instance prints to - its own disk_log file, created with the name and settings specified in - the handler configuration.</p> + Multiple instances of this handler can be added to Logger, and each instance + prints to its own disk log file, created with the name and settings specified + in the handler configuration.</p> <p>The default standard handler, <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, can be - replaced by a disk_log handler at start up of the Kernel application. + replaced by a disk_log handler at startup of the Kernel application. See an example of this below.</p> - <p>The handler has an overload protection mechanism that will keep the handler - process and the Kernel application alive during a high load of log - requests. How this feature works, and how to modify the configuration, - is described in the + <p>The handler has an overload protection mechanism that keeps the handler + process and the Kernel application alive during high loads of log + events. How overload protection works, and how to configure it, is + described in the <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> </seealso>.</p> <p>To add a new instance of the disk_log handler, use <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c> - </seealso>. The handler configuration argument is a map which may contain + </seealso>. The handler configuration argument is a map which can contain general configuration parameters, as documented in the <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c> - </seealso>, as well as handler specific parameters.</p> - <p>The settings for the disk_log log file should be specified with the - key <c>disk_log_opts</c>. These settings are a subset of the disk_log - data type - <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>.</p> - <p>Parameters in the <c>disk_log_opts</c> map:</p> + </seealso>, and handler specific parameters. 
The specific data + is stored in a sub map with the key <c>config</c>, and can contain the + following parameters:</p> <taglist> <tag><c>file</c></tag> - <item>This is the full name of the disk_log log file.</item> + <item> + <p>This is the full name of the disk log file. The option + corresponds to the <c>name</c> property in the + <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> + datatype.</p> + </item> <tag><c>type</c></tag> - <item>This is the disk_log type, <c>wrap</c> or <c>halt</c>. The - default value is <c>wrap</c>.</item> + <item> + <p>This is the disk log type, <c>wrap</c> or <c>halt</c>. The option + corresponds to the <c>type</c> property in the + <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> + datatype.</p> + <p>Defaults to <c>wrap</c>.</p> + </item> <tag><c>max_no_files</c></tag> - <item>This is the maximum number of files that disk_log will use - for its circular logging. The default value is <c>10</c>. (The setting - has no effect on a halt log).</item> + <item> + <p>This is the maximum number of files that disk_log uses + for its circular logging. The option + corresponds to the <c>MaxNoFiles</c> element in the <c>size</c> property in the + <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> + datatype.</p> + <p>Defaults to <c>10</c>.</p> + <p>The setting has no effect on a halt log.</p> + </item> <tag><c>max_no_bytes</c></tag> - <item>This is the maximum number of bytes that will be written to - a log file before disk_log proceeds with the next file in order (or - generates an error in case of a full halt log). The default value for - a wrap log is <c>1048576</c> bytes, and <c>infinity</c> for a halt - log.</item> - </taglist> - <p>Specific configuration for the handler (represented as a sub map) - is specified with the key <c>config</c>. It may contain the - following parameter:</p> - <taglist> + <item> + <p>This is the maximum number of bytes that is written to + a log file before disk_log proceeds with the next file in order, or + generates an error in case of a full halt log. The option + corresponds to the <c>MaxNoBytes</c> element in the <c>size</c> property in the + <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso> + datatype.</p> + <p>Defaults to <c>1048576</c> bytes for a wrap log, and + <c>infinity</c> for a halt log.</p> + </item> <tag><c>filesync_repeat_interval</c></tag> <item> - <p>This value (in milliseconds) specifies how often the handler will - do a disk_log sync operation in order to make sure that buffered data - gets written to disk. The handler will repeatedly attempt this - operation, but only perform it if something has actually been logged - since the last sync. The default value is <c>5000</c> milliseconds. - If <c>no_repeat</c> is set as value, the repeated sync operation is - disabled. The user can also call the - <seealso marker="logger_disk_log_h#sync-1"><c>sync/1</c> - </seealso> function to perform a disk_log sync.</p></item> + <p>This value, in milliseconds, specifies how often the handler does + a disk_log sync operation to write buffered data to disk. The handler attempts + the operation repeatedly, but only performs a new sync if something has + actually been logged.</p> + <p>Defaults to <c>5000</c> milliseconds.</p> + <p>If <c>no_repeat</c> is set as value, the repeated sync operation + is disabled. 
The user can also call the + <seealso marker="logger_disk_log_h#filesync-1"><c>filesync/1</c> + </seealso> function to perform a disk_log sync.</p> + </item> </taglist> - <p>There are a number of other configuration parameters available, that are - to be used for customizing the overload protection behaviour. The same - parameters are used both in the standard handler and the disk_log handler, - and are documented in the + <p>Other configuration parameters exist, to be used for customizing + the overload protection behaviour. The same parameters are used both in the + standard handler and the disk_log handler, and are documented in the <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> </seealso>.</p> - <p>Note that when changing the configuration of the handler in runtime, by - calling - <seealso marker="logger#set_handler_config-2"><c>logger:set_handler_config/2 - or logger:set_handler_config/3</c></seealso>, the <c>disk_log_opts</c> - settings may not be modified.</p> + <p>Notice that when changing the configuration of the handler in runtime, the + disk_log options (<c>file</c>, <c>type</c>, <c>max_no_files</c>, + <c>max_no_bytes</c>) must not be modified.</p> <p>Example of adding a disk_log handler:</p> <code type="none"> logger:add_handler(my_disk_log_h, logger_disk_log_h, - #{level => error, - filter_default => log, - disk_log_opts => - #{file => "./my_disk_log", - type => wrap, - max_no_files => 4, - max_no_bytes => 10000}, - config => - #{filesync_repeat_interval => 1000}}). + #{config => #{file => "./my_disk_log", + type => wrap, + max_no_files => 4, + max_no_bytes => 10000}, + filesync_repeat_interval => 1000}}). </code> - <p>In order to use the disk_log handler instead of the default standard + <p>To use the disk_log handler instead of the default standard handler when starting an Erlang node, change the Kernel default logger to - use disk_log. Example:</p> + use <c>logger_disk_log_h</c>. Example:</p> <code type="none"> erl -kernel logger '[{handler,default,logger_disk_log_h, - #{disk_log_opts => #{file => "./system_disk_log"}}}]' + #{config => #{file => "./system_disk_log"}}}]' </code> </description> <funcs> <func> - <name name="sync" arity="1" clause_i="1"/> + <name name="filesync" arity="1" clause_i="1"/> <fsummary>Writes buffered data to disk.</fsummary> <desc> <p>Write buffered data to disk.</p> diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml index 89e11389c5..95b4baf160 100644 --- a/lib/kernel/doc/src/logger_std_h.xml +++ b/lib/kernel/doc/src/logger_std_h.xml @@ -33,92 +33,84 @@ <file>logger_std_h.xml</file> </header> <module>logger_std_h</module> - <modulesummary>Default handler for Logger.</modulesummary> + <modulesummary>Standard handler for Logger.</modulesummary> <description> - <p>This is the default handler for Logger. + <p>This is the standard handler for Logger. Multiple instances of this handler can be added to - Logger, and each instance will print logs to <c>standard_io</c>, - <c>standard_error</c> or to file. The default instance that starts - with Kernel is named <c>default</c> - which is the name to be used - for reconfiguration.</p> - <p>The handler has an overload protection mechanism that will keep the handler - process and the Kernel application alive during a high load of log - requests. 
How this feature works, and how to modify the configuration, - is described in the + Logger, and each instance prints logs to <c>standard_io</c>, + <c>standard_error</c>, or to file.</p> + <p>The handler has an overload protection mechanism that keeps the handler + process and the Kernel application alive during high loads of log + events. How overload protection works, and how to configure it, is + described in the <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> </seealso>.</p> <p>To add a new instance of the standard handler, use <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c> - </seealso>. The handler configuration argument is a map which may contain - general configuration parameters, as documented in the + </seealso>. The handler configuration argument is a map which can contain + general configuration parameters, as documented in the <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c> - </seealso>, as well as handler specific parameters. The specific parameters - are stored in a sub map with the key <c>config</c>. The following - keys and values may be specified:</p> + </seealso>, and handler specific parameters. The specific data + is stored in a sub map with the key <c>config</c>, and can contain the + following parameters:</p> <taglist> <tag><marker id="type"/><c>type</c></tag> <item> - <p>This will have the value <c>standard_io</c>, <c>standard_error</c>, - <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>, - where <c>standard_io</c> is the default value for type. It's recommended - to not specify <c>LogFileOpts</c> if not absolutely necessary. The - default options used by the handler to open a file for logging are: - <c>raw</c>, <c>append</c> and <c>delayed_write</c>. The standard - handler does not have support for circular logging. Use the - <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c> - </seealso> handler for this.</p></item> + <p>This has the value <c>standard_io</c>, <c>standard_error</c>, + <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>.</p> + <p> Defaults to <c>standard_io</c>.</p> + <p>It is recommended not to specify <c>LogFileOpts</c> unless absolutely + necessary. The default options used by the handler to open a file for logging are + <c>raw</c>, <c>append</c>, and <c>delayed_write</c>. Notice that the standard + handler does not have support for circular logging. Use the disk_log handler, + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>, + for this.</p> + </item> <tag><c>filesync_repeat_interval</c></tag> <item> - <p>This value (in milliseconds) specifies how often the handler will - do a file sync operation in order to make sure that buffered data gets - written to disk. The handler will repeatedly attempt this - operation, but only perform it if something has actually been logged - since the last sync. The default value is <c>5000</c> milliseconds. - If <c>no_repeat</c> is set as value, the repeated file sync operation - is disabled, and it will be the operating system settings that determine - how quickly or slowly data gets written to disk. The user can also call - the <seealso marker="logger_std_h#sync-1"><c>sync/1</c></seealso> - function to perform a file sync.</p></item> + <p>This value, in milliseconds, specifies how often the handler does + a file sync operation to write buffered data to disk. 
The handler attempts + the operation repeatedly, but only performs a new sync if something has + actually been logged.</p> + <p>Defaults to <c>5000</c> milliseconds.</p> + <p>If <c>no_repeat</c> is set as value, the repeated file sync operation + is disabled, and it is the operating system settings that determine + how quickly or slowly data is written to disk. The user can also call + the <seealso marker="logger_std_h#filesync-1"><c>filesync/1</c></seealso> + function to perform a file sync.</p> + </item> </taglist> - <p>There are a number of other configuration parameters available, that are - to be used for customizing the overload protection behaviour. The same - parameters are used both in the standard handler and the disk_log handler, - and are documented in the + <p>Other configuration parameters exist, to be used for customizing + the overload protection behaviour. The same parameters are used both in the + standard handler and the disk_log handler, and are documented in the <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c> </seealso>.</p> - <p>Note that when changing the configuration of the handler in runtime, by - calling - <seealso marker="logger#set_handler_config-2"><c>logger:set_handler_config/2</c> - </seealso>, or - <seealso marker="logger#set_handler_config-3"><c>logger:set_handler_config/3</c> - </seealso>, - the <c>type</c> parameter may not be modified.</p> + <p>Notice that if changing the configuration of the handler in runtime, + the <c>type</c> parameter must not be modified.</p> <p>Example of adding a standard handler:</p> <code type="none"> logger:add_handler(my_standard_h, logger_std_h, - #{level => info, - filter_default => log, - config => - #{type => {file,"./system_info.log"}, - filesync_repeat_interval => 1000}}). + #{config => #{type => {file,"./system_info.log"}, + filesync_repeat_interval => 1000}}). </code> - <p>In order to configure the default handler (that starts initially with - the Kernel application) to log to file instead of <c>standard_io</c>, - change the Kernel default logger to use a file. Example:</p> + <p>To set the default handler, that starts initially with + the Kernel application, to log to file instead of <c>standard_io</c>, + change the Kernel default logger configuration. Example:</p> <code type="none"> erl -kernel logger '[{handler,default,logger_std_h, #{config => #{type => {file,"./log.log"}}}}]' </code> <p>An example of how to replace the standard handler with a disk_log handler - at start up can be found in the manual of - <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>.</p> + at startup is found in the + <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso> + manual.</p> </description> <funcs> <func> - <name name="sync" arity="1" clause_i="1"/> + <name name="filesync" arity="1" clause_i="1"/> <fsummary>Writes buffered data to disk.</fsummary> <desc> <p>Write buffered data to disk.</p> diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl index 0dad6ae935..a074d2e74b 100644 --- a/lib/kernel/src/application_controller.erl +++ b/lib/kernel/src/application_controller.erl @@ -1814,8 +1814,9 @@ check_conf() -> %% Therefore read and merge contents. if BFName =:= "sys" -> + DName = filename:dirname(FName), {ok, SysEnv, Errors} = - check_conf_sys(NewEnv), + check_conf_sys(NewEnv, [], [], DName), %% Report first error, if any, and %% terminate @@ -1837,20 +1838,31 @@ check_conf() -> end. 
check_conf_sys(Env) -> - check_conf_sys(Env, [], []). + check_conf_sys(Env, [], [], []). -check_conf_sys([File|T], SysEnv, Errors) when is_list(File) -> +check_conf_sys([File|T], SysEnv, Errors, DName) when is_list(File),is_list(DName) -> BFName = filename:basename(File, ".config"), FName = filename:join(filename:dirname(File), BFName ++ ".config"), - case load_file(FName) of + LName = case filename:pathtype(FName) of + relative when (DName =/= []) -> + % Check if relative to sys.config dir otherwise use legacy mode, + % i.e relative to cwd. + RName = filename:join(DName, FName), + case erl_prim_loader:read_file_info(RName) of + {ok, _} -> RName ; + error -> FName + end; + _ -> FName + end, + case load_file(LName) of {ok, NewEnv} -> - check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors); + check_conf_sys(T, merge_env(SysEnv, NewEnv), Errors, DName); {error, {Line, _Mod, Str}} -> - check_conf_sys(T, SysEnv, [{error, {FName, Line, Str}}|Errors]) + check_conf_sys(T, SysEnv, [{error, {LName, Line, Str}}|Errors], DName) end; -check_conf_sys([Tuple|T], SysEnv, Errors) -> - check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors); -check_conf_sys([], SysEnv, Errors) -> +check_conf_sys([Tuple|T], SysEnv, Errors, DName) -> + check_conf_sys(T, merge_env(SysEnv, [Tuple]), Errors, DName); +check_conf_sys([], SysEnv, Errors, _) -> {ok, SysEnv, lists:reverse(Errors)}. load_file(File) -> diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl index a074d0210e..cf8ea658e3 100644 --- a/lib/kernel/src/logger_disk_log_h.erl +++ b/lib/kernel/src/logger_disk_log_h.erl @@ -26,7 +26,7 @@ -include("logger_h_common.hrl"). %%% API --export([start_link/3, info/1, sync/1, reset/1]). +-export([start_link/3, info/1, filesync/1, reset/1]). %% gen_server callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, @@ -58,19 +58,19 @@ start_link(Name, Config, HandlerState) -> %%%----------------------------------------------------------------- %%% --spec sync(Name) -> ok | {error,Reason} when +-spec filesync(Name) -> ok | {error,Reason} when Name :: atom(), Reason :: handler_busy | {badarg,term()}. -sync(Name) when is_atom(Name) -> +filesync(Name) when is_atom(Name) -> try gen_server:call(?name_to_reg_name(?MODULE,Name), disk_log_sync, ?DEFAULT_CALL_TIMEOUT) catch _:{timeout,_} -> {error,handler_busy} end; -sync(Name) -> - {error,{badarg,{sync,[Name]}}}. +filesync(Name) -> + {error,{badarg,{filesync,[Name]}}}. %%%----------------------------------------------------------------- %%% @@ -249,7 +249,8 @@ init([Name, max_no_files:=MNF}}, State = #{dl_sync_int := DLSyncInt}]) -> - register(?name_to_reg_name(?MODULE,Name), self()), + RegName = ?name_to_reg_name(?MODULE,Name), + register(RegName, self()), process_flag(trap_exit, true), process_flag(message_queue_data, off_heap), @@ -296,10 +297,12 @@ init([Name, enter_loop(Config1, State1) catch _:Error -> + unregister(RegName), logger_h_common:error_notify({open_disk_log,Name,Error}), proc_lib:init_ack(Error) end; Error -> + unregister(RegName), logger_h_common:error_notify({open_disk_log,Name,Error}), proc_lib:init_ack(Error) end. @@ -426,6 +429,7 @@ terminate(Reason, State = #{id := Name}) -> _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State, undefined)), _ = close_disk_log(Name, normal), + unregister(?name_to_reg_name(?MODULE, Name)), logger_h_common:stop_or_restart(Name, Reason, State). 
code_change(_OldVsn, State, _Extra) -> @@ -704,7 +708,7 @@ disk_log_sync(Name, State) -> ok; _ -> LogOpts = maps:get(log_opts, State), - logger_h_common:error_notify({Name,sync, + logger_h_common:error_notify({Name,filesync, LogOpts, SyncError}) end, diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl index d556938f02..f9f762405c 100644 --- a/lib/kernel/src/logger_h_common.erl +++ b/lib/kernel/src/logger_h_common.erl @@ -115,7 +115,7 @@ check_common_config({overload_kill_qlen,N}) when is_integer(N) -> check_common_config({overload_kill_mem_size,N}) when is_integer(N) -> valid; check_common_config({overload_kill_restart_after,NorA}) when is_integer(NorA); - NorA == never -> + NorA == infinity -> valid; check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA); @@ -261,7 +261,7 @@ flush_log_events(Limit, Limit) -> Limit; flush_log_events(N, Limit) -> %% flush log events but leave other events, such as - %% file/disk_log_sync, info and change_config, so that these + %% filesync, info and change_config, so that these %% have a chance to be processed even under heavy load receive {'$gen_cast',{log,_}} -> diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl index ad80b51109..e0a7b6e3ca 100644 --- a/lib/kernel/src/logger_h_common.hrl +++ b/lib/kernel/src/logger_h_common.hrl @@ -41,10 +41,10 @@ -define(OVERLOAD_KILL_MEM_SIZE, 3000000). %% This is the default time that the handler will wait before -%% restarting and accepting new requests. The value 'never' +%% restarting and accepting new requests. The value 'infinity' %% disables restarts. -define(OVERLOAD_KILL_RESTART_AFTER, 5000). -%%-define(OVERLOAD_KILL_RESTART_AFTER, never). +%%-define(OVERLOAD_KILL_RESTART_AFTER, infinity). %% The handler sends asynchronous write requests to the process %% controlling the i/o device, but every once in this interval diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl index ce9daa50ab..2a37076dda 100644 --- a/lib/kernel/src/logger_std_h.erl +++ b/lib/kernel/src/logger_std_h.erl @@ -28,7 +28,7 @@ -include_lib("kernel/include/file.hrl"). %% API --export([start_link/3, info/1, sync/1, reset/1]). +-export([start_link/3, info/1, filesync/1, reset/1]). %% gen_server and proc_lib callbacks -export([init/1, handle_call/3, handle_cast/2, handle_info/2, @@ -60,19 +60,19 @@ start_link(Name, Config, HandlerState) -> %%%----------------------------------------------------------------- %%% --spec sync(Name) -> ok | {error,Reason} when +-spec filesync(Name) -> ok | {error,Reason} when Name :: atom(), Reason :: handler_busy | {badarg,term()}. -sync(Name) when is_atom(Name) -> +filesync(Name) when is_atom(Name) -> try gen_server:call(?name_to_reg_name(?MODULE,Name), filesync, ?DEFAULT_CALL_TIMEOUT) catch _:{timeout,_} -> {error,handler_busy} end; -sync(Name) -> - {error,{badarg,{sync,[Name]}}}. +filesync(Name) -> + {error,{badarg,{filesync,[Name]}}}. 
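The rename of sync/1 to filesync/1 shown above changes the public API of both logger_std_h and logger_disk_log_h. A minimal usage sketch, assuming two already-added handler instances named my_std_h and my_disk_log_h (names chosen here for illustration); the return values follow the -spec above:

    %% Force buffered log data to disk; my_std_h and my_disk_log_h are assumed
    %% to be existing instances of the respective handler modules.
    sync_handlers() ->
        ok = logger_std_h:filesync(my_std_h),
        ok = logger_disk_log_h:filesync(my_disk_log_h).
    %% A handler that cannot answer within the call timeout returns
    %% {error,handler_busy}; a non-atom name returns {error,{badarg,...}},
    %% as exercised by the bad_input test cases later in this patch.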
%%%----------------------------------------------------------------- %%% @@ -229,7 +229,8 @@ log(LogEvent, Config = #{id := Name, init([Name, Config = #{config := HConfig}, State0 = #{type := Type, file_ctrl_sync_int := FileCtrlSyncInt}]) -> - register(?name_to_reg_name(?MODULE,Name), self()), + RegName = ?name_to_reg_name(?MODULE,Name), + register(RegName, self()), process_flag(trap_exit, true), process_flag(message_queue_data, off_heap), @@ -261,10 +262,12 @@ init([Name, Config = #{config := HConfig}, enter_loop(Config1, State1) catch _:Error -> + unregister(RegName), logger_h_common:error_notify({init_handler,Name,Error}), proc_lib:init_ack(Error) end; Error -> + unregister(RegName), logger_h_common:error_notify({init_handler,Name,Error}), proc_lib:init_ack(Error) end. @@ -415,6 +418,7 @@ terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid, false -> ok end, + unregister(?name_to_reg_name(?MODULE, Name)), logger_h_common:stop_or_restart(Name, Reason, State). code_change(_OldVsn, State, _Extra) -> @@ -816,7 +820,7 @@ sync_dev(Fd, DevName, PrevSyncResult, HandlerName) -> %% don't report same error twice PrevSyncResult; Error -> - logger_h_common:error_notify({HandlerName,sync,DevName,Error}), + logger_h_common:error_notify({HandlerName,filesync,DevName,Error}), Error end. diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl index 988f26280f..3d07d6d70d 100644 --- a/lib/kernel/test/application_SUITE.erl +++ b/lib/kernel/test/application_SUITE.erl @@ -37,7 +37,8 @@ -export([config_change/1, persistent_env/1, distr_changed_tc1/1, distr_changed_tc2/1, ensure_started/1, ensure_all_started/1, - shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1]). + shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1, + config_relative_paths/1]). -define(TESTCASE, testcase_name). -define(testcase, proplists:get_value(?TESTCASE, Config)). @@ -55,7 +56,7 @@ all() -> script_start, nodedown_start, permit_false_start_local, permit_false_start_dist, get_key, get_env, ensure_all_started, {group, distr_changed}, config_change, shutdown_func, shutdown_timeout, - shutdown_deadlock, + shutdown_deadlock, config_relative_paths, persistent_env]. 
groups() -> @@ -2075,6 +2076,42 @@ shutdown_deadlock(Config) when is_list(Config) -> %%----------------------------------------------------------------- +%% Relative paths in sys.config +%%----------------------------------------------------------------- +config_relative_paths(Config) -> + Dir = ?config(priv_dir,Config), + SubDir = filename:join(Dir,"subdir"), + Sys = filename:join(SubDir,"sys.config"), + ok = filelib:ensure_dir(Sys), + ok = file:write_file(Sys,"[\"../up.config\",\"current\"].\n"), + + Up = filename:join(Dir,"up.config"), + ok = file:write_file(Up,"[{app1,[{key1,value}]}].\n"), + + {ok,Cwd} = file:get_cwd(), + Current1 = filename:join(Cwd,"current.config"), + ok = file:write_file(Current1,"[{app1,[{key2,value1}]}].\n"), + + N1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + {ok,Node1} = start_node(N1,filename:rootname(Sys)), + ok = rpc:call(Node1, application, load, [app1()]), + {ok, value} = rpc:call(Node1, application, get_env,[app1,key1]), + {ok, value1} = rpc:call(Node1, application, get_env,[app1,key2]), + + Current2 = filename:join(SubDir,"current.config"), + ok = file:write_file(Current2,"[{app1,[{key2,value2}]}].\n"), + + N2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_2"])), + {ok, Node2} = start_node(N2,filename:rootname(Sys)), + ok = rpc:call(Node2, application, load, [app1()]), + {ok, value} = rpc:call(Node2, application, get_env,[app1,key1]), + {ok, value2} = rpc:call(Node2, application, get_env,[app1,key2]), + + stop_node_nice([Node1,Node2]), + + ok. + +%%----------------------------------------------------------------- %% Utility functions %%----------------------------------------------------------------- app0() -> diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl index f837c31e64..b367b4dd54 100644 --- a/lib/kernel/test/logger_SUITE.erl +++ b/lib/kernel/test/logger_SUITE.erl @@ -329,7 +329,7 @@ log_no_levels(_Config) -> ok. log_no_levels(cleanup,_Config) -> logger:remove_handler(h1), - logger:set_primary_config(level,info), + logger:set_primary_config(level,notice), logger:unset_module_level(?MODULE), ok. diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl index 5f7b505ecb..7e5b574869 100644 --- a/lib/kernel/test/logger_disk_log_h_SUITE.erl +++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl @@ -1,3 +1,22 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% -module(logger_disk_log_h_SUITE). -compile(export_all). @@ -26,10 +45,6 @@ -define(log_no(File,N), lists:concat([File,".",N])). -define(domain,#{domain=>[?MODULE]}). --define(SYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500; - true -> ?FILESYNC_REPEAT_INTERVAL + 500 - end). - suite() -> [{timetrap,{seconds,30}}, {ct_hooks,[logger_test_lib]}]. 
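The config_relative_paths case above exercises the lookup added to check_conf_sys/4 in the application_controller.erl hunk earlier in this patch. A standalone sketch of that rule; the helper name resolve_config/2 is chosen here for illustration only:

    %% A relative name listed inside sys.config is first tried relative to the
    %% directory of sys.config itself; if no such file exists there, the legacy
    %% behaviour (relative to the current working directory) is kept.
    resolve_config(FName, SysConfDir) ->
        case filename:pathtype(FName) of
            relative when SysConfDir =/= [] ->
                Candidate = filename:join(SysConfDir, FName),
                case erl_prim_loader:read_file_info(Candidate) of
                    {ok, _} -> Candidate;
                    error   -> FName
                end;
            _ ->
                FName
        end.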
@@ -50,9 +65,10 @@ end_per_group(_Group, _Config) -> init_per_testcase(TestHooksCase, Config) when TestHooksCase == write_failure; TestHooksCase == sync_failure -> - if ?TEST_HOOKS_TAB == undefined -> + case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of + true -> {skip,"Define the TEST_HOOKS macro to run this test"}; - true -> + false -> ct:print("********** ~w **********", [TestHooksCase]), Config end; @@ -124,7 +140,7 @@ create_log(Config) -> formatter=>{?MODULE,self()}}, #{file=>LogFile1}), logger:notice("hello", ?domain), - logger_disk_log_h:sync(Name1), + logger_disk_log_h:filesync(Name1), ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]), try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000), @@ -137,7 +153,7 @@ create_log(Config) -> formatter=>{?MODULE,self()}}, #{file=>LogFile2}), logger:notice("dummy", ?domain), - logger_disk_log_h:sync(Name2), + logger_disk_log_h:filesync(Name2), ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]), try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000), @@ -158,7 +174,7 @@ open_existing_log(Config) -> formatter=>{?MODULE,self()}}, #{file=>LogFile1}), logger:notice("one", ?domain), - logger_disk_log_h:sync(HName), + logger_disk_log_h:filesync(HName), ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]), try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000), logger:notice("two", ?domain), @@ -172,7 +188,7 @@ open_existing_log(Config) -> formatter=>{?MODULE,self()}}, #{file=>LogFile1}), logger:notice("three", ?domain), - logger_disk_log_h:sync(HName), + logger_disk_log_h:filesync(HName), try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000), remove_and_stop(HName), try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000). @@ -197,22 +213,22 @@ disk_log_opts(Config) -> {WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1), Get(size,WInfo1),Get(current_file,WInfo1)}, logger:notice("123", ?domain), - logger_disk_log_h:sync(WName), + logger_disk_log_h:filesync(WName), timer:sleep(500), 1 = Get(current_file, disk_log:info(WName)), logger:notice("45", ?domain), - logger_disk_log_h:sync(WName), + logger_disk_log_h:filesync(WName), timer:sleep(500), 1 = Get(current_file, disk_log:info(WName)), logger:notice("6", ?domain), - logger_disk_log_h:sync(WName), + logger_disk_log_h:filesync(WName), timer:sleep(500), 2 = Get(current_file, disk_log:info(WName)), logger:notice("7890", ?domain), - logger_disk_log_h:sync(WName), + logger_disk_log_h:filesync(WName), timer:sleep(500), 2 = Get(current_file, disk_log:info(WName)), @@ -230,7 +246,7 @@ disk_log_opts(Config) -> {HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1), Get(size,HInfo1)}, logger:notice("12345", ?domain), - logger_disk_log_h:sync(HName1), + logger_disk_log_h:filesync(HName1), timer:sleep(500), 1 = Get(no_written_items, disk_log:info(HName1)), @@ -407,8 +423,8 @@ config_fail(cleanup,_Config) -> logger:remove_handler(?MODULE). bad_input(_Config) -> - {error,{badarg,{sync,["BadType"]}}} = - logger_disk_log_h:sync("BadType"), + {error,{badarg,{filesync,["BadType"]}}} = + logger_disk_log_h:filesync("BadType"), {error,{badarg,{info,["BadType"]}}} = logger_disk_log_h:info("BadType"), {error,{badarg,{reset,["BadType"]}}} = logger_disk_log_h:reset("BadType"). 
@@ -456,7 +472,7 @@ reconfig(Config) -> overload_kill_enable => true, overload_kill_qlen => 100000, overload_kill_mem_size => 10000000, - overload_kill_restart_after => never, + overload_kill_restart_after => infinity, filesync_repeat_interval => no_repeat}, ok = logger:set_handler_config(?MODULE, config, HConfig1), #{id := ?MODULE, @@ -469,7 +485,7 @@ reconfig(Config) -> overload_kill_enable := true, overload_kill_qlen := 100000, overload_kill_mem_size := 10000000, - overload_kill_restart_after := never, + overload_kill_restart_after := infinity, filesync_repeat_interval := no_repeat} = logger_disk_log_h:info(?MODULE), @@ -526,7 +542,7 @@ sync(Config) -> logger:notice("second", ?domain), logger:notice("third", ?domain), %% do explicit sync - logger_disk_log_h:sync(?MODULE), + logger_disk_log_h:filesync(?MODULE), check_tracer(100), %% check that if there's no repeated disk_log_sync active, @@ -739,9 +755,15 @@ write_failure(Config) -> ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]), ok = log_on_remote_node(Node, "Logged1"), - rpc:call(Node, logger_disk_log_h, sync, [?STANDARD_HANDLER]), + rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]), ?check_no_log, - try_read_file(Log, {ok,<<"Logged1\n">>}, ?SYNC_REP_INT), + + SyncRepInt = case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of + true -> 5500; + false -> ?FILESYNC_REPEAT_INTERVAL + 500 + end, + + try_read_file(Log, {ok,<<"Logged1\n">>}, SyncRepInt), rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]), ok = log_on_remote_node(Node, "Cause simple error printout"), @@ -759,9 +781,9 @@ write_failure(Config) -> rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]), ok = log_on_remote_node(Node, "Logged2"), - rpc:call(Node, logger_disk_log_h, sync, [?STANDARD_HANDLER]), + rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]), ?check_no_log, - try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?SYNC_REP_INT), + try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, SyncRepInt), ok. write_failure(cleanup, _Config) -> Nodes = nodes(), @@ -795,7 +817,7 @@ sync_failure(Config) -> rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]), ok = log_on_remote_node(Node, "Cause simple error printout"), - ?check({error,{?STANDARD_HANDLER,sync,LogOpts,{error,no_such_log}}}), + ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,{error,no_such_log}}}), ok = log_on_remote_node(Node, "No second error printout"), ?check_no_log, @@ -803,7 +825,7 @@ sync_failure(Config) -> rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]), ok = log_on_remote_node(Node, "Cause simple error printout"), - ?check({error,{?STANDARD_HANDLER,sync,LogOpts, + ?check({error,{?STANDARD_HANDLER,filesync,LogOpts, {error,{blocked_log,?STANDARD_HANDLER}}}}), rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]), @@ -861,8 +883,8 @@ op_switch_to_sync(Config) -> ok = logger:set_handler_config(?MODULE, NewHConfig), send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Lines = count_lines(Log), - ok = file_delete(Log), NumOfReqs = Lines, + ok = file_delete(Log), ok. op_switch_to_sync(cleanup, _Config) -> ok = stop_handler(?MODULE). 
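The reconfig case above, together with the check_common_config/1 and logger_h_common.hrl hunks earlier in this patch, replaces the atom never with infinity as the value that disables restarts after an overload kill. A sketch of changing that setting at runtime; HandlerId is assumed to be the id of an existing handler, and the get-then-set pattern mirrors the test code:

    disable_handler_restart(HandlerId) ->
        %% Read the current handler-specific config, then enable overload kill
        %% without automatic restart by using the new 'infinity' value.
        {ok, #{config := HConfig}} = logger:get_handler_config(HandlerId),
        ok = logger:set_handler_config(
               HandlerId, config,
               HConfig#{overload_kill_enable => true,
                        overload_kill_qlen => 10,
                        overload_kill_restart_after => infinity}).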
@@ -892,11 +914,11 @@ op_switch_to_drop(Config) -> _ <- lists:seq(1, Bursts)], Logged = count_lines(Log), ok = stop_handler(?MODULE), - _ = file_delete(Log), ct:pal("Number of messages dropped = ~w (~w)", [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]), true = (Logged < (Procs*NumOfReqs*Bursts)), true = (Logged > 0), + _ = file_delete(Log), ok end, %% As it's tricky to get the timing right in only one go, we perform the @@ -944,11 +966,11 @@ op_switch_to_flush(Config) -> _ <- lists:seq(1,Bursts)], Logged = count_lines(Log), ok= stop_handler(?MODULE), - _ = file_delete(Log), ct:pal("Number of messages flushed/dropped = ~w (~w)", [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]), true = (Logged < (NumOfReqs*Procs*Bursts)), true = (Logged > 0), + _ = file_delete(Log), ok end, %% As it's tricky to get the timing right in only one go, we perform the @@ -976,8 +998,9 @@ limit_burst_disabled(Config) -> send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Logged = count_lines(Log), ct:pal("Number of messages logged = ~w", [Logged]), + NumOfReqs = Logged, ok = file_delete(Log), - NumOfReqs = Logged. + ok. limit_burst_disabled(cleanup, _Config) -> ok = stop_handler(?MODULE). @@ -995,8 +1018,9 @@ limit_burst_enabled_one(Config) -> send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Logged = count_lines(Log), ct:pal("Number of messages logged = ~w", [Logged]), + ReqLimit = Logged, ok = file_delete(Log), - ReqLimit = Logged. + ok. limit_burst_enabled_one(cleanup, _Config) -> ok = stop_handler(?MODULE). @@ -1017,9 +1041,10 @@ limit_burst_enabled_period(Config) -> Logged = count_lines(Log), ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", [Sent,Logged]), - ok = file_delete(Log), true = (Logged > (ReqLimit*Windows)) andalso - (Logged < (ReqLimit*(Windows+2))). + (Logged < (ReqLimit*(Windows+2))), + ok = file_delete(Log), + ok. limit_burst_enabled_period(cleanup, _Config) -> ok = stop_handler(?MODULE). @@ -1041,7 +1066,7 @@ kill_disabled(cleanup, _Config) -> ok = stop_handler(?MODULE). qlen_kill_new(Config) -> - {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(h_proc_name()), {_,Mem0} = process_info(Pid0, memory), RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, @@ -1054,7 +1079,7 @@ qlen_kill_new(Config) -> ok = logger:set_handler_config(?MODULE, NewHConfig), MRef = erlang:monitor(process, Pid0), NumOfReqs = 100, - Procs = 2, + Procs = 4, send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), receive @@ -1065,8 +1090,8 @@ qlen_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter + 2000), - true = is_pid(whereis(h_proc_name())), + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), ok after 5000 -> @@ -1078,7 +1103,7 @@ qlen_kill_new(cleanup, _Config) -> ok = stop_handler(?MODULE). 
mem_kill_new(Config) -> - {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(h_proc_name()), {_,Mem0} = process_info(Pid0, memory), RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, @@ -1091,7 +1116,7 @@ mem_kill_new(Config) -> ok = logger:set_handler_config(?MODULE, NewHConfig), MRef = erlang:monitor(process, Pid0), NumOfReqs = 100, - Procs = 2, + Procs = 4, send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), receive @@ -1102,8 +1127,8 @@ mem_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter + 2000), - true = is_pid(whereis(h_proc_name())), + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), ok after 5000 -> @@ -1114,23 +1139,27 @@ mem_kill_new(Config) -> mem_kill_new(cleanup, _Config) -> ok = stop_handler(?MODULE). +restart_after() -> + [{timetrap,{minutes,2}}]. restart_after(Config) -> {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), NewHConfig1 = HConfig#{config=>DLHConfig#{overload_kill_enable=>true, overload_kill_qlen=>10, - overload_kill_restart_after=>never}}, + overload_kill_restart_after=>infinity}}, ok = logger:set_handler_config(?MODULE, NewHConfig1), MRef1 = erlang:monitor(process, whereis(h_proc_name())), %% kill handler - send_burst({n,100}, {spawn,2,0}, {chars,79}, notice), + send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), receive - {'DOWN', MRef1, _, _, _Info1} -> - timer:sleep(?OVERLOAD_KILL_RESTART_AFTER + 1000), - undefined = whereis(h_proc_name()), + {'DOWN', MRef1, _, _, _Reason1} -> + file_delete(Log), + error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3), ok after 5000 -> + Info1 = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info1]), ct:fail("Handler not dead! It should not have survived this!") end, @@ -1144,16 +1173,17 @@ restart_after(Config) -> Pid0 = whereis(h_proc_name()), MRef2 = erlang:monitor(process, Pid0), %% kill handler - send_burst({n,100}, {spawn,2,0}, {chars,79}, notice), + send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), receive - {'DOWN', MRef2, _, _, _Info2} -> - timer:sleep(RestartAfter + 2000), - Pid1 = whereis(h_proc_name()), - true = is_pid(Pid1), + {'DOWN', MRef2, _, _, _Reason2} -> + file_delete(Log), + {ok,Pid1} = wait_for_process_up(RestartAfter * 3), false = (Pid1 == Pid0), ok after 5000 -> + Info2 = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info2]), ct:fail("Handler not dead! It should not have survived this!") end, ok. 
@@ -1173,7 +1203,7 @@ handler_requests_under_load(Config) -> flush_qlen => 2000, burst_limit_enable => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), - Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{sync,[]}, + Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]}, {info,[]}, {reset,[]}, {change_config,[]}]) @@ -1220,6 +1250,8 @@ start_handler(Name, FuncName, Config) -> Dir = ?config(priv_dir,Config), File = filename:join(Dir, FuncName), ct:pal("Logging to ~tp", [File]), + FullFile = lists:concat([File,".1"]), + _ = file_delete(FullFile), ok = logger:add_handler(Name, logger_disk_log_h, #{config=>#{file => File, @@ -1229,7 +1261,7 @@ start_handler(Name, FuncName, Config) -> filters=>?DEFAULT_HANDLER_FILTERS([Name]), formatter=>{?MODULE,op}}), {ok,HConfig = #{config := DLHConfig}} = logger:get_handler_config(Name), - {lists:concat([File,".1"]),HConfig,DLHConfig}. + {FullFile,HConfig,DLHConfig}. stop_handler(Name) -> ct:pal("Stopping handler ~p!", [Name]), @@ -1256,12 +1288,9 @@ send_n_burst(N, seq, Text, Class) -> send_n_burst(N-1, seq, Text, Class); send_n_burst(N, {spawn,Ps,TO}, Text, Class) -> ct:pal("~w processes each sending ~w messages", [Ps,N]), - PerProc = fun() -> - send_n_burst(N, seq, Text, Class) - end, MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end, - monitor(process,spawn_link(PerProc)) end || - _ <- lists:seq(1,Ps)], + monitor(process,spawn_link(per_proc_fun(N,Text,Class,X))) + end || X <- lists:seq(1,Ps)], lists:foreach(fun(MRef) -> receive {'DOWN', MRef, _, _, _} -> @@ -1280,6 +1309,16 @@ send_t_burst(T0, T, Text, Class, N) -> send_t_burst(T0, T, Text, Class, N+1) end. +per_proc_fun(N,Text,Class,X) when X rem 2 == 0 -> + fun() -> + process_flag(priority,high), + send_n_burst(N, seq, Text, Class) + end; +per_proc_fun(N,Text,Class,_) -> + fun() -> + send_n_burst(N, seq, Text, Class) + end. + %%%----------------------------------------------------------------- %%% Formatter callback %%% Using this to send the formatted string back to the test case @@ -1399,10 +1438,10 @@ wait_until_written(File, Sz) -> {ok,#file_info{size = Sz}} -> timer:sleep(1000), case file:read_file_info(File) of - {ok,#file_info{size = Sz1}} -> + {ok,#file_info{size = Sz}} -> ok; - {ok,#file_info{size = Sz2}} -> - wait_until_written(File, Sz2) + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) end; {ok,#file_info{size = Sz1}} -> wait_until_written(File, Sz1) @@ -1514,3 +1553,23 @@ h_proc_name(Name) -> file_delete(Log) -> file:delete(Log). + +wait_for_process_up(T) -> + wait_for_process_up(h_proc_name(),T). + +wait_for_process_up(Name,T) -> + N = (T div 500) + 1, + wait_for_process_up1(Name,N). + +wait_for_process_up1(Name,0) -> + error; +wait_for_process_up1(Name,N) -> + timer:sleep(500), + case whereis(Name) of + Pid when is_pid(Pid) -> + %% ct:pal("Process ~p up (~p tries left)",[Name,N]), + {ok,Pid}; + undefined -> + %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]), + wait_for_process_up1(Name,N-1) + end. 
diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl index 6e182d4bca..04a4364947 100644 --- a/lib/kernel/test/logger_env_var_SUITE.erl +++ b/lib/kernel/test/logger_env_var_SUITE.erl @@ -214,6 +214,7 @@ logger_file(Config) -> file,% dest 0),% progress in std logger + notice = maps:get(level,P), #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), all = maps:get(level,StdC), StdFilters = maps:get(filters,StdC), @@ -525,12 +526,12 @@ logger_many_handlers_default_last_broken_filter(Config) -> {logger_level,info}], LogErr, LogInfo, 7). logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) -> - {ok,#{handlers:=Hs},Node} = setup(Config,Env), + {ok,_,Node} = setup(Config,Env), check_single_log(Node,LogErr, file,% dest 0,% progress in std logger error), % level - ok = rpc:call(Node,logger_std_h,sync,[info]), + ok = rpc:call(Node,logger_std_h,filesync,[info]), {ok, Bin} = file:read_file(LogInfo), ct:log("Log content:~n~s",[Bin]), match(Bin,<<"info:">>,NumProgress,info,info), diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl index 864a40b618..8fe8d5199b 100644 --- a/lib/kernel/test/logger_formatter_SUITE.erl +++ b/lib/kernel/test/logger_formatter_SUITE.erl @@ -208,7 +208,7 @@ template(_Config) -> time=>Time, tuple=>{1,atom,"list"}, nested=>#{subkey=>subvalue}}, - Template6 = lists:join(";",maps:keys(maps:remove(nested,Meta6)) ++ + Template6 = lists:join(";",lists:sort(maps:keys(maps:remove(nested,Meta6))) ++ [[nested,subkey]]), String6 = format(info,{"~p",[term]},Meta6,#{template=>Template6, single_line=>true}), diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl index de73b6152a..c3cab07d81 100644 --- a/lib/kernel/test/logger_legacy_SUITE.erl +++ b/lib/kernel/test/logger_legacy_SUITE.erl @@ -20,6 +20,8 @@ -module(logger_legacy_SUITE). -compile(export_all). +-compile({nowarn_deprecated_function,[{gen_fsm,start,3}, + {gen_fsm,send_all_state_event,2}]}). -include_lib("common_test/include/ct.hrl"). -include_lib("kernel/include/logger.hrl"). @@ -89,6 +91,7 @@ init_per_group(_Group, Config) -> end_per_group(sasl, Config) -> Apps = ?config(stop_apps,Config), [application:stop(App) || App <- Apps], + ok = logger:set_primary_config(level,notice), ok; end_per_group(_Group, _Config) -> ok. @@ -122,7 +125,7 @@ all() -> gen_server(_Config) -> {ok,Pid} = gen_server:start(?MODULE,gen_server,[]), - Msg = fun() -> a=b end, + Msg = fun() -> erlang:error({badmatch,b}) end, Pid ! Msg, ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}), ok = gen_server:cast(Pid,Msg), @@ -132,7 +135,7 @@ gen_server(_Config) -> gen_event(_Config) -> {ok,Pid} = gen_event:start(), ok = gen_event:add_handler(Pid,?MODULE,gen_event), - Msg = fun() -> a=b end, + Msg = fun() -> erlang:error({badmatch,b}) end, Pid ! Msg, ?check({warning_msg,"** Undefined handle_info in ~tp"++_,[?MODULE,Msg]}), gen_event:notify(Pid,Msg), @@ -141,7 +144,7 @@ gen_event(_Config) -> gen_fsm(_Config) -> {ok,Pid} = gen_fsm:start(?MODULE,gen_fsm,[]), - Msg = fun() -> a=b end, + Msg = fun() -> erlang:error({badmatch,b}) end, Pid ! Msg, ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}), gen_fsm:send_all_state_event(Pid,Msg), @@ -150,7 +153,7 @@ gen_fsm(_Config) -> gen_statem(_Config) -> {ok,Pid} = gen_statem:start(?MODULE,gen_statem,[]), - Msg = fun() -> a=b end, + Msg = fun() -> erlang:error({badmatch,b}) end, Pid ! 
Msg, ?check({error,"** State machine ~tp terminating"++_, [Pid,{info,Msg},{mystate,gen_statem},error,{badmatch,b}|_]}). @@ -179,7 +182,7 @@ sasl_reports(Config) -> ok = gen_server:cast(ChPid, fun() -> spawn_link(fun() -> receive x->ok end end) end), - Msg = fun() -> a=b end, + Msg = fun() -> erlang:error({badmatch,b}) end, ok = gen_server:cast(ChPid,Msg), ?check_no_flush({error,"** Generic server ~tp terminating"++_, [ChPid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}), diff --git a/lib/kernel/test/logger_simple_h_SUITE.erl b/lib/kernel/test/logger_simple_h_SUITE.erl index e4e48b538a..79e5c057ad 100644 --- a/lib/kernel/test/logger_simple_h_SUITE.erl +++ b/lib/kernel/test/logger_simple_h_SUITE.erl @@ -107,15 +107,15 @@ start_stop(cleanup,_Config) -> replace_default(Config) -> {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), - log(Node, emergency, [M1=?str]), - log(Node, alert, [M2=?str,[]]), - log(Node, error, [M3=?map_rep]), - log(Node, info, [M4=?keyval_rep]), - log(Node, info, [M41=?keyval_rep++[not_key_val]]), - rpc:call(Node, error_logger, error_report, [some_type,M5=?map_rep]), - rpc:call(Node, error_logger, warning_report, ["some_type",M6=?map_rep]), - log(Node, critical, [M7=?str,[A7=?keyval_rep]]), - log(Node, notice, [M8=["fake",string,"line:",?LINE]]), + log(Node, emergency, [?str]), + log(Node, alert, [?str,[]]), + log(Node, error, [?map_rep]), + log(Node, info, [?keyval_rep]), + log(Node, info, [?keyval_rep++[not_key_val]]), + rpc:call(Node, error_logger, error_report, [some_type,?map_rep]), + rpc:call(Node, error_logger, warning_report, ["some_type",?map_rep]), + log(Node, critical, [?str,[?keyval_rep]]), + log(Node, notice, [["fake",string,"line:",?LINE]]), Env = rpc:call(Node, application, get_env, [kernel, logger, []]), ok = rpc:call(Node, logger, add_handlers, [Env]), @@ -127,11 +127,11 @@ replace_file(Config) -> {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), log(Node, emergency, [M1=?str]), log(Node, alert, [M2=?str,[]]), - log(Node, error, [M3=?map_rep]), - log(Node, warning, [M4=?keyval_rep]), - log(Node, warning, [M41=?keyval_rep++[not_key_val]]), - log(Node, critical, [M7=?str,[A7=?keyval_rep]]), - log(Node, notice, [M8=["fake",string,"line:",?LINE]]), + log(Node, error, [?map_rep]), + log(Node, warning, [?keyval_rep]), + log(Node, warning, [?keyval_rep++[not_key_val]]), + log(Node, critical, [?str,[?keyval_rep]]), + log(Node, notice, [["fake",string,"line:",?LINE]]), File = filename:join(proplists:get_value(priv_dir,Config), atom_to_list(?FUNCTION_NAME)++".log"), @@ -171,11 +171,11 @@ replace_disk_log(Config) -> {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), log(Node, emergency, [M1=?str]), log(Node, alert, [M2=?str,[]]), - log(Node, error, [M3=?map_rep]), - log(Node, warning, [M4=?keyval_rep]), - log(Node, warning, [M41=?keyval_rep++[not_key_val]]), - log(Node, critical, [M7=?str,[A7=?keyval_rep]]), - log(Node, notice, [M8=["fake",string,"line:",?LINE]]), + log(Node, error, [?map_rep]), + log(Node, warning, [?keyval_rep]), + log(Node, warning, [?keyval_rep++[not_key_val]]), + log(Node, critical, [?str,[?keyval_rep]]), + log(Node, notice, [["fake",string,"line:",?LINE]]), File = filename:join(proplists:get_value(priv_dir,Config), atom_to_list(?FUNCTION_NAME)++".log"), diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl index 36b123b07d..ca54458ac1 100644 --- 
a/lib/kernel/test/logger_std_h_SUITE.erl +++ b/lib/kernel/test/logger_std_h_SUITE.erl @@ -45,10 +45,6 @@ -define(bin(Msg), list_to_binary(Msg++"\n")). -define(domain,#{domain=>[?MODULE]}). --define(FILESYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500; - true -> ?FILESYNC_REPEAT_INTERVAL + 500 - end). - suite() -> [{timetrap,{seconds,30}}, {ct_hooks,[logger_test_lib]}]. @@ -73,12 +69,25 @@ end_per_group(_Group, _Config) -> init_per_testcase(TestHooksCase, Config) when TestHooksCase == write_failure; TestHooksCase == sync_failure -> - if ?TEST_HOOKS_TAB == undefined -> + case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of + true -> {skip,"Define the TEST_HOOKS macro to run this test"}; - true -> + false -> ct:print("********** ~w **********", [TestHooksCase]), Config end; +init_per_testcase(OPCase, Config) when + OPCase == qlen_kill_new; + OPCase == restart_after -> + case re:run(erlang:system_info(system_version), + "dirty-schedulers-TEST", + [{capture,none}]) of + match -> + {skip,"Overload protection test skipped on dirty-schedulers-TEST"}; + nomatch -> + ct:print("********** ~w **********", [OPCase]), + Config + end; init_per_testcase(TestCase, Config) -> ct:print("********** ~w **********", [TestCase]), Config. @@ -175,13 +184,13 @@ add_remove_instance_file(Log, Type) -> logger:notice(M1=?msg,?domain), ?check(M1), B1 = ?bin(M1), - try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT), + try_read_file(Log, {ok,B1}, filesync_rep_int()), ok = logger:remove_handler(?MODULE), timer:sleep(500), undefined = whereis(h_proc_name()), logger:notice(?msg,?domain), ?check_no_log, - try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT), + try_read_file(Log, {ok,B1}, filesync_rep_int()), ok. default_formatter(_Config) -> @@ -214,11 +223,17 @@ errors(Config) -> logger:add_handler(?MODULE,logger_std_h, #{config => #{type => faulty_type}}), - NoDir = lists:concat(["/",?MODULE,"_dir"]), - {error, - {handler_not_added,{{open_failed,NoDir,eacces},_}}} = - logger:add_handler(myh2,logger_std_h, - #{config=>#{type=>{file,NoDir}}}), + case os:type() of + {win32,_} -> + %% No use in testing file access on windows + ok; + _ -> + NoDir = lists:concat(["/",?MODULE,"_dir"]), + {error, + {handler_not_added,{{open_failed,NoDir,eacces},_}}} = + logger:add_handler(myh2,logger_std_h, + #{config=>#{type=>{file,NoDir}}}) + end, {error, {handler_not_added,{{open_failed,Log,_},_}}} = @@ -393,7 +408,7 @@ crash_std_h(cleanup) -> [test_server:stop_node(Node) || Node <- Nodes]. sync_and_read(Node,disk_log,Log) -> - rpc:call(Node,logger_disk_log_h,sync,[?STANDARD_HANDLER]), + rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]), case file:read_file(Log ++ ".1") of {ok,<<>>} -> timer:sleep(5000), @@ -402,7 +417,7 @@ sync_and_read(Node,disk_log,Log) -> Ok end; sync_and_read(Node,file,Log) -> - rpc:call(Node,logger_std_h,sync,[?STANDARD_HANDLER]), + rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]), case file:read_file(Log) of {ok,<<>>} -> timer:sleep(5000), @@ -412,7 +427,7 @@ sync_and_read(Node,file,Log) -> end. bad_input(_Config) -> - {error,{badarg,{sync,["BadType"]}}} = logger_std_h:sync("BadType"), + {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType"), {error,{badarg,{info,["BadType"]}}} = logger_std_h:info("BadType"), {error,{badarg,{reset,["BadType"]}}} = logger_std_h:reset("BadType"). 
@@ -455,7 +470,7 @@ reconfig(Config) -> overload_kill_enable => true, overload_kill_qlen => 100000, overload_kill_mem_size => 10000000, - overload_kill_restart_after => never, + overload_kill_restart_after => infinity, filesync_repeat_interval => no_repeat}), #{id := ?MODULE, type := standard_io, @@ -469,7 +484,7 @@ reconfig(Config) -> overload_kill_enable := true, overload_kill_qlen := 100000, overload_kill_mem_size := 10000000, - overload_kill_restart_after := never, + overload_kill_restart_after := infinity, filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE), ok. @@ -499,7 +514,7 @@ file_opts(Config) -> logger:notice(M1=?msg,?domain), ?check(M1), B1 = ?bin(M1), - try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT), + try_read_file(Log, {ok,B1}, filesync_rep_int()), ok. file_opts(cleanup, _Config) -> logger:remove_handler(?MODULE). @@ -526,7 +541,7 @@ sync(Config) -> logger:notice("first", ?domain), %% wait for automatic filesync - check_tracer(?FILESYNC_REP_INT*2), + check_tracer(filesync_rep_int()*2), %% check that explicit filesync is only done once start_tracer([{logger_std_h, write_to_dev, 5}, @@ -539,9 +554,9 @@ sync(Config) -> ]), logger:notice("second", ?domain), %% do explicit sync - logger_std_h:sync(?MODULE), + logger_std_h:filesync(?MODULE), %% a second sync should be ignored - logger_std_h:sync(?MODULE), + logger_std_h:filesync(?MODULE), check_tracer(100), %% check that if there's no repeated filesync active, @@ -600,9 +615,9 @@ write_failure(Config) -> rpc:call(Node, ?MODULE, set_result, [file_write,ok]), ok = log_on_remote_node(Node, "Logged1"), - rpc:call(Node, logger_std_h, sync, [?STANDARD_HANDLER]), + rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]), ?check_no_log, - try_read_file(Log, {ok,<<"Logged1\n">>}, ?FILESYNC_REP_INT), + try_read_file(Log, {ok,<<"Logged1\n">>}, filesync_rep_int()), rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]), ok = log_on_remote_node(Node, "Cause simple error printout"), @@ -618,9 +633,9 @@ write_failure(Config) -> rpc:call(Node, ?MODULE, set_result, [file_write,ok]), ok = log_on_remote_node(Node, "Logged2"), - rpc:call(Node, logger_std_h, sync, [?STANDARD_HANDLER]), + rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]), ?check_no_log, - try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?FILESYNC_REP_INT), + try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, filesync_rep_int()), ok. write_failure(cleanup, _Config) -> Nodes = nodes(), @@ -649,14 +664,14 @@ sync_failure(Config) -> rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]), ok = log_on_remote_node(Node, "Cause simple error printout"), - ?check({error,{?STANDARD_HANDLER,sync,Log,{error,terminated}}}), + ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}), ok = log_on_remote_node(Node, "No second error printout"), ?check_no_log, rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]), ok = log_on_remote_node(Node, "Cause simple error printout"), - ?check({error,{?STANDARD_HANDLER,sync,Log,{error,eacces}}}), + ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}), rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]), ok = log_on_remote_node(Node, "Logged2"), @@ -677,10 +692,10 @@ start_std_h_on_new_node(Config, Log) -> Node. %% functions for test hook macros to be called by rpc -set_internal_log(Mod, Func) -> - ?set_internal_log({Mod,Func}). -set_result(Op, Result) -> - ?set_result(Op, Result). +set_internal_log(_Mod, _Func) -> + ?set_internal_log({_Mod,_Func}). 
+set_result(_Op, _Result) -> + ?set_result(_Op, _Result). set_defaults() -> ?set_defaults(). @@ -707,7 +722,6 @@ op_switch_to_sync_file(Config) -> %% TRecvPid = start_op_trace(), send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Lines = count_lines(Log), - ok = file_delete(Log), %% true = analyse_trace(TRecvPid, %% fun(Events) -> find_mode(async,Events) end), %% true = analyse_trace(TRecvPid, @@ -720,6 +734,7 @@ op_switch_to_sync_file(Config) -> %% fun(Events) -> find_mode(flush,Events) end), %% stop_op_trace(TRecvPid), NumOfReqs = Lines, + ok = file_delete(Log), ok. op_switch_to_sync_file(cleanup, _Config) -> ok = stop_handler(?MODULE). @@ -764,11 +779,11 @@ op_switch_to_drop_file(Config) -> _ <- lists:seq(1, Bursts)], Logged = count_lines(Log), ok = stop_handler(?MODULE), - _ = file_delete(Log), ct:pal("Number of messages dropped = ~w (~w)", [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]), true = (Logged < (Procs*NumOfReqs*Bursts)), true = (Logged > 0), + _ = file_delete(Log), ok end, %% As it's tricky to get the timing right in only one go, we perform the @@ -832,11 +847,11 @@ op_switch_to_flush_file(Config) -> _ <- lists:seq(1,Bursts)], Logged = count_lines(Log), ok = stop_handler(?MODULE), - _ = file_delete(Log), ct:pal("Number of messages flushed/dropped = ~w (~w)", [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]), true = (Logged < (NumOfReqs*Procs*Bursts)), true = (Logged > 0), + _ = file_delete(Log), ok end, %% As it's tricky to get the timing right in only one go, we perform the @@ -885,8 +900,9 @@ limit_burst_disabled(Config) -> send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Logged = count_lines(Log), ct:pal("Number of messages logged = ~w", [Logged]), + NumOfReqs = Logged, ok = file_delete(Log), - NumOfReqs = Logged. + ok. limit_burst_disabled(cleanup, _Config) -> ok = stop_handler(?MODULE). @@ -904,8 +920,9 @@ limit_burst_enabled_one(Config) -> send_burst({n,NumOfReqs}, seq, {chars,79}, notice), Logged = count_lines(Log), ct:pal("Number of messages logged = ~w", [Logged]), + ReqLimit = Logged, ok = file_delete(Log), - ReqLimit = Logged. + ok. limit_burst_enabled_one(cleanup, _Config) -> ok = stop_handler(?MODULE). @@ -926,9 +943,10 @@ limit_burst_enabled_period(Config) -> Logged = count_lines(Log), ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", [Sent,Logged]), - ok = file_delete(Log), true = (Logged > (ReqLimit*Windows)) andalso - (Logged < (ReqLimit*(Windows+2))). + (Logged < (ReqLimit*(Windows+2))), + ok = file_delete(Log), + ok. limit_burst_enabled_period(cleanup, _Config) -> ok = stop_handler(?MODULE). @@ -950,7 +968,7 @@ kill_disabled(cleanup, _Config) -> ok = stop_handler(?MODULE). 
qlen_kill_new(Config) -> - {_Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(h_proc_name()), {_,Mem0} = process_info(Pid0, memory), RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, @@ -962,7 +980,7 @@ qlen_kill_new(Config) -> ok = logger:set_handler_config(?MODULE, NewHConfig), MRef = erlang:monitor(process, Pid0), NumOfReqs = 100, - Procs = 2, + Procs = 4, send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), receive @@ -973,8 +991,8 @@ qlen_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter + 2000), - true = is_pid(whereis(h_proc_name())), + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), ok after 5000 -> @@ -1001,7 +1019,7 @@ qlen_kill_std(_Config) -> {skip,"Not done yet"}. mem_kill_new(Config) -> - {_Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), Pid0 = whereis(h_proc_name()), {_,Mem0} = process_info(Pid0, memory), RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, @@ -1013,7 +1031,7 @@ mem_kill_new(Config) -> ok = logger:set_handler_config(?MODULE, NewHConfig), MRef = erlang:monitor(process, Pid0), NumOfReqs = 100, - Procs = 2, + Procs = 4, send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), receive @@ -1024,8 +1042,8 @@ mem_kill_new(Config) -> killed -> ct:pal("Slow shutdown, handler process was killed!", []) end, - timer:sleep(RestartAfter * 3), - true = is_pid(whereis(h_proc_name())), + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), ok after 5000 -> @@ -1041,23 +1059,27 @@ mem_kill_new(cleanup, _Config) -> mem_kill_std(_Config) -> {skip,"Not done yet"}. +restart_after() -> + [{timetrap,{minutes,2}}]. restart_after(Config) -> {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), NewHConfig1 = HConfig#{config=>StdHConfig#{overload_kill_enable=>true, overload_kill_qlen=>10, - overload_kill_restart_after=>never}}, + overload_kill_restart_after=>infinity}}, ok = logger:set_handler_config(?MODULE, NewHConfig1), MRef1 = erlang:monitor(process, whereis(h_proc_name())), %% kill handler - send_burst({n,100}, {spawn,2,0}, {chars,79}, notice), + send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), receive - {'DOWN', MRef1, _, _, _Info1} -> - timer:sleep(?OVERLOAD_KILL_RESTART_AFTER + 1000), - undefined = whereis(h_proc_name()), + {'DOWN', MRef1, _, _, _Reason1} -> + file_delete(Log), + error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3), ok after 5000 -> + Info1 = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info1]), ct:fail("Handler not dead! 
It should not have survived this!") end, @@ -1071,16 +1093,17 @@ restart_after(Config) -> Pid0 = whereis(h_proc_name()), MRef2 = erlang:monitor(process, Pid0), %% kill handler - send_burst({n,100}, {spawn,2,0}, {chars,79}, notice), + send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), receive - {'DOWN', MRef2, _, _, _Info2} -> - timer:sleep(RestartAfter + 2000), - Pid1 = whereis(h_proc_name()), - true = is_pid(Pid1), + {'DOWN', MRef2, _, _, _Reason2} -> + file_delete(Log), + {ok,Pid1} = wait_for_process_up(RestartAfter * 3), false = (Pid1 == Pid0), ok after 5000 -> + Info2 = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info2]), ct:fail("Handler not dead! It should not have survived this!") end, ok. @@ -1101,7 +1124,7 @@ handler_requests_under_load(Config) -> flush_qlen => 2000, burst_limit_enable => false}}, ok = logger:set_handler_config(?MODULE, NewHConfig), - Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{sync,[]}, + Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]}, {info,[]}, {reset,[]}, {change_config,[]}]) @@ -1160,6 +1183,7 @@ start_handler(Name, FuncName, Config) -> Log = filename:join(Dir, lists:concat([FuncName,".log"])), ct:pal("Logging to ~tp", [Log]), Type = {file,Log}, + _ = file_delete(Log), ok = logger:add_handler(Name, logger_std_h, #{config => #{type => Type}, @@ -1184,10 +1208,10 @@ wait_until_written(File, Sz) -> {ok,#file_info{size = Sz}} -> timer:sleep(1000), case file:read_file_info(File) of - {ok,#file_info{size = Sz1}} -> + {ok,#file_info{size = Sz}} -> ok; - {ok,#file_info{size = Sz2}} -> - wait_until_written(File, Sz2) + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) end; {ok,#file_info{size = Sz1}} -> wait_until_written(File, Sz1) @@ -1230,12 +1254,9 @@ send_n_burst(N, seq, Text, Class) -> send_n_burst(N-1, seq, Text, Class); send_n_burst(N, {spawn,Ps,TO}, Text, Class) -> ct:pal("~w processes each sending ~w messages", [Ps,N]), - PerProc = fun() -> - send_n_burst(N, seq, Text, Class) - end, MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end, - monitor(process,spawn_link(PerProc)) end || - _ <- lists:seq(1,Ps)], + monitor(process,spawn_link(per_proc_fun(N,Text,Class,X))) + end || X <- lists:seq(1,Ps)], lists:foreach(fun(MRef) -> receive {'DOWN', MRef, _, _, _} -> @@ -1254,6 +1275,16 @@ send_t_burst(T0, T, Text, Class, N) -> send_t_burst(T0, T, Text, Class, N+1) end. +per_proc_fun(N,Text,Class,X) when X rem 2 == 0 -> + fun() -> + process_flag(priority,high), + send_n_burst(N, seq, Text, Class) + end; +per_proc_fun(N,Text,Class,_) -> + fun() -> + send_n_burst(N, seq, Text, Class) + end. + %%%----------------------------------------------------------------- %%% Formatter callback %%% Using this to send the formatted string back to the test case @@ -1300,7 +1331,7 @@ add_remove_instance_nofile(Type) -> logger:notice(M1=?msg,?domain), ?check(M1), %% check that sync doesn't do damage even if not relevant - ok = logger_std_h:sync(?MODULE), + ok = logger_std_h:filesync(?MODULE), ok = logger:remove_handler(?MODULE), timer:sleep(500), undefined = whereis(h_proc_name()), @@ -1541,3 +1572,29 @@ h_proc_name(Name) -> file_delete(Log) -> file:delete(Log). + +wait_for_process_up(T) -> + wait_for_process_up(h_proc_name(),T). + +wait_for_process_up(Name,T) -> + N = (T div 500) + 1, + wait_for_process_up1(Name,N). 
+ +wait_for_process_up1(_Name,0) -> + error; +wait_for_process_up1(Name,N) -> + timer:sleep(500), + case whereis(Name) of + Pid when is_pid(Pid) -> + %% ct:pal("Process ~p up (~p tries left)",[Name,N]), + {ok,Pid}; + undefined -> + %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]), + wait_for_process_up1(Name,N-1) + end. + +filesync_rep_int() -> + case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of + true -> 5500; + false -> ?FILESYNC_REPEAT_INTERVAL + 500 + end. diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl index 9097453c10..81eb9ce5eb 100644 --- a/lib/kernel/test/logger_test_lib.erl +++ b/lib/kernel/test/logger_test_lib.erl @@ -52,10 +52,10 @@ log(Node, M, F, A) -> rpc:call(Node, M, F, A ++ [MD]). sync_and_read(Node,disk_log,Log) -> - rpc:call(Node,logger_disk_log_h,sync,[?STANDARD_HANDLER]), + rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]), file:read_file(Log ++ ".1"); sync_and_read(Node, file,Log) -> - ok = rpc:call(Node,logger_std_h,sync,[?STANDARD_HANDLER]), + ok = rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]), file:read_file(Log). diff --git a/lib/os_mon/doc/src/os_mon_mib.xml b/lib/os_mon/doc/src/os_mon_mib.xml index dcf3649876..217d1e4804 100644 --- a/lib/os_mon/doc/src/os_mon_mib.xml +++ b/lib/os_mon/doc/src/os_mon_mib.xml @@ -35,7 +35,10 @@ <p>Functions for loading and unloading the OTP-OS-MON-MIB into/from an SNMP agent. The instrumentation of the OTP-OS-MON-MIB uses Mnesia, hence Mnesia must be started prior to loading - the OTP-OS-MON-MIB.</p> + the OTP-OS-MON-MIB.</p> + <warning> + <p>This module has been deprecated and will be removed in a future release.</p> + </warning> </description> <funcs> <func> diff --git a/lib/otp_mibs/doc/src/otp_mib.xml b/lib/otp_mibs/doc/src/otp_mib.xml index 681ce9fa2d..adf3b00c7b 100644 --- a/lib/otp_mibs/doc/src/otp_mib.xml +++ b/lib/otp_mibs/doc/src/otp_mib.xml @@ -34,7 +34,10 @@ <p>The SNMP application should be used to start an SNMP agent. Then the API functions below can be used to load/unload the OTP-MIB into/from the agent. The instrumentation of the OTP-MIB uses - Mnesia, hence Mnesia must be started prior to loading the OTP-MIB.</p> + Mnesia, hence Mnesia must be started prior to loading the OTP-MIB.</p> + <warning> + <p>This application has been deprecated and will be removed in a future release.</p> + </warning> </description> <funcs> <func> diff --git a/lib/otp_mibs/src/otp_mib.erl b/lib/otp_mibs/src/otp_mib.erl index 1431818be4..d18afd9bbd 100644 --- a/lib/otp_mibs/src/otp_mib.erl +++ b/lib/otp_mibs/src/otp_mib.erl @@ -36,6 +36,9 @@ %% Exported for internal use via rpc -export([get_erl_node/1, get_appls/1]). +-deprecated([{load,1,eventually}, + {unload,1,eventually}]).
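The -deprecated attribute added to otp_mib above is the general mechanism for flagging functions that are scheduled for removal: xref's deprecated_function_calls analysis reports calls to the listed functions, and for modules shipped with OTP the compiler additionally warns through the otp_internal clause added further down in this commit. A minimal sketch, with purely hypothetical module and function names, of how the same attribute is used in ordinary code:

    -module(my_api).
    -export([old_fun/1, new_fun/1]).

    %% Flag old_fun/1 for eventual removal, as otp_mib does for load/1 and
    %% unload/1; xref's deprecated_function_calls analysis then reports
    %% calls to it.
    -deprecated([{old_fun,1,eventually}]).

    old_fun(X) ->
        %% Kept only for backwards compatibility; delegates to the new API.
        new_fun(X).

    new_fun(X) ->
        {ok, X}.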
+ %% Shadow tables -record(erlNodeTable, {erlNodeId, erlNodeName, erlNodeMachine, erlNodeVersion, diff --git a/lib/snmp/doc/src/notes.xml b/lib/snmp/doc/src/notes.xml index 8d48cb911d..f64e0cca97 100644 --- a/lib/snmp/doc/src/notes.xml +++ b/lib/snmp/doc/src/notes.xml @@ -34,7 +34,23 @@ </header> - <section><title>SNMP 5.2.10</title> + <section><title>SNMP 5.2.11</title> + + <section><title>Fixed Bugs and Malfunctions</title> + <list> + <item> + <p> + The Snmp MIB compiler now allows using a + TEXTUAL-CONVENTION type before defining it.</p> + <p> + Own Id: OTP-14196 Aux Id: ERIERL-161 </p> + </item> + </list> + </section> + +</section> + +<section><title>SNMP 5.2.10</title> <section><title>Fixed Bugs and Malfunctions</title> <list> diff --git a/lib/snmp/vsn.mk b/lib/snmp/vsn.mk index 2c97683625..96123f02f5 100644 --- a/lib/snmp/vsn.mk +++ b/lib/snmp/vsn.mk @@ -19,6 +19,6 @@ # %CopyrightEnd% APPLICATION = snmp -SNMP_VSN = 5.2.10 +SNMP_VSN = 5.2.11 PRE_VSN = APP_VSN = "$(APPLICATION)-$(SNMP_VSN)$(PRE_VSN)" diff --git a/lib/ssl/doc/src/ssl.xml b/lib/ssl/doc/src/ssl.xml index adf4fb9ba4..19436b2100 100644 --- a/lib/ssl/doc/src/ssl.xml +++ b/lib/ssl/doc/src/ssl.xml @@ -896,16 +896,7 @@ fun(srp, Username :: string(), UserState :: term()) -> <v>Type = erlang | openssl | all</v> </type> <desc> - <p>Returns a list of supported cipher suites. - This function will become deprecated in OTP 21, and replaced - by <seealso marker="#cipher_suites-2">ssl:cipher-suites/2</seealso> - <c>cipher_suites()</c> is equivalent to <c>cipher_suites(erlang).</c> - Type <c>openssl</c> is provided for backwards compatibility with the - old SSL, which used OpenSSL. <c>cipher_suites(all)</c> returns - all available cipher suites. The cipher suites not present - in <c>cipher_suites(erlang)</c> but included in - <c>cipher_suites(all)</c> are not used unless explicitly configured - by the user.</p> + <p>Deprecated in OTP 21, use <seealso marker="#cipher_suites-2">ssl:cipher_suites/2</seealso> instead.</p> </desc> </func> @@ -948,7 +939,7 @@ fun(srp, Username :: string(), UserState :: term()) -> <func> <name>connect(Socket, SslOptions) -> </name> - <name>connect(Socket, SslOptions, Timeout) -> {ok, TLSSocket} | {ok, TLSSocket, Ext} + <name>connect(Socket, SslOptions, Timeout) -> {ok, SslSocket} | {ok, SslSocket, Ext} | {error, Reason}</name> <fsummary>Upgrades a <c>gen_tcp</c>, or equivalent, connected socket to an TLS socket.</fsummary> @@ -956,7 +947,7 @@ fun(srp, Username :: string(), UserState :: term()) -> <v>Socket = socket()</v> <v>SslOptions = [{handshake, hello| full} | ssl_option()]</v> <v>Timeout = integer() | infinity</v> - <v>TLSSocket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>Ext = hello_extensions()</v> <v>Reason = term()</v> </type> @@ -973,8 +964,8 @@ fun(srp, Username :: string(), UserState :: term()) -> <p> If the option <c>{handshake, hello}</c> is used the handshake is paused after receiving the server hello message - and the success response is <c>{ok, TLSSocket, Ext}</c> - instead of <c>{ok, TLSSocket}</c>. Thereafter the handshake is continued or + and the success response is <c>{ok, SslSocket, Ext}</c> + instead of <c>{ok, SslSocket}</c>. Thereafter the handshake is continued or canceled by calling <seealso marker="#handshake_continue-3"> <c>handshake_continue/3</c></seealso> or <seealso marker="#handshake_cancel-1"><c>handshake_cancel/1</c></seealso>. 
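To make the paused-handshake flow documented above concrete, the following is a hedged sketch (not part of the patch) of a client that connects with {handshake, hello}, inspects the returned hello extensions, and then either continues or cancels the handshake; acceptable_extensions/1 and the 5000 ms timeouts are illustrative assumptions:

    %% Sketch only: client side of the paused TLS handshake.
    connect_paused(Host, Port, BaseOpts) ->
        case ssl:connect(Host, Port, [{handshake, hello} | BaseOpts], 5000) of
            {ok, HsSocket, Ext} ->
                %% Ext holds the hello extensions received from the server.
                case acceptable_extensions(Ext) of
                    true ->
                        %% Continue, optionally passing additional options.
                        ssl:handshake_continue(HsSocket, [], 5000);
                    false ->
                        %% Abort with a fatal USER_CANCELED alert.
                        ok = ssl:handshake_cancel(HsSocket),
                        {error, canceled}
                end;
            {error, _} = Error ->
                Error
        end.

    %% Hypothetical policy check on the received extensions.
    acceptable_extensions(_Ext) ->
        true.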
@@ -986,7 +977,7 @@ fun(srp, Username :: string(), UserState :: term()) -> <func> <name>connect(Host, Port, Options) -></name> <name>connect(Host, Port, Options, Timeout) -> - {ok, SslSocket}| {ok, TLSSocket, Ext} | {error, Reason}</name> + {ok, SslSocket}| {ok, SslSocket, Ext} | {error, Reason}</name> <fsummary>Opens an TLS/DTLS connection to <c>Host</c>, <c>Port</c>.</fsummary> <type> <v>Host = host()</v> @@ -1017,8 +1008,8 @@ fun(srp, Username :: string(), UserState :: term()) -> <p> If the option <c>{handshake, hello}</c> is used the handshake is paused after receiving the server hello message - and the success response is <c>{ok, TLSSocket, Ext}</c> - instead of <c>{ok, TLSSocket}</c>. Thereafter the handshake is continued or + and the success response is <c>{ok, SslSocket, Ext}</c> + instead of <c>{ok, SslSocket}</c>. Thereafter the handshake is continued or canceled by calling <seealso marker="#handshake_continue-3"> <c>handshake_continue/3</c></seealso> or <seealso marker="#handshake_cancel-1"><c>handshake_cancel/1</c></seealso>. @@ -1074,6 +1065,7 @@ fun(srp, Username :: string(), UserState :: term()) -> <fsummary>Returns all the connection information. </fsummary> <type> + <v>SslSocket = sslsocket()</v> <v>Item = protocol | cipher_suite | sni_hostname | ecc | session_id | atom()</v> <d>Meaningful atoms, not specified above, are the ssl option names.</d> <v>Result = [{Item::atom(), Value::term()}]</v> @@ -1091,6 +1083,7 @@ fun(srp, Username :: string(), UserState :: term()) -> <fsummary>Returns the requested connection information. </fsummary> <type> + <v>SslSocket = sslsocket()</v> <v>Items = [Item]</v> <v>Item = protocol | cipher_suite | sni_hostname | ecc | session_id | client_random | server_random | master_secret | atom()</v> @@ -1133,7 +1126,7 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>getopts(Socket, OptionNames) -> + <name>getopts(SslSocket, OptionNames) -> {ok, [socketoption()]} | {error, Reason}</name> <fsummary>Gets the values of the specified options.</fsummary> <type> @@ -1147,13 +1140,13 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>getstat(Socket) -> + <name>getstat(SslSocket) -> {ok, OptionValues} | {error, inet:posix()}</name> - <name>getstat(Socket, OptionNames) -> + <name>getstat(SslSocket, OptionNames) -> {ok, OptionValues} | {error, inet:posix()}</name> <fsummary>Get one or more statistic options for a socket</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>OptionNames = [atom()]</v> <v>OptionValues = [{inet:stat_option(), integer()}]</v> </type> @@ -1164,28 +1157,27 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>handshake(Socket) -> </name> - <name>handshake(Socket, Timeout) -> {ok, Socket} | {error, Reason}</name> + <name>handshake(HsSocket) -> </name> + <name>handshake(HsSocket, Timeout) -> {ok, SslSocket} | {error, Reason}</name> <fsummary>Performs server-side SSL/TLS handshake.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>HsSocket = SslSocket = sslsocket()</v> <v>Timeout = integer()</v> <v>Reason = term()</v> </type> <desc> <p>Performs the SSL/TLS/DTLS server-side handshake.</p> - <p><c>Socket</c> is a socket as returned by - <seealso marker="#transport_accept-2">ssl:transport_accept/[1,2]</seealso>. 
- </p> + <p>Returns a new TLS/DTLS socket if the handshake is successful.</p> </desc> </func> <func> <name>handshake(Socket, SslOptions) -> </name> - <name>handshake(Socket, SslOptions, Timeout) -> {ok, Socket} | {ok, Socket, Ext} | {error, Reason}</name> + <name>handshake(Socket, SslOptions, Timeout) -> {ok, SslSocket} | {ok, SslSocket, Ext} | {error, Reason}</name> <fsummary>Performs server-side SSL/TLS/DTLS handshake.</fsummary> <type> <v>Socket = socket() | sslsocket() </v> + <v>SslSocket = sslsocket() </v> <v>Ext = hello_extensions()</v> <v>SslOptions = [{handshake, hello| full} | ssl_option()]</v> <v>Timeout = integer()</v> @@ -1194,22 +1186,23 @@ fun(srp, Username :: string(), UserState :: term()) -> <desc> <p>If <c>Socket</c> is a ordinary <c>socket()</c>: upgrades a <c>gen_tcp</c>, or equivalent, socket to an SSL socket, that is, performs - the SSL/TLS server-side handshake and returns the SSL socket.</p> + the SSL/TLS server-side handshake and returns a TLS socket.</p> - <warning><p>The Socket shall be in passive mode ({active, - false}) before calling this function or the handshake can fail - due to a race condition.</p></warning> + <warning><p>The <c>Socket</c> shall be in passive mode ({active, + false}) before calling this function or else the behavior of this function + is undefined. + </p></warning> <p>If <c>Socket</c> is an <c>sslsocket()</c>: provides extra SSL/TLS/DTLS options to those specified in <seealso marker="#listen-2">ssl:listen/2 </seealso> and then performs - the SSL/TLS/DTLS handshake.</p> - + the SSL/TLS/DTLS handshake. Returns a new TLS/DTLS socket if the handshake is successful.</p> + <p> If option <c>{handshake, hello}</c> is specified the handshake is paused after receiving the client hello message and the - sucess response is <c>{ok, TLSSocket, Ext}</c> instead of <c>{ok, - TLSSocket}</c>. Thereafter the handshake is continued or + success response is <c>{ok, SslSocket, Ext}</c> instead of <c>{ok, + SslSocket}</c>. Thereafter the handshake is continued or canceled by calling <seealso marker="#handshake_continue-3"> <c>handshake_continue/3</c></seealso> or <seealso marker="#handshake_cancel-1"><c>handshake_cancel/1</c></seealso>. 
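A corresponding server-side sketch of the paused handshake described above: the connection is accepted from the listen socket, the handshake is paused after the client hello, and extra options (for example a certificate selected from the SNI extension) are supplied when continuing. This is illustrative only; opts_for_extensions/1 is a hypothetical helper and the timeout handling is deliberately simple:

    %% Sketch only: server side of the paused handshake.
    accept_paused(ListenSocket, Timeout) ->
        {ok, TransportSocket} = ssl:transport_accept(ListenSocket),
        %% Pause after the client hello; Ext carries its hello extensions.
        {ok, HsSocket, Ext} =
            ssl:handshake(TransportSocket, [{handshake, hello}], Timeout),
        %% Choose extra options based on what the client sent.
        ExtraOpts = opts_for_extensions(Ext),
        ssl:handshake_continue(HsSocket, ExtraOpts, Timeout).

    %% Hypothetical: e.g. return a certfile matching the SNI host name.
    opts_for_extensions(_Ext) ->
        [].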
@@ -1218,10 +1211,10 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>handshake_cancel(Socket) -> ok </name> + <name>handshake_cancel(SslSocket) -> ok </name> <fsummary>Cancel handshake with a fatal alert</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> </type> <desc> <p>Cancel the handshake with a fatal <c>USER_CANCELED</c> alert.</p> @@ -1229,10 +1222,11 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>handshake_continue(Socket, SSLOptions, Timeout) -> {ok, Socket} | {error, Reason}</name> + <name>handshake_continue(HsSocket, SSLOptions) -> {ok, SslSocket} | {error, Reason}</name> + <name>handshake_continue(HsSocket, SSLOptions, Timeout) -> {ok, SslSocket} | {error, Reason}</name> <fsummary>Continue the SSL/TLS handshake.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>HsSocket = SslSocket = sslsocket()</v> <v>SslOptions = [ssl_option()]</v> <v>Timeout = integer()</v> <v>Reason = term()</v> @@ -1257,10 +1251,10 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>negotiated_protocol(Socket) -> {ok, Protocol} | {error, protocol_not_negotiated}</name> + <name>negotiated_protocol(SslSocket) -> {ok, Protocol} | {error, protocol_not_negotiated}</name> <fsummary>Returns the protocol negotiated through ALPN or NPN extensions.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>Protocol = binary()</v> </type> <desc> @@ -1271,10 +1265,10 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>peercert(Socket) -> {ok, Cert} | {error, Reason}</name> + <name>peercert(SslSocket) -> {ok, Cert} | {error, Reason}</name> <fsummary>Returns the peer certificate.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>Cert = binary()</v> </type> <desc> @@ -1285,11 +1279,11 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>peername(Socket) -> {ok, {Address, Port}} | + <name>peername(SslSocket) -> {ok, {Address, Port}} | {error, Reason}</name> <fsummary>Returns the peer address and port.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>Address = ipaddress()</v> <v>Port = integer()</v> </type> @@ -1335,12 +1329,12 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>recv(Socket, Length) -> </name> - <name>recv(Socket, Length, Timeout) -> {ok, Data} | {error, + <name>recv(SslSocket, Length) -> </name> + <name>recv(SslSocket, Length, Timeout) -> {ok, Data} | {error, Reason}</name> <fsummary>Receives data on a socket.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>Length = integer()</v> <v>Timeout = integer()</v> <v>Data = [char()] | binary()</v> @@ -1362,10 +1356,10 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>renegotiate(Socket) -> ok | {error, Reason}</name> + <name>renegotiate(SslSocket) -> ok | {error, Reason}</name> <fsummary>Initiates a new handshake.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> </type> <desc><p>Initiates a new handshake. 
A notable return value is <c>{error, renegotiation_rejected}</c> indicating that the peer @@ -1375,10 +1369,10 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>send(Socket, Data) -> ok | {error, Reason}</name> + <name>send(SslSocket, Data) -> ok | {error, Reason}</name> <fsummary>Writes data to a socket.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>Data = iodata()</v> </type> <desc> @@ -1389,10 +1383,10 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>setopts(Socket, Options) -> ok | {error, Reason}</name> + <name>setopts(SslSocket, Options) -> ok | {error, Reason}</name> <fsummary>Sets socket options.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>Options = [socketoption]()</v> </type> <desc> @@ -1402,10 +1396,10 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>shutdown(Socket, How) -> ok | {error, Reason}</name> + <name>shutdown(SslSocket, How) -> ok | {error, Reason}</name> <fsummary>Immediately closes a socket.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>How = read | write | read_write</v> <v>Reason = reason()</v> </type> @@ -1420,19 +1414,16 @@ fun(srp, Username :: string(), UserState :: term()) -> </func> <func> - <name>ssl_accept(Socket) -> </name> - <name>ssl_accept(Socket, Timeout) -> ok | {error, Reason}</name> + <name>ssl_accept(SslSocket) -> </name> + <name>ssl_accept(SslSocket, Timeout) -> ok | {error, Reason}</name> <fsummary>Performs server-side SSL/TLS handshake.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>Timeout = integer()</v> <v>Reason = term()</v> </type> <desc> - <p>Performs the SSL/TLS/DTLS server-side handshake.</p> - <p><c>Socket</c> is a socket as returned by - <seealso marker="#transport_accept-2">ssl:transport_accept/[1,2]</seealso> - </p> + <p>Deprecated in OTP 21, use <seealso marker="#handshake-1">ssl:handshake[1,2]</seealso> instead.</p> </desc> </func> @@ -1447,29 +1438,16 @@ fun(srp, Username :: string(), UserState :: term()) -> <v>Reason = term()</v> </type> <desc> - <p>If <c>Socket</c> is a <c>socket()</c>: upgrades a <c>gen_tcp</c>, - or equivalent, socket to an SSL socket, that is, performs - the SSL/TLS server-side handshake and returns the SSL socket.</p> - - <warning><p>The listen socket is to be in mode <c>{active, false}</c> - before telling the client that the server is ready to upgrade - by calling this function, else the upgrade succeeds or does not - succeed depending on timing.</p></warning> - - <p>If <c>Socket</c> is an <c>sslsocket()</c>: provides extra SSL/TLS/DTLS - options to those specified in - <seealso marker="#listen-2">ssl:listen/2 </seealso> and then performs - the SSL/TLS/DTLS handshake. 
- </p> + <p>Deprecated in OTP 21, use <seealso marker="#handshake-3">ssl:handshake[2,3]</seealso> instead.</p> </desc> </func> <func> - <name>sockname(Socket) -> {ok, {Address, Port}} | + <name>sockname(SslSocket) -> {ok, {Address, Port}} | {error, Reason}</name> <fsummary>Returns the local address and port.</fsummary> <type> - <v>Socket = sslsocket()</v> + <v>SslSocket = sslsocket()</v> <v>Address = ipaddress()</v> <v>Port = integer()</v> </type> @@ -1515,11 +1493,11 @@ fun(srp, Username :: string(), UserState :: term()) -> <func> <name>transport_accept(ListenSocket) -></name> <name>transport_accept(ListenSocket, Timeout) -> - {ok, NewSocket} | {error, Reason}</name> + {ok, SslSocket} | {error, Reason}</name> <fsummary>Accepts an incoming connection and prepares for <c>ssl_accept</c>.</fsummary> <type> - <v>ListenSocket = NewSocket = sslsocket()</v> + <v>ListenSocket = SslSocket = sslsocket()</v> <v>Timeout = integer()</v> <v>Reason = reason()</v> </type> diff --git a/lib/ssl/doc/src/using_ssl.xml b/lib/ssl/doc/src/using_ssl.xml index 3ef33df719..b2d649042b 100644 --- a/lib/ssl/doc/src/using_ssl.xml +++ b/lib/ssl/doc/src/using_ssl.xml @@ -66,7 +66,7 @@ ssl:listen(9999, [{certfile, "cert.pem"}, {keyfile, "key.pem"},{reuseaddr, true} {ok,{sslsocket, [...]}}</code> <p><em>Step 3:</em> Do a transport accept on the TLS listen socket:</p> - <code type="erl">3 server> {ok, Socket} = ssl:transport_accept(ListenSocket). + <code type="erl">3 server> {ok, TLSTransportSocket} = ssl:transport_accept(ListenSocket). {ok,{sslsocket, [...]}}</code> <p><em>Step 4:</em> Start the client side: </p> @@ -77,7 +77,7 @@ ok</code> {ok,{sslsocket, [...]}}</code> <p><em>Step 5:</em> Do the TLS handshake:</p> - <code type="erl">4 server> ok = ssl:ssl_accept(Socket). + <code type="erl">4 server> {ok, Socket} = ssl:handshake(TLSTransportSocket). ok</code> <p><em>Step 6:</em> Send a message over TLS:</p> @@ -126,7 +126,7 @@ ok</code> ok</code> <p><em>Step 6:</em> Do the TLS handshake:</p> - <code type="erl">5 server> {ok, TLSSocket} = ssl:ssl_accept(Socket, [{cacertfile, "cacerts.pem"}, + <code type="erl">5 server> {ok, TLSSocket} = ssl:handshake(Socket, [{cacertfile, "cacerts.pem"}, {certfile, "cert.pem"}, {keyfile, "key.pem"}]). {ok,{sslsocket,[...]}}</code> diff --git a/lib/ssl/src/ssl.erl b/lib/ssl/src/ssl.erl index eb5b351dd3..f5e5336f81 100644 --- a/lib/ssl/src/ssl.erl +++ b/lib/ssl/src/ssl.erl @@ -40,7 +40,7 @@ %% Socket handling -export([connect/3, connect/2, connect/4, listen/2, transport_accept/1, transport_accept/2, - handshake/1, handshake/2, handshake/3, + handshake/1, handshake/2, handshake/3, handshake_continue/2, handshake_continue/3, handshake_cancel/1, ssl_accept/1, ssl_accept/2, ssl_accept/3, controlling_process/2, peername/1, peercert/1, sockname/1, @@ -259,6 +259,16 @@ handshake(Socket, SslOptions, Timeout) when is_port(Socket), Error = {error, _Reason} -> Error end. + +%%-------------------------------------------------------------------- +-spec handshake_continue(#sslsocket{}, [ssl_option()]) -> + {ok, #sslsocket{}} | {error, reason()}. +%% +%% +%% Description: Continues the handshake, possibly with newly supplied options. +%%-------------------------------------------------------------------- +handshake_continue(Socket, SSLOptions) -> + handshake_continue(Socket, SSLOptions, infinity). %%-------------------------------------------------------------------- -spec handshake_continue(#sslsocket{}, [ssl_option()], timeout()) -> {ok, #sslsocket{}} | {error, reason()}.
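The using_ssl changes above switch the chapter's examples from the deprecated ssl:ssl_accept to ssl:handshake, which returns the connected socket instead of plain ok, and ssl.erl gains handshake_continue/2 as a convenience wrapper that defaults the timeout to infinity. A minimal non-paused server in that style could look like the sketch below; the certificate file names and the echo behaviour are illustrative only and not part of this commit:

    %% Sketch only: accept and serve one connection with ssl:handshake/1.
    echo_once(Port) ->
        {ok, ListenSocket} =
            ssl:listen(Port, [{certfile, "cert.pem"}, {keyfile, "key.pem"},
                              {reuseaddr, true}, {active, false}]),
        {ok, TLSTransportSocket} = ssl:transport_accept(ListenSocket),
        %% ssl:handshake/1 returns the connected socket; the deprecated
        %% ssl:ssl_accept/1 returned only ok.
        {ok, Socket} = ssl:handshake(TLSTransportSocket),
        {ok, Data} = ssl:recv(Socket, 0),
        ok = ssl:send(Socket, Data),
        ok = ssl:close(Socket).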
diff --git a/lib/ssl/test/ssl_test_lib.erl b/lib/ssl/test/ssl_test_lib.erl index 1e88ca15de..8532b9ac0f 100644 --- a/lib/ssl/test/ssl_test_lib.erl +++ b/lib/ssl/test/ssl_test_lib.erl @@ -309,7 +309,7 @@ client_cont_loop(Node, Host, Port, Pid, Transport, Options, ContOpts, Opts) -> case rpc:call(Node, Transport, connect, [Host, Port, Options]) of {ok, Socket0, _} -> ct:log("~p:~p~nClient: handshake_continue(~p, ~p, infinity) ~n", [?MODULE, ?LINE, Socket0, ContOpts]), - case rpc:call(Node, Transport, handshake_continue, [Socket0, ContOpts, infinity]) of + case rpc:call(Node, Transport, handshake_continue, [Socket0, ContOpts]) of {ok, Socket} -> Pid ! {connected, Socket}, {Module, Function, Args} = proplists:get_value(mfa, Opts), @@ -1170,13 +1170,13 @@ rsa_suites(CounterPart) -> lists:member(cipher_atom(Cipher), Ciphers); ({ecdhe_rsa, Cipher, _}) when ECC == true -> lists:member(cipher_atom(Cipher), Ciphers); + ({ecdhe_rsa, Cipher, _,_}) when ECC == true -> + lists:member(cipher_atom(Cipher), Ciphers); ({rsa, Cipher, _, _}) -> lists:member(cipher_atom(Cipher), Ciphers); ({dhe_rsa, Cipher, _,_}) -> lists:member(cipher_atom(Cipher), Ciphers); - ({ecdhe_rsa, Cipher, _,_}) when ECC == true -> - lists:member(cipher_atom(Cipher), Ciphers); - (_) -> + (_) -> false end, common_ciphers(CounterPart)). @@ -1530,7 +1530,7 @@ is_sane_ecc(crypto) -> true end; is_sane_ecc(_) -> - true. + sufficient_crypto_support(cipher_ec). is_fips(openssl) -> VersionStr = os:cmd("openssl version"), @@ -1601,11 +1601,7 @@ openssl_sane_dtls() -> false; "OpenSSL 1.0.2k-freebsd" ++ _ -> false; - "OpenSSL 1.0.2d" ++ _ -> - false; - "OpenSSL 1.0.2n" ++ _ -> - false; - "OpenSSL 1.0.2m" ++ _ -> + "OpenSSL 1.0.2" ++ _ -> false; "OpenSSL 1.0.0" ++ _ -> false; diff --git a/lib/stdlib/src/otp_internal.erl b/lib/stdlib/src/otp_internal.erl index ceec3079a1..24d6e384d6 100644 --- a/lib/stdlib/src/otp_internal.erl +++ b/lib/stdlib/src/otp_internal.erl @@ -620,6 +620,8 @@ obsolete_1(ssl, ssl_accept, 2) -> {deprecated, "deprecated; use ssl:handshake/2 instead"}; obsolete_1(ssl, ssl_accept, 3) -> {deprecated, "deprecated; use ssl:handshake/3 instead"}; +obsolete_1(otp_mib, F, _) when F =:= load; F =:= unload -> + {deprecated, "deprecated; functionality will be removed in a future release"}; %% not obsolete diff --git a/otp_versions.table b/otp_versions.table index 22762f0691..9f6488b159 100644 --- a/otp_versions.table +++ b/otp_versions.table @@ -1,3 +1,4 @@ +OTP-20.3.8 : erts-9.3.3 snmp-5.2.11 # asn1-5.0.5 common_test-1.15.4 compiler-7.1.5 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.2 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.3 erl_interface-3.10.2 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4 inets-6.5.2 jinterface-1.8.1 kernel-5.4.3 megaco-3.18.3 mnesia-4.15.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 ssh-4.6.9 ssl-8.2.6 stdlib-3.4.5 syntax_tools-2.1.4 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 : OTP-20.3.7 : erl_docgen-0.7.3 erts-9.3.2 inets-6.5.2 # asn1-5.0.5 common_test-1.15.4 compiler-7.1.5 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.2 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_interface-3.10.2 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4 jinterface-1.8.1 kernel-5.4.3 
megaco-3.18.3 mnesia-4.15.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.10 ssh-4.6.9 ssl-8.2.6 stdlib-3.4.5 syntax_tools-2.1.4 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 : OTP-20.3.6 : crypto-4.2.2 ssh-4.6.9 # asn1-5.0.5 common_test-1.15.4 compiler-7.1.5 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.2 erl_interface-3.10.2 erts-9.3.1 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4 inets-6.5.1 jinterface-1.8.1 kernel-5.4.3 megaco-3.18.3 mnesia-4.15.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.10 ssl-8.2.6 stdlib-3.4.5 syntax_tools-2.1.4 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 : OTP-20.3.5 : erts-9.3.1 ssl-8.2.6 # asn1-5.0.5 common_test-1.15.4 compiler-7.1.5 cosEvent-2.2.2 cosEventDomain-1.2.2 cosFileTransfer-1.2.2 cosNotification-1.2.3 cosProperty-1.2.3 cosTime-1.2.3 cosTransactions-1.3.3 crypto-4.2.1 debugger-4.2.4 dialyzer-3.2.4 diameter-2.1.4 edoc-0.9.2 eldap-1.2.3 erl_docgen-0.7.2 erl_interface-3.10.2 et-1.6.1 eunit-2.3.5 hipe-3.17.1 ic-4.4.4 inets-6.5.1 jinterface-1.8.1 kernel-5.4.3 megaco-3.18.3 mnesia-4.15.3 observer-2.7 odbc-2.12.1 orber-3.8.4 os_mon-2.4.4 otp_mibs-1.1.2 parsetools-2.1.6 public_key-1.5.2 reltool-0.7.5 runtime_tools-1.12.5 sasl-3.1.2 snmp-5.2.10 ssh-4.6.8 stdlib-3.4.5 syntax_tools-2.1.4 tools-2.11.2 wx-1.8.3 xmerl-1.3.16 : diff --git a/system/doc/system_principles/misc.xml b/system/doc/system_principles/misc.xml new file mode 100644 index 0000000000..dd6c2a1336 --- /dev/null +++ b/system/doc/system_principles/misc.xml @@ -0,0 +1,198 @@ +<?xml version="1.0" encoding="utf-8" ?> +<!DOCTYPE chapter SYSTEM "chapter.dtd"> + +<chapter> + <header> + <copyright> + <year>2018</year> + <holder>Ericsson AB. All Rights Reserved.</holder> + </copyright> + <legalnotice> + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + </legalnotice> + + <title>Support, Compatibility, Deprecations, and Removal</title> + <prepared></prepared> + <responsible></responsible> + <docno></docno> + <approved></approved> + <checked></checked> + <date>2018-05-21</date> + <rev></rev> + <file>misc.xml</file> + </header> + + <section> + <marker id="supported_releases"/> + <title>Supported Releases</title> + <p> + In general, bugs are only fixed on the latest + <seealso marker="versions#releases_and_patches">release</seealso>, + and new features are introduced in the upcoming release that is + under development. However, when we, due to internal reasons, fix + bugs on older releases, these will be available and announced as well. + </p> + <p> + Due to the above, pull requests are only accepted on the + <c>maint</c> and the <c>master</c> branches in our + <url href="https://github.com/erlang/otp">git repository</url>. 
+ The <c>maint</c> branch contains changes planned for the next + <seealso marker="versions#releases_and_patches">maintenance patch package</seealso> + on the latest OTP release and the <c>master</c> branch contains + changes planned for the upcoming OTP release. + </p> + </section> + + <section> + <marker id="compatibility"/> + <title>Compatibility</title> + <p> + We always strive to remain as compatible as possible + even in the cases where we give no compatibility guarantees. + </p> + <p> + Different parts of the system will be handled differently + regarding compatibility. The following items describe how + different parts of the system are handled. + </p> + <taglist> + <tag>Erlang Distribution</tag> + <item> + <p> + Erlang nodes can communicate across at least + two preceding and two subsequent releases. + </p> + </item> + <tag>Compiled BEAM Code, NIF Libraries and Drivers</tag> + <item> + <p> + Compiled code can be loaded on at least two + subsequent releases. + </p> + <p> + Loading on previous releases is <em>not</em> supported. + </p> + </item> + <tag>Compiled HiPE Code</tag> + <item> + <p> + Compiled HiPE code can be loaded on the exact same build + of ERTS that was used when compiling the code. It might, + however, work on other builds; the emulator verifies + checksums in order to determine whether it can load the + code. Note that HiPE has some limitations. For more + information see the documentation of the + <seealso marker="hipe:HiPE_app">HiPE</seealso> application. + </p> + </item> + <tag>APIs</tag> + <item> + <p>Compatible between releases.</p> + </item> + <tag>Compiler Warnings</tag> + <item> + <p>New warnings may be issued between releases.</p> + </item> + <tag>Command Line Arguments</tag> + <item> + <p>Incompatible changes may occur between releases.</p> + </item> + <tag>OTP Build Procedures</tag> + <item><p>Incompatible changes may occur between releases.</p></item> + </taglist> + <p> + Under certain circumstances incompatible changes might be + introduced even in parts of the system that should be compatible + between releases. Things that might trigger incompatible changes + like this are: + </p> + <taglist> + <tag>Security Issues</tag> + <item> + <p> + It might be necessary to introduce incompatible changes + in order to solve a security issue. This kind of + incompatibility might occur in a patch. + </p> + </item> + <tag>Bug Fixes</tag> + <item> + <p> + We will not be bug-compatible. A bug fix might introduce + incompatible changes. This kind of incompatibility + might occur in a patch. + </p> + </item> + <tag>Severe Previous Design Issues</tag> + <item> + <p> + Some parts of OTP were designed a very long time ago and + did not necessarily take today's computing environments into + account. In some cases the consequences of those design + decisions are too severe. This may be performance-wise, + scalability-wise, etc. If we deem the consequences too + severe, we might introduce incompatible changes. This kind + of incompatibility will not be introduced in a patch, but + instead in the next release. + </p> + </item> + </taglist> + <p> + Peripheral, trace, and debug functionality is at greater + risk of being changed in an incompatible way than functionality + in the language itself and core libraries used during operation.
+ </p> + </section> + + <section> + <marker id="deprecation"/> + <title>Deprecation</title> + <p> + Functionality is deprecated when new functionality is + introduced that is preferred to be used instead of the + old functionality that is being deprecated. The deprecation + does <em>not</em> imply removal of the functionality unless + an upcoming removal is explicitly stated in the deprecation. + </p> + <p> + Deprecated functionality will be documented as deprecated, and + compiler warnings will be issued, when appropriate, as + early as possible. That is, the new preferred functionality + will appear at the same time as the deprecation is issued. + A new deprecation will at least be announced in a release + note and the documentation. + </p> + </section> + + <section> + <marker id="removal"/> + <title>Removal</title> + <p> + Legacy solutions may eventually need to be removed. In such + cases, they will be phased out on a long enough time period + to give users the time to adapt. Before removal of + functionality it will be deprecated at least during one + release with an explicit announcement about + the upcoming removal. A new deprecation will at least be + announced in a release note and the documentation. + </p> + <p> + Peripheral, trace, and debug functionality is at greater + risk of removal than functionality in the language itself + and core libraries used during operation. + </p> + </section> + +</chapter> + diff --git a/system/doc/system_principles/part.xml b/system/doc/system_principles/part.xml index 1b87ecd350..9f70f40dfb 100644 --- a/system/doc/system_principles/part.xml +++ b/system/doc/system_principles/part.xml @@ -34,4 +34,5 @@ <xi:include href="create_target.xml"/> <xi:include href="upgrade.xml"/> <xi:include href="versions.xml"/> + <xi:include href="misc.xml"/> </part> diff --git a/system/doc/system_principles/versions.xml b/system/doc/system_principles/versions.xml index b9f7fa4bf6..355c8fd2d5 100644 --- a/system/doc/system_principles/versions.xml +++ b/system/doc/system_principles/versions.xml @@ -32,9 +32,9 @@ <rev></rev> <file>versions.xml</file> </header> - <marker id="versions section"></marker> <section> + <marker id="versions section"></marker> <title>OTP Version</title> <p>As of OTP release 17, the OTP release number corresponds to the major part of the OTP version. The OTP version as a concept was @@ -136,8 +136,8 @@ </section> <section> - <title>Version Scheme</title> <marker id="version_scheme"/> + <title>Version Scheme</title> <note><p>The version scheme was changed as of OTP 17.0. This implies that application versions used prior to OTP 17.0 do not adhere to this version scheme. <seealso marker="#otp_17_0_app_versions">A list of @@ -207,8 +207,41 @@ </section> <section> - <title>OTP 17.0 Application Versions</title> + <marker id="releases_and_patches"/> + <title>Releases and Patches</title> + <p> + When a new OTP release is released it will have an OTP + version on the form <c><Major>.0</c> where the + major OTP version number equals the release number. + The major version number is increased one step since the + last major version. All other OTP versions with the same + major OTP version number are patches on that OTP release. + </p> + <p> + Patches are either released as maintenance patch packages + or emergency patch packages. The only difference is that + maintenance patch packages are planned and usually contain + more changes than emergency patch packages. 
Emergency patch + packages are released to solve one or more specific issues + when such are discovered. + </p> + <p> + The release of a maintenance patch package usually implies + an increase of the OTP <c><Minor></c> version, while + the release of an emergency patch package usually implies an + increase of the OTP <c><Patch></c> version. This is, + however, not necessarily always the case since changes of + OTP versions are based on the actual changes in the code + and not based on whether the patch was planned or not. + For more information see the + <seealso marker="#version_scheme">Version Scheme</seealso> + section above. + </p> + </section> + + <section> + <marker id="otp_17_0_app_versions"/> + <title>OTP 17.0 Application Versions</title> <p>The following list details the application versions that were part of OTP 17.0. If the normal part of an application version number compares diff --git a/system/doc/system_principles/xmlfiles.mk b/system/doc/system_principles/xmlfiles.mk index 77d6747414..353c2c7f7f 100644 --- a/system/doc/system_principles/xmlfiles.mk +++ b/system/doc/system_principles/xmlfiles.mk @@ -21,7 +21,8 @@ SYSTEM_PRINCIPLES_CHAPTER_FILES = \ system_principles.xml \ error_logging.xml \ upgrade.xml \ - versions.xml + versions.xml \ + misc.xml SYSTEM_PRINCIPLES_CHAPTER_GEN_FILES = \ create_target.xml
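Since the Releases and Patches text above ties maintenance and emergency patch packages to the <Minor> and <Patch> fields of the OTP version, a short sketch of how a running node can report those values may be helpful; reading the OTP_VERSION file under the release directory is one common approach, and the minimal error handling here is an assumption, not part of this commit:

    %% Sketch only: return the OTP release and, when available, the full
    %% OTP version string (e.g. "20.3.8") of the running system.
    otp_version() ->
        Release = erlang:system_info(otp_release),
        VersionFile = filename:join([code:root_dir(), "releases",
                                     Release, "OTP_VERSION"]),
        case file:read_file(VersionFile) of
            {ok, Bin} ->
                {Release, string:trim(binary_to_list(Bin))};
            {error, _} ->
                %% Some installations do not ship the OTP_VERSION file.
                {Release, unknown}
        end.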