Diffstat (limited to 'lib/kernel/test')
-rw-r--r--  lib/kernel/test/Makefile                                    |  15
-rw-r--r--  lib/kernel/test/application_SUITE.erl                       |  90
-rw-r--r--  lib/kernel/test/code_SUITE.erl                              |  31
-rw-r--r--  lib/kernel/test/disk_log_SUITE.erl                          |  99
-rw-r--r--  lib/kernel/test/erl_distribution_SUITE.erl                  | 334
-rw-r--r--  lib/kernel/test/file_SUITE.erl                              | 111
-rw-r--r--  lib/kernel/test/file_name_SUITE.erl                         |  11
-rw-r--r--  lib/kernel/test/gen_sctp_SUITE.erl                          |  10
-rw-r--r--  lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c  |   1
-rw-r--r--  lib/kernel/test/gen_tcp_misc_SUITE.erl                      | 439
-rw-r--r--  lib/kernel/test/gen_udp_SUITE.erl                           | 176
-rw-r--r--  lib/kernel/test/global_SUITE.erl                            |  30
-rw-r--r--  lib/kernel/test/inet_SUITE.erl                              | 205
-rw-r--r--  lib/kernel/test/inet_res_SUITE.erl                          |   9
-rw-r--r--  lib/kernel/test/inet_sockopt_SUITE.erl                      |   9
-rw-r--r--  lib/kernel/test/init_SUITE.erl                              |  25
-rw-r--r--  lib/kernel/test/kernel.spec                                 |   1
-rw-r--r--  lib/kernel/test/kernel_bench.spec                           |   1
-rw-r--r--  lib/kernel/test/kernel_config_SUITE.erl                     |  21
-rw-r--r--  lib/kernel/test/logger.cover                                |   5
-rw-r--r--  lib/kernel/test/logger.spec                                 |   2
-rw-r--r--  lib/kernel/test/logger_SUITE.erl                            | 392
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl                 | 475
-rw-r--r--  lib/kernel/test/logger_env_var_SUITE.erl                    |  35
-rw-r--r--  lib/kernel/test/logger_formatter_SUITE.erl                  | 103
-rw-r--r--  lib/kernel/test/logger_olp_SUITE.erl                        |  90
-rw-r--r--  lib/kernel/test/logger_proxy_SUITE.erl                      | 274
-rw-r--r--  lib/kernel/test/logger_simple_h_SUITE.erl                   |   3
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl                      | 827
-rw-r--r--  lib/kernel/test/logger_stress_SUITE.erl                     | 550
-rw-r--r--  lib/kernel/test/logger_test_lib.erl                         |  10
-rw-r--r--  lib/kernel/test/prim_file_SUITE.erl                         |   3
-rw-r--r--  lib/kernel/test/sendfile_SUITE.erl                          |  26
-rw-r--r--  lib/kernel/test/seq_trace_SUITE.erl                         |  64
34 files changed, 3816 insertions, 661 deletions
diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile
index 4a86265a4a..6763a04d9f 100644
--- a/lib/kernel/test/Makefile
+++ b/lib/kernel/test/Makefile
@@ -57,6 +57,7 @@ MODULES= \
prim_file_SUITE \
ram_file_SUITE \
gen_tcp_api_SUITE \
+ gen_tcp_dist \
gen_tcp_echo_SUITE \
gen_tcp_misc_SUITE \
gen_udp_SUITE \
@@ -76,8 +77,11 @@ MODULES= \
logger_filters_SUITE \
logger_formatter_SUITE \
logger_legacy_SUITE \
+ logger_olp_SUITE \
+ logger_proxy_SUITE \
logger_simple_h_SUITE \
logger_std_h_SUITE \
+ logger_stress_SUITE \
logger_test_lib \
os_SUITE \
pg2_SUITE \
@@ -127,11 +131,17 @@ ERL_COMPILE_FLAGS +=
EBIN = .
+TARGETS = $(MODULES:%=$(EBIN)/%.$(EMULATOR))
+
+
# ----------------------------------------------------
# Targets
# ----------------------------------------------------
-make_emakefile:
+gen_tcp_dist.erl: ../examples/gen_tcp_dist/src/gen_tcp_dist.erl
+ cp $< $@
+
+make_emakefile: $(ERL_FILES)
$(ERL_TOP)/make/make_emakefile $(ERL_COMPILE_FLAGS) -o$(EBIN) '*_SUITE_make' \
> $(EMAKEFILE)
$(ERL_TOP)/make/make_emakefile $(ERL_COMPILE_FLAGS) -o$(EBIN) $(MODULES) \
@@ -147,6 +157,9 @@ clean:
docs:
+targets: $(TARGETS)
+
+
# ----------------------------------------------------
# Release Target
# ----------------------------------------------------
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index 5c35b82207..1ab554db7c 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -31,6 +31,7 @@
otp_3002/1, otp_3184/1, otp_4066/1, otp_4227/1, otp_5363/1,
otp_5606/1,
start_phases/1, get_key/1, get_env/1,
+ set_env/1, set_env_persistent/1, set_env_errors/1,
permit_false_start_local/1, permit_false_start_dist/1, script_start/1,
nodedown_start/1, init2973/0, loop2973/0, loop5606/1]).
@@ -55,6 +56,7 @@ all() ->
load_use_cache, ensure_started, {group, reported_bugs}, start_phases,
script_start, nodedown_start, permit_false_start_local,
permit_false_start_dist, get_key, get_env, ensure_all_started,
+ set_env, set_env_persistent, set_env_errors,
{group, distr_changed}, config_change, shutdown_func, shutdown_timeout,
shutdown_deadlock, config_relative_paths,
persistent_env].
@@ -1944,6 +1946,94 @@ get_appls([_ | T], Res) ->
get_appls([], Res) ->
Res.
+%% Test set_env/1.
+set_env(Conf) when is_list(Conf) ->
+ ok = application:set_env([{appinc, [{own2, persist}, {not_in_app, persist}]},
+ {unknown_app, [{key, persist}]}]),
+
+ %% own_env1 and own2 are set in appinc
+ undefined = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, val2} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ %% On reload, values are lost
+ ok = application:unload(appinc),
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, val2} = application:get_env(appinc, own2),
+ undefined = application:get_env(appinc, not_in_app),
+
+ %% Clean up
+ ok = application:unload(appinc).
+
+%% Test set_env/2 with persistent true.
+set_env_persistent(Conf) when is_list(Conf) ->
+ Opts = [{persistent, true}],
+ ok = application:set_env([{appinc, [{own2, persist}, {not_in_app, persist}]},
+ {unknown_app, [{key, persist}]}], Opts),
+
+ %% own_env1 and own2 are set in appinc
+ undefined = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ %% On reload, values are not lost
+ ok = application:unload(appinc),
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+
+ %% Clean up
+ ok = application:unload(appinc).
+
+set_env_errors(Conf) when is_list(Conf) ->
+ "application: 1; application name must be an atom" =
+ badarg_msg(fun() -> application:set_env([{1, []}]) end),
+
+ "application: foo; parameters must be a list" =
+ badarg_msg(fun() -> application:set_env([{foo, bar}]) end),
+
+ "invalid application config: foo_bar" =
+ badarg_msg(fun() -> application:set_env([foo_bar]) end),
+
+ "application: foo; invalid parameter name: 1" =
+ badarg_msg(fun() -> application:set_env([{foo, [{1, 2}]}]) end),
+
+ "application: foo; invalid parameter: config" =
+ badarg_msg(fun() -> application:set_env([{foo, [config]}]) end),
+
+ "application: kernel; erroneous parameter: distributed" =
+ badarg_msg(fun() -> application:set_env([{kernel, [{distributed, config}]}]) end),
+
+ "duplicate application config: foo" =
+ badarg_msg(fun() -> application:set_env([{foo, []}, {foo, []}]) end),
+
+ "application: foo; duplicate parameter: bar" =
+ badarg_msg(fun() -> application:set_env([{foo, [{bar, baz}, {bar, bat}]}]) end),
+
+ ok.
+
+badarg_msg(Fun) ->
+ try Fun() of
+ _ -> ct:fail(try_succeeded)
+ catch
+ error:{badarg, Msg} -> Msg
+ end.
%% Test set_env/4 and unset_env/3 with persistent true.
persistent_env(Conf) when is_list(Conf) ->
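
The set_env tests added above exercise application:set_env/1,2, which accept a complete
configuration list (and, for set_env/2, an options list) rather than a single key. A minimal
usage sketch, with made-up application and parameter names (my_app, other_app, port, debug,
timeout); with {persistent, true} the values survive a later load or reload of the
applications' .app files:

    %% Sketch only: my_app/other_app and their keys are placeholders.
    ok = application:set_env(
           [{my_app,    [{port, 8080}, {debug, true}]},
            {other_app, [{timeout, 5000}]}],
           [{persistent, true}]),
    %% The values are readable even before the applications are loaded.
    {ok, 8080} = application:get_env(my_app, port),
    {ok, 5000} = application:get_env(other_app, timeout).
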
diff --git a/lib/kernel/test/code_SUITE.erl b/lib/kernel/test/code_SUITE.erl
index 1314316c13..4f0847084f 100644
--- a/lib/kernel/test/code_SUITE.erl
+++ b/lib/kernel/test/code_SUITE.erl
@@ -25,8 +25,8 @@
-export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2]).
-export([set_path/1, get_path/1, add_path/1, add_paths/1, del_path/1,
replace_path/1, load_file/1, load_abs/1, ensure_loaded/1,
- delete/1, purge/1, purge_many_exits/1, soft_purge/1, is_loaded/1,
- all_loaded/1,
+ delete/1, purge/1, purge_many_exits/0, purge_many_exits/1,
+ soft_purge/1, is_loaded/1, all_loaded/1,
load_binary/1, dir_req/1, object_code/1, set_path_file/1,
upgrade/1,
sticky_dir/1, pa_pz_option/1, add_del_path/1,
@@ -55,7 +55,7 @@
suite() ->
[{ct_hooks,[ts_install_cth]},
- {timetrap,{minutes,5}}].
+ {timetrap,{seconds,30}}].
all() ->
[set_path, get_path, add_path, add_paths, del_path,
@@ -140,6 +140,11 @@ end_per_testcase(on_load_embedded, Config) ->
LinkName = proplists:get_value(link_name, Config),
_ = del_link(LinkName),
end_per_testcase(Config);
+end_per_testcase(upgrade, Config) ->
+ %% Make sure tracing is turned off even if the test times out.
+ erlang:trace_pattern({error_handler,undefined_function,3}, false, [global]),
+ erlang:trace(self(), false, [call]),
+ end_per_testcase(Config);
end_per_testcase(_Func, Config) ->
end_per_testcase(Config).
@@ -391,6 +396,9 @@ purge(Config) when is_list(Config) ->
process_flag(trap_exit, OldFlag),
ok.
+purge_many_exits() ->
+ [{timetrap, {minutes, 2}}].
+
purge_many_exits(Config) when is_list(Config) ->
OldFlag = process_flag(trap_exit, true),
@@ -525,7 +533,7 @@ upgrade(Config) ->
T = [beam, hipe],
[upgrade_do(DataDir, Client, T) || Client <- T],
- case hipe:llvm_support_available() of
+ case hipe:erllvm_is_supported() of
false -> ok;
true ->
T2 = [beam, hipe_llvm],
@@ -1021,6 +1029,13 @@ mult_lib_remove_prefix([H|T1], [H|T2]) ->
mult_lib_remove_prefix([$/|T], []) -> T.
bad_erl_libs(Config) when is_list(Config) ->
+ %% Preserve ERL_LIBS if set.
+ BadLibs0 = "/no/such/dir",
+ BadLibs =
+ case os:getenv("ERL_LIBS") of
+ false -> BadLibs0;
+ Libs -> BadLibs0 ++ ":" ++ Libs
+ end,
{ok,Node} =
test_server:start_node(bad_erl_libs, slave, []),
Code = rpc:call(Node,code,get_path,[]),
@@ -1028,10 +1043,9 @@ bad_erl_libs(Config) when is_list(Config) ->
{ok,Node2} =
test_server:start_node(bad_erl_libs, slave,
- [{args,"-env ERL_LIBS /no/such/dir"}]),
+ [{args,"-env ERL_LIBS " ++ BadLibs}]),
Code2 = rpc:call(Node,code,get_path,[]),
test_server:stop_node(Node2),
-
%% Test that code path is not affected by the faulty ERL_LIBS
Code = Code2,
@@ -1550,6 +1564,11 @@ on_load_update_code_1(3, Mod) ->
%% Test -on_load while trace feature 'on_load' is enabled (OTP-14612)
on_load_trace_on_load(Config) ->
+ %% 'on_load' enables tracing for all newly loaded modules, so we make a dry
+ %% run to ensure that ancillary modules like 'merl' won't be loaded during
+ %% the actual test.
+ on_load_update(Config),
+
Papa = self(),
Tracer = spawn_link(fun F() -> receive M -> Papa ! M end, F() end),
{tracer,[]} = erlang:trace_info(self(),tracer),
diff --git a/lib/kernel/test/disk_log_SUITE.erl b/lib/kernel/test/disk_log_SUITE.erl
index 0709a6e766..9704c3b28c 100644
--- a/lib/kernel/test/disk_log_SUITE.erl
+++ b/lib/kernel/test/disk_log_SUITE.erl
@@ -1750,7 +1750,7 @@ block_queue(Conf) when is_list(Conf) ->
true = [{1,a},{2,b},{3,c},{4,d},{5,e},{6,f},{7,g},{8,h}] == Terms,
del(File, 2),
Q = qlen(),
- true = (P0 == pps()),
+ check_pps(P0),
ok.
%% OTP-4880. Blocked processes did not get disk_log_stopped message.
@@ -1782,7 +1782,7 @@ block_queue2(Conf) when is_list(Conf) ->
{ok,<<>>} = file:read_file(File ++ ".1"),
del(File, No),
Q = qlen(),
- true = (P0 == pps()),
+ check_pps(P0),
ok.
@@ -2119,7 +2119,7 @@ close_block(Conf) when is_list(Conf) ->
0 = sync_do(Pid2, users),
sync_do(Pid2, terminate),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Users terminate (no link...).
Pid3 = spawn_link(?MODULE, lserv, [n]),
@@ -2137,7 +2137,7 @@ close_block(Conf) when is_list(Conf) ->
disk_log:close(n),
disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner terminates.
Pid5 = spawn_link(?MODULE, lserv, [n]),
@@ -2154,7 +2154,7 @@ close_block(Conf) when is_list(Conf) ->
1 = users(n),
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking user terminates.
Pid6 = spawn_link(?MODULE, lserv, [n]),
@@ -2174,7 +2174,7 @@ close_block(Conf) when is_list(Conf) ->
1 = users(n),
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner terminates.
Pid7 = spawn_link(?MODULE, lserv, [n]),
@@ -2192,7 +2192,7 @@ close_block(Conf) when is_list(Conf) ->
1 = users(n),
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Two owners, the blocking one terminates.
Pid8 = spawn_link(?MODULE, lserv, [n]),
@@ -2207,7 +2207,7 @@ close_block(Conf) when is_list(Conf) ->
0 = sync_do(Pid9, users),
sync_do(Pid9, terminate),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking user closes.
Pid10 = spawn_link(?MODULE, lserv, [n]),
@@ -2225,7 +2225,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
sync_do(Pid10, terminate),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking user unblocks and closes.
Pid11 = spawn_link(?MODULE, lserv, [n]),
@@ -2244,7 +2244,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
sync_do(Pid11, terminate),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner closes.
Pid12 = spawn_link(?MODULE, lserv, [n]),
@@ -2263,7 +2263,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
sync_do(Pid12, terminate),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner unblocks and closes.
Pid13 = spawn_link(?MODULE, lserv, [n]),
@@ -2283,7 +2283,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
sync_do(Pid13, terminate),
- true = (P0 == pps()),
+ check_pps(P0),
del(File, No), % cleanup
ok.
@@ -2487,7 +2487,7 @@ error_repair(Conf) when is_list(Conf) ->
P0 = pps(),
{error, {file_error, _, _}} =
disk_log:open([{name, n}, {file, File}, {type, wrap}, {size,{40,4}}]),
- true = (P0 == pps()),
+ check_pps(P0),
del(File, No),
ok = file:del_dir(Dir),
@@ -2506,7 +2506,7 @@ error_repair(Conf) when is_list(Conf) ->
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {40,No}}]),
ok = disk_log:close(n),
- true = (P1 == pps()),
+ check_pps(P1),
del(File, No),
receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
after 1000 -> ct:fail(failed) end,
@@ -2524,7 +2524,7 @@ error_repair(Conf) when is_list(Conf) ->
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {4000,No}}]),
ok = disk_log:close(n),
- true = (P2 == pps()),
+ check_pps(P2),
del(File, No),
receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
after 1000 -> ct:fail(failed) end,
@@ -2633,7 +2633,7 @@ error_log(Conf) when is_list(Conf) ->
{ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, external},{size, {100, No}}]),
{error, {file_error, _, _}} = disk_log:truncate(n),
- true = (P0 == pps()),
+ check_pps(P0),
del(File, No),
%% OTP-4880.
@@ -2641,7 +2641,7 @@ error_log(Conf) when is_list(Conf) ->
{ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
{format, external},{size, 100000}]),
{error, {file_error, _, eisdir}} = disk_log:reopen(n, LDir),
- true = (P0 == pps()),
+ check_pps(P0),
file:delete(File),
B = mk_bytes(60),
@@ -3003,7 +3003,7 @@ error_index(Conf) when is_list(Conf) ->
{error, {invalid_index_file, _}} = disk_log:open(Args),
del(File, No),
- true = (P0 == pps()),
+ check_pps(P0),
true = (Q == qlen()),
ok.
@@ -4436,7 +4436,7 @@ dist_open2(Conf) when is_list(Conf) ->
timer:sleep(500),
file:delete(File),
- true = (P0 == pps()),
+ check_pps(P0),
%% This time the first process has a naughty head_func. This test
%% does not add very much. Perhaps it should be removed. However,
@@ -4482,7 +4482,7 @@ dist_open2(Conf) when is_list(Conf) ->
timer:sleep(100),
{error, no_such_log} = disk_log:close(Log),
file:delete(File),
- true = (P0 == pps()),
+ check_pps(P0),
No = 2,
Log2 = n2,
@@ -4511,7 +4511,7 @@ dist_open2(Conf) when is_list(Conf) ->
file:delete(File2),
del(File, No),
- true = (P0 == pps()),
+ check_pps(P0),
R.
@@ -4556,7 +4556,7 @@ dist_open2_1(Conf, Delay) ->
{error, no_such_log} = disk_log:info(Log),
file:delete(File),
- true = (P0 == pps()),
+ check_pps(P0),
ok.
@@ -4613,7 +4613,7 @@ dist_open2_2(Conf, Delay) ->
{[{Node1,{repaired,_,_,_}}],[]}} -> ok
end,
- true = (P0 == pps()),
+ check_pps(P0),
stop_node(Node1),
file:delete(File),
ok.
@@ -4791,10 +4791,59 @@ log(Name, N) ->
format_error(E) ->
lists:flatten(disk_log:format_error(E)).
+check_pps({Ports0,Procs0} = P0) ->
+ case pps() of
+ P0 ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ case pps() of
+ P0 ->
+ ok;
+ {Ports1,Procs1} = P1 ->
+ case {Ports1 -- Ports0, Procs1 -- Procs0} of
+ {[], []} -> ok;
+ {PortsDiff,ProcsDiff} ->
+ io:format("failure, got ~p~n, expected ~p\n", [P1, P0]),
+ show("Old port", Ports0 -- Ports1),
+ show("New port", PortsDiff),
+ show("Old proc", Procs0 -- Procs1),
+ show("New proc", ProcsDiff),
+ ct:fail(failed)
+ end
+ end
+ end.
+
+show(_S, []) ->
+ ok;
+show(S, [{Pid, Name, InitCall}|Pids]) when is_pid(Pid) ->
+ io:format("~s: ~w (~w), ~w: ~p~n",
+ [S, Pid, proc_reg_name(Name), InitCall,
+ erlang:process_info(Pid)]),
+ show(S, Pids);
+show(S, [{Port, _}|Ports]) when is_port(Port)->
+ io:format("~s: ~w: ~p~n", [S, Port, erlang:port_info(Port)]),
+ show(S, Ports).
+
pps() ->
timer:sleep(100),
- {erlang:ports(), lists:filter(fun(P) -> erlang:is_process_alive(P) end,
- processes())}.
+ {port_list(), process_list()}.
+
+port_list() ->
+ [{P,safe_second_element(erlang:port_info(P, name))} ||
+ P <- erlang:ports()].
+
+process_list() ->
+ [{P,process_info(P, registered_name),
+ safe_second_element(process_info(P, initial_call))} ||
+ P <- processes(), erlang:is_process_alive(P)].
+
+proc_reg_name({registered_name, Name}) -> Name;
+proc_reg_name([]) -> no_reg_name.
+
+safe_second_element({_,Info}) -> Info;
+safe_second_element(Other) -> Other.
+
qlen() ->
{_, {_, N}} = lists:keysearch(message_queue_len, 1, process_info(self())),
diff --git a/lib/kernel/test/erl_distribution_SUITE.erl b/lib/kernel/test/erl_distribution_SUITE.erl
index 9c6712ad74..c3a022df0a 100644
--- a/lib/kernel/test/erl_distribution_SUITE.erl
+++ b/lib/kernel/test/erl_distribution_SUITE.erl
@@ -25,6 +25,7 @@
init_per_group/2,end_per_group/2]).
-export([tick/1, tick_change/1,
+ connect_node/1,
nodenames/1, hostnames/1,
illegal_nodenames/1, hidden_node/1,
setopts/1,
@@ -39,7 +40,8 @@
monitor_nodes_errors/1,
monitor_nodes_combinations/1,
monitor_nodes_cleanup/1,
- monitor_nodes_many/1]).
+ monitor_nodes_many/1,
+ dist_ctrl_proc_smoke/1]).
%% Performs the test at another node.
-export([get_socket_priorities/0,
@@ -51,7 +53,7 @@
-export([init_per_testcase/2, end_per_testcase/2]).
--export([start_node/2]).
+-export([dist_cntrlr_output_test/2]).
-export([pinger/1]).
@@ -66,10 +68,12 @@
suite() ->
[{ct_hooks,[ts_install_cth]},
- {timetrap,{minutes,4}}].
+ {timetrap,{minutes,12}}].
all() ->
- [tick, tick_change, nodenames, hostnames, illegal_nodenames,
+ [dist_ctrl_proc_smoke,
+ tick, tick_change, nodenames, hostnames, illegal_nodenames,
+ connect_node,
hidden_node, setopts,
table_waste, net_setuptime, inet_dist_options_options,
{group, monitor_nodes}].
@@ -87,6 +91,7 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
+ [slave:stop(N) || N <- nodes()],
ok.
init_per_group(_GroupName, Config) ->
@@ -106,11 +111,19 @@ init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
end_per_testcase(_Func, _Config) ->
ok.
+connect_node(Config) when is_list(Config) ->
+ Connected = nodes(connected),
+ true = net_kernel:connect_node(node()),
+ Connected = nodes(connected),
+ ok.
+
tick(Config) when is_list(Config) ->
- PaDir = filename:dirname(code:which(erl_distribution_SUITE)),
+ run_dist_configs(fun tick/2, Config).
+tick(DCfg, _Config) ->
%% First check that the normal case is OK!
- {ok, Node} = start_node(dist_test, "-pa " ++ PaDir),
+ [Name1, Name2] = get_nodenames(2, dist_test),
+ {ok, Node} = start_node(DCfg, Name1),
rpc:call(Node, erl_distribution_SUITE, tick_cli_test, [node()]),
erlang:monitor_node(Node, true),
@@ -134,14 +147,12 @@ tick(Config) when is_list(Config) ->
%% Set the ticktime on the server node to 100 secs so the server
%% node doesn't tick the client node within the interval ...
- {ok, ServNode} = start_node(dist_test_server,
- "-kernel net_ticktime 100 "
- "-pa " ++ PaDir),
+ {ok, ServNode} = start_node(DCfg, Name2,
+ "-kernel net_ticktime 100"),
rpc:call(ServNode, erl_distribution_SUITE, tick_serv_test, [Node, node()]),
- {ok, _} = start_node(dist_test,
- "-kernel net_ticktime 12 "
- "-pa " ++ PaDir),
+ {ok, Node} = start_node(DCfg, Name1,
+ "-kernel net_ticktime 12"),
rpc:call(Node, erl_distribution_SUITE, tick_cli_test, [ServNode]),
spawn_link(erl_distribution_SUITE, keep_conn, [Node]),
@@ -171,6 +182,9 @@ tick(Config) when is_list(Config) ->
%% Checks that pinging nonexistent nodes does not waste space in the distribution table.
table_waste(Config) when is_list(Config) ->
+ run_dist_configs(fun table_waste/2, Config).
+
+table_waste(DCfg, _Config) ->
{ok, HName} = inet:gethostname(),
F = fun(0,_F) -> [];
(N,F) ->
@@ -180,7 +194,7 @@ table_waste(Config) when is_list(Config) ->
F(N-1,F)
end,
F(256,F),
- {ok, N} = start_node(erl_distribution_300,""),
+ {ok, N} = start_node(DCfg, erl_distribution_300),
stop_node(N),
ok.
@@ -191,6 +205,9 @@ nodenames(Config) when is_list(Config) ->
legal("a-1@b"),
legal("a_1@b"),
+ %% Test that giving two -sname works as it should
+ test_node("a_1@b", false, long_or_short() ++ "a_0@b"),
+
illegal("cdé@a"),
illegal("te欢st@a").
@@ -244,8 +261,11 @@ illegal(Name) ->
test_node(Name) ->
test_node(Name, false).
test_node(Name, Illigal) ->
+ test_node(Name, Illigal, "").
+test_node(Name, Illigal, ExtraArgs) ->
ProgName = ct:get_progname(),
- Command = ProgName ++ " -noinput " ++ long_or_short() ++ Name ++
+ Command = ProgName ++ " -noinput " ++ ExtraArgs ++
+ long_or_short() ++ Name ++
" -eval \"net_adm:ping('" ++ atom_to_list(node()) ++ "')\"" ++
case Illigal of
true ->
@@ -286,13 +306,16 @@ gethostname() ->
%% Test that pinging an illegal nodename does not kill the node.
illegal_nodenames(Config) when is_list(Config) ->
- PaDir = filename:dirname(code:which(erl_distribution_SUITE)),
- {ok, Node}=start_node(illegal_nodenames, "-pa " ++ PaDir),
+ run_dist_configs(fun illegal_nodenames/2, Config).
+
+illegal_nodenames(DCfg, _Config) ->
+ {ok, Node}=start_node(DCfg, illegal_nodenames),
monitor_node(Node, true),
RPid=rpc:call(Node, erlang, spawn,
[?MODULE, pinger, [self()]]),
receive
{RPid, pinged} ->
+ monitor_node(Node, false),
ok;
{nodedown, Node} ->
ct:fail("Remote node died.")
@@ -309,22 +332,25 @@ pinger(Starter) ->
%% Test that you can set the net_setuptime properly.
net_setuptime(Config) when is_list(Config) ->
+ run_dist_configs(fun net_setuptime/2, Config).
+
+net_setuptime(DCfg, _Config) ->
+
%% In this test case, we reluctantly accept shorter times than the given
%% setup time, because the connection attempt can end in a
%% "Host unreachable" error before the timeout fires.
- Res0 = do_test_setuptime("2"),
+ Res0 = do_test_setuptime(DCfg, "2"),
io:format("Res0 = ~p", [Res0]),
true = (Res0 =< 4000),
- Res1 = do_test_setuptime("0.3"),
+ Res1 = do_test_setuptime(DCfg, "0.3"),
io:format("Res1 = ~p", [Res1]),
true = (Res1 =< 500),
ok.
-do_test_setuptime(Setuptime) when is_list(Setuptime) ->
- PaDir = filename:dirname(code:which(?MODULE)),
- {ok, Node} = start_node(dist_setuptime_test, "-pa " ++ PaDir ++
- " -kernel net_setuptime " ++ Setuptime),
+do_test_setuptime(DCfg, Setuptime) when is_list(Setuptime) ->
+ {ok, Node} = start_node(DCfg, dist_setuptime_test,
+ "-kernel net_setuptime " ++ Setuptime),
Res = rpc:call(Node,?MODULE,time_ping,[?DUMMY_NODE]),
stop_node(Node),
Res.
@@ -390,32 +416,36 @@ tick_cli_test1(Node) ->
end.
setopts(Config) when is_list(Config) ->
+ run_dist_configs(fun setopts/2, Config).
+
+setopts(DCfg, _Config) ->
register(setopts_regname, self()),
[N1,N2,N3,N4] = get_nodenames(4, setopts),
- {_N1F,Port1} = start_node_unconnected(N1, ?MODULE, run_remote_test,
+ {_N1F,Port1} = start_node_unconnected(DCfg, N1, ?MODULE, run_remote_test,
["setopts_do", atom_to_list(node()), "1", "ping"]),
0 = wait_for_port_exit(Port1),
- {_N2F,Port2} = start_node_unconnected(N2, ?MODULE, run_remote_test,
+ {_N2F,Port2} = start_node_unconnected(DCfg, N2, ?MODULE, run_remote_test,
["setopts_do", atom_to_list(node()), "2", "ping"]),
0 = wait_for_port_exit(Port2),
{ok, LSock} = gen_tcp:listen(0, [{packet,2}, {active,false}]),
{ok, LTcpPort} = inet:port(LSock),
- {N3F,Port3} = start_node_unconnected(N3, ?MODULE, run_remote_test,
+ {N3F,Port3} = start_node_unconnected(DCfg, N3, ?MODULE, run_remote_test,
["setopts_do", atom_to_list(node()),
"1", integer_to_list(LTcpPort)]),
wait_and_connect(LSock, N3F, Port3),
0 = wait_for_port_exit(Port3),
- {N4F,Port4} = start_node_unconnected(N4, ?MODULE, run_remote_test,
+ {N4F,Port4} = start_node_unconnected(DCfg, N4, ?MODULE, run_remote_test,
["setopts_do", atom_to_list(node()),
"2", integer_to_list(LTcpPort)]),
wait_and_connect(LSock, N4F, Port4),
0 = wait_for_port_exit(Port4),
+ unregister(setopts_regname),
ok.
wait_and_connect(LSock, NodeName, NodePort) ->
@@ -509,9 +539,9 @@ opt_from_nr("2") -> {nodelay, false}.
change_val(true) -> false;
change_val(false) -> true.
-start_node_unconnected(Name, Mod, Func, Args) ->
+start_node_unconnected(DCfg, Name, Mod, Func, Args) ->
FullName = full_node_name(Name),
- CmdLine = mk_node_cmdline(Name,Mod,Func,Args),
+ CmdLine = mk_node_cmdline(DCfg, Name,Mod,Func,Args),
io:format("Starting node ~p: ~s~n", [FullName, CmdLine]),
case open_port({spawn, CmdLine}, [exit_status]) of
Port when is_port(Port) ->
@@ -525,7 +555,7 @@ full_node_name(PreName) ->
atom_to_list(node())),
list_to_atom(atom_to_list(PreName) ++ HostSuffix).
-mk_node_cmdline(Name,Mod,Func,Args) ->
+mk_node_cmdline(DCfg, Name,Mod,Func,Args) ->
Static = "-noinput",
Pa = filename:dirname(code:which(?MODULE)),
Prog = case catch init:get_argument(progname) of
@@ -542,6 +572,7 @@ mk_node_cmdline(Name,Mod,Func,Args) ->
Prog ++ " "
++ Static ++ " "
++ NameSw ++ " " ++ NameStr
+ ++ " " ++ DCfg
++ " -pa " ++ Pa
++ " -env ERL_CRASH_DUMP " ++ Pwd ++ "/erl_crash_dump." ++ NameStr
++ " -setcookie " ++ atom_to_list(erlang:get_cookie())
@@ -551,7 +582,9 @@ mk_node_cmdline(Name,Mod,Func,Args) ->
%% OTP-4255.
tick_change(Config) when is_list(Config) ->
- PaDir = filename:dirname(code:which(?MODULE)),
+ run_dist_configs(fun tick_change/2, Config).
+
+tick_change(DCfg, _Config) ->
[BN, CN] = get_nodenames(2, tick_change),
DefaultTT = net_kernel:get_net_ticktime(),
unchanged = net_kernel:set_net_ticktime(DefaultTT, 60),
@@ -568,14 +601,13 @@ tick_change(Config) when is_list(Config) ->
end,
wait_until(fun () -> 10 == net_kernel:get_net_ticktime() end),
- {ok, B} = start_node(BN, "-kernel net_ticktime 10 -pa " ++ PaDir),
- {ok, C} = start_node(CN, "-kernel net_ticktime 10 -hidden -pa "
- ++ PaDir),
+ {ok, B} = start_node(DCfg, BN, "-kernel net_ticktime 10"),
+ {ok, C} = start_node(DCfg, CN, "-kernel net_ticktime 10 -hidden"),
OTE = process_flag(trap_exit, true),
case catch begin
- run_tick_change_test(B, C, 10, 1, PaDir),
- run_tick_change_test(B, C, 1, 10, PaDir)
+ run_tick_change_test(DCfg, B, C, 10, 1),
+ run_tick_change_test(DCfg, B, C, 1, 10)
end of
{'EXIT', Reason} ->
stop_node(B),
@@ -617,7 +649,7 @@ wait_for_nodedowns(Tester, Ref) ->
end,
wait_for_nodedowns(Tester, Ref).
-run_tick_change_test(B, C, PrevTT, TT, PaDir) ->
+run_tick_change_test(DCfg, B, C, PrevTT, TT) ->
[DN, EN] = get_nodenames(2, tick_change),
Tester = self(),
@@ -631,8 +663,8 @@ run_tick_change_test(B, C, PrevTT, TT, PaDir) ->
wait_for_nodedowns(Tester, Ref)
end,
- {ok, D} = start_node(DN, "-kernel net_ticktime "
- ++ integer_to_list(PrevTT) ++ " -pa " ++ PaDir),
+ {ok, D} = start_node(DCfg, DN, "-kernel net_ticktime "
+ ++ integer_to_list(PrevTT)),
NMA = spawn_link(fun () -> MonitorNodes([B, C, D]) end),
NMB = spawn_link(B, fun () -> MonitorNodes([node(), C, D]) end),
@@ -665,8 +697,8 @@ run_tick_change_test(B, C, PrevTT, TT, PaDir) ->
sleep(7),
change_initiated = rpc:call(C,net_kernel,set_net_ticktime,[TT,10]),
- {ok, E} = start_node(EN, "-kernel net_ticktime "
- ++ integer_to_list(TT) ++ " -pa " ++ PaDir),
+ {ok, E} = start_node(DCfg, EN, "-kernel net_ticktime "
+ ++ integer_to_list(TT)),
NME = spawn_link(E, fun () -> MonitorNodes([node(), B, C, D]) end),
NMA2 = spawn_link(fun () -> MonitorNodes([E]) end),
NMB2 = spawn_link(B, fun () -> MonitorNodes([E]) end),
@@ -726,12 +758,13 @@ run_tick_change_test(B, C, PrevTT, TT, PaDir) ->
%%
%% Basic test of hidden node.
hidden_node(Config) when is_list(Config) ->
- PaDir = filename:dirname(code:which(?MODULE)),
- VArgs = "-pa " ++ PaDir,
- HArgs = "-hidden -pa " ++ PaDir,
- {ok, V} = start_node(visible_node, VArgs),
+ run_dist_configs(fun hidden_node/2, Config).
+
+hidden_node(DCfg, _Config) ->
+ HArgs = "-hidden",
+ {ok, V} = start_node(DCfg, visible_node),
VMN = start_monitor_nodes_proc(V),
- {ok, H} = start_node(hidden_node, HArgs),
+ {ok, H} = start_node(DCfg, hidden_node, HArgs),
%% Connect visible_node -> hidden_node
connect_nodes(V, H),
test_nodes(V, H),
@@ -739,9 +772,9 @@ hidden_node(Config) when is_list(Config) ->
sleep(5),
check_monitor_nodes_res(VMN, H),
stop_node(V),
- {ok, H} = start_node(hidden_node, HArgs),
+ {ok, H} = start_node(DCfg, hidden_node, HArgs),
HMN = start_monitor_nodes_proc(H),
- {ok, V} = start_node(visible_node, VArgs),
+ {ok, V} = start_node(DCfg, visible_node),
%% Connect hidden_node -> visible_node
connect_nodes(H, V),
test_nodes(V, H),
@@ -841,9 +874,9 @@ do_inet_dist_options_options(Prio) ->
"-kernel inet_dist_connect_options "++PriorityString++" "
"-kernel inet_dist_listen_options "++PriorityString,
{ok,Node1} =
- start_node(inet_dist_options_1, InetDistOptions),
+ start_node("", inet_dist_options_1, InetDistOptions),
{ok,Node2} =
- start_node(inet_dist_options_2, InetDistOptions),
+ start_node("", inet_dist_options_2, InetDistOptions),
%%
pong =
rpc:call(Node1, net_adm, ping, [Node2]),
@@ -876,6 +909,9 @@ get_socket_priorities() ->
%%
monitor_nodes_nodedown_reason(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_nodedown_reason/2, Config).
+
+monitor_nodes_nodedown_reason(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
ok = net_kernel:monitor_nodes(true),
ok = net_kernel:monitor_nodes(true, [nodedown_reason]),
@@ -883,10 +919,10 @@ monitor_nodes_nodedown_reason(Config) when is_list(Config) ->
Names = get_numbered_nodenames(5, node),
[NN1, NN2, NN3, NN4, NN5] = Names,
- {ok, N1} = start_node(NN1),
- {ok, N2} = start_node(NN2),
- {ok, N3} = start_node(NN3),
- {ok, N4} = start_node(NN4, "-hidden"),
+ {ok, N1} = start_node(DCfg, NN1),
+ {ok, N2} = start_node(DCfg, NN2),
+ {ok, N3} = start_node(DCfg, NN3),
+ {ok, N4} = start_node(DCfg, NN4, "-hidden"),
receive {nodeup, N1} -> ok end,
receive {nodeup, N2} -> ok end,
@@ -916,7 +952,7 @@ monitor_nodes_nodedown_reason(Config) when is_list(Config) ->
ok = net_kernel:monitor_nodes(false, [nodedown_reason]),
- {ok, N5} = start_node(NN5),
+ {ok, N5} = start_node(DCfg, NN5),
stop_node(N5),
receive {nodeup, N5} -> ok end,
@@ -929,11 +965,14 @@ monitor_nodes_nodedown_reason(Config) when is_list(Config) ->
monitor_nodes_complex_nodedown_reason(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_complex_nodedown_reason/2, Config).
+
+monitor_nodes_complex_nodedown_reason(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
Me = self(),
ok = net_kernel:monitor_nodes(true, [nodedown_reason]),
[Name] = get_nodenames(1, monitor_nodes_complex_nodedown_reason),
- {ok, Node} = start_node(Name, ""),
+ {ok, Node} = start_node(DCfg, Name, ""),
Pid = spawn(Node,
fun() ->
Me ! {stuff,
@@ -972,16 +1011,19 @@ monitor_nodes_complex_nodedown_reason(Config) when is_list(Config) ->
%%
monitor_nodes_node_type(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_node_type/2, Config).
+
+monitor_nodes_node_type(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
ok = net_kernel:monitor_nodes(true),
ok = net_kernel:monitor_nodes(true, [{node_type, all}]),
Names = get_numbered_nodenames(9, node),
[NN1, NN2, NN3, NN4, NN5, NN6, NN7, NN8, NN9] = Names,
- {ok, N1} = start_node(NN1),
- {ok, N2} = start_node(NN2),
- {ok, N3} = start_node(NN3, "-hidden"),
- {ok, N4} = start_node(NN4, "-hidden"),
+ {ok, N1} = start_node(DCfg, NN1),
+ {ok, N2} = start_node(DCfg, NN2),
+ {ok, N3} = start_node(DCfg, NN3, "-hidden"),
+ {ok, N4} = start_node(DCfg, NN4, "-hidden"),
receive {nodeup, N1} -> ok end,
receive {nodeup, N2} -> ok end,
@@ -1005,15 +1047,15 @@ monitor_nodes_node_type(Config) when is_list(Config) ->
receive {nodedown, N4, [{node_type, hidden}]} -> ok end,
ok = net_kernel:monitor_nodes(false, [{node_type, all}]),
- {ok, N5} = start_node(NN5),
+ {ok, N5} = start_node(DCfg, NN5),
receive {nodeup, N5} -> ok end,
stop_node(N5),
receive {nodedown, N5} -> ok end,
ok = net_kernel:monitor_nodes(true, [{node_type, hidden}]),
- {ok, N6} = start_node(NN6),
- {ok, N7} = start_node(NN7, "-hidden"),
+ {ok, N6} = start_node(DCfg, NN6),
+ {ok, N7} = start_node(DCfg, NN7, "-hidden"),
receive {nodeup, N6} -> ok end,
@@ -1028,8 +1070,8 @@ monitor_nodes_node_type(Config) when is_list(Config) ->
ok = net_kernel:monitor_nodes(false, [{node_type, hidden}]),
ok = net_kernel:monitor_nodes(false),
- {ok, N8} = start_node(NN8),
- {ok, N9} = start_node(NN9, "-hidden"),
+ {ok, N8} = start_node(DCfg, NN8),
+ {ok, N9} = start_node(DCfg, NN9, "-hidden"),
receive {nodeup, N8, [{node_type, visible}]} -> ok end,
stop_node(N8),
@@ -1049,6 +1091,9 @@ monitor_nodes_node_type(Config) when is_list(Config) ->
%%
monitor_nodes_misc(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_misc/2, Config).
+
+monitor_nodes_misc(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
ok = net_kernel:monitor_nodes(true),
ok = net_kernel:monitor_nodes(true, [{node_type, all}, nodedown_reason]),
@@ -1056,8 +1101,8 @@ monitor_nodes_misc(Config) when is_list(Config) ->
Names = get_numbered_nodenames(3, node),
[NN1, NN2, NN3] = Names,
- {ok, N1} = start_node(NN1),
- {ok, N2} = start_node(NN2, "-hidden"),
+ {ok, N1} = start_node(DCfg, NN1),
+ {ok, N2} = start_node(DCfg, NN2, "-hidden"),
receive {nodeup, N1} -> ok end,
@@ -1083,7 +1128,7 @@ monitor_nodes_misc(Config) when is_list(Config) ->
ok = net_kernel:monitor_nodes(false, [{node_type, all}, nodedown_reason]),
- {ok, N3} = start_node(NN3),
+ {ok, N3} = start_node(DCfg, NN3),
receive {nodeup, N3} -> ok end,
stop_node(N3),
receive {nodedown, N3} -> ok end,
@@ -1098,15 +1143,18 @@ monitor_nodes_misc(Config) when is_list(Config) ->
%% messages from Node and that {nodedown, Node} messages are
%% received after messages from Node.
monitor_nodes_otp_6481(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_otp_6481/2, Config).
+
+monitor_nodes_otp_6481(DCfg, Config) ->
io:format("Testing nodedown...~n"),
- monitor_nodes_otp_6481_test(Config, nodedown),
+ monitor_nodes_otp_6481_test(DCfg, Config, nodedown),
io:format("ok~n"),
io:format("Testing nodeup...~n"),
- monitor_nodes_otp_6481_test(Config, nodeup),
+ monitor_nodes_otp_6481_test(DCfg, Config, nodeup),
io:format("ok~n"),
ok.
-monitor_nodes_otp_6481_test(Config, TestType) when is_list(Config) ->
+monitor_nodes_otp_6481_test(DCfg, Config, TestType) when is_list(Config) ->
MonNodeState = monitor_node_state(),
NodeMsg = make_ref(),
Me = self(),
@@ -1155,7 +1203,7 @@ monitor_nodes_otp_6481_test(Config, TestType) when is_list(Config) ->
end
++ MonNodeState,
- {ok, Node} = start_node(Name, "", this),
+ {ok, Node} = start_node(DCfg, Name, "", this),
receive {nodeup, Node} -> ok end,
RemotePid = spawn(Node,
@@ -1240,17 +1288,20 @@ monitor_nodes_errors(Config) when is_list(Config) ->
ok.
monitor_nodes_combinations(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_combinations/2, Config).
+
+monitor_nodes_combinations(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
monitor_nodes_all_comb(true),
[VisibleName, HiddenName] = get_nodenames(2,
monitor_nodes_combinations),
- {ok, Visible} = start_node(VisibleName, ""),
+ {ok, Visible} = start_node(DCfg, VisibleName, ""),
receive_all_comb_nodeup_msgs(visible, Visible),
no_msgs(),
stop_node(Visible),
receive_all_comb_nodedown_msgs(visible, Visible, connection_closed),
no_msgs(),
- {ok, Hidden} = start_node(HiddenName, "-hidden"),
+ {ok, Hidden} = start_node(DCfg, HiddenName, "-hidden"),
receive_all_comb_nodeup_msgs(hidden, Hidden),
no_msgs(),
stop_node(Hidden),
@@ -1386,6 +1437,9 @@ monitor_nodes_cleanup(Config) when is_list(Config) ->
ok.
monitor_nodes_many(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_many/2, Config).
+
+monitor_nodes_many(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
[Name] = get_nodenames(1, monitor_nodes_many),
%% We want to perform more than 2^16 net_kernel:monitor_nodes
@@ -1393,7 +1447,7 @@ monitor_nodes_many(Config) when is_list(Config) ->
No = (1 bsl 16) + 17,
repeat(fun () -> ok = net_kernel:monitor_nodes(true) end, No),
No = length(monitor_node_state()) - length(MonNodeState),
- {ok, Node} = start_node(Name),
+ {ok, Node} = start_node(DCfg, Name),
repeat(fun () -> receive {nodeup, Node} -> ok end end, No),
stop_node(Node),
repeat(fun () -> receive {nodedown, Node} -> ok end end, No),
@@ -1402,8 +1456,118 @@ monitor_nodes_many(Config) when is_list(Config) ->
MonNodeState = monitor_node_state(),
ok.
+dist_ctrl_proc_smoke(Config) when is_list(Config) ->
+ ThisNode = node(),
+ [Name1, Name2] = get_nodenames(2, dist_ctrl_proc_example_smoke),
+ GetSizeArg = " -gen_tcp_dist_output_loop "
+ ++ atom_to_list(?MODULE) ++ " "
+ ++ "dist_cntrlr_output_test",
+ {ok, Node1} = start_node("", Name1, "-proto_dist gen_tcp"),
+ {ok, Node2} = start_node("", Name2, "-proto_dist gen_tcp" ++ GetSizeArg),
+ pong = rpc:call(Node1, net_adm, ping, [Node2]),
+ NL1 = lists:sort([ThisNode, Node2]),
+ NL2 = lists:sort([ThisNode, Node1]),
+ NL1 = lists:sort(rpc:call(Node1, erlang, nodes, [])),
+ NL2 = lists:sort(rpc:call(Node2, erlang, nodes, [])),
+
+ %% Verify that we actually are executing the distribution
+ %% module we expect and also massage message passing over
+ %% it a bit...
+ Ps1 = rpc:call(Node1, erlang, processes, []),
+ try
+ lists:foreach(
+ fun (P) ->
+ case rpc:call(Node1, erlang, process_info, [P, current_stacktrace]) of
+ undefined ->
+ ok;
+ {current_stacktrace, StkTrace} ->
+ lists:foreach(fun ({gen_tcp_dist,
+ dist_cntrlr_output_loop,
+ 2, _}) ->
+ io:format("~p ~p~n", [P, StkTrace]),
+ throw(found_it);
+ (_) ->
+ ok
+ end, StkTrace)
+ end
+ end, Ps1),
+ exit({missing, dist_cntrlr_output_loop})
+ catch
+ throw:found_it -> ok
+ end,
+
+ Ps2 = rpc:call(Node2, erlang, processes, []),
+ try
+ lists:foreach(
+ fun (P) ->
+ case rpc:call(Node2, erlang, process_info, [P, current_stacktrace]) of
+ undefined ->
+ ok;
+ {current_stacktrace, StkTrace} ->
+ lists:foreach(fun ({erl_distribution_SUITE,
+ dist_cntrlr_output_loop,
+ 2, _}) ->
+ io:format("~p ~p~n", [P, StkTrace]),
+ throw(found_it);
+ (_) ->
+ ok
+ end, StkTrace)
+ end
+ end, Ps2),
+ exit({missing, dist_cntrlr_output_loop})
+ catch
+ throw:found_it -> ok
+ end,
+
+ stop_node(Node1),
+ stop_node(Node2),
+ ok.
+
%% Misc. functions
+run_dist_configs(Func, Config) ->
+ GetSizeArg = " -gen_tcp_dist_output_loop "
+ ++ atom_to_list(?MODULE) ++ " "
+ ++ "dist_cntrlr_output_test",
+ lists:map(fun ({DCfgName, DCfg}) ->
+ io:format("~n~n=== Running ~s configuration ===~n~n",
+ [DCfgName]),
+ Func(DCfg, Config)
+ end,
+ [{"default", ""},
+ {"gen_tcp_dist", "-proto_dist gen_tcp"},
+ {"gen_tcp_dist (get_size)", "-proto_dist gen_tcp" ++ GetSizeArg}]).
+
+dist_cntrlr_output_test(DHandle, Socket) ->
+ false = erlang:dist_ctrl_get_opt(DHandle, get_size),
+ false = erlang:dist_ctrl_set_opt(DHandle, get_size, true),
+ true = erlang:dist_ctrl_get_opt(DHandle, get_size),
+ true = erlang:dist_ctrl_set_opt(DHandle, get_size, false),
+ false = erlang:dist_ctrl_get_opt(DHandle, get_size),
+ false = erlang:dist_ctrl_set_opt(DHandle, get_size, true),
+ true = erlang:dist_ctrl_get_opt(DHandle, get_size),
+ dist_cntrlr_output_loop(DHandle, Socket).
+
+dist_cntrlr_send_data(DHandle, Socket) ->
+ case erlang:dist_ctrl_get_data(DHandle) of
+ none ->
+ erlang:dist_ctrl_get_data_notification(DHandle);
+ {Size, Data} ->
+ Size = erlang:iolist_size(Data),
+ ok = gen_tcp:send(Socket, Data),
+ dist_cntrlr_send_data(DHandle, Socket)
+ end.
+
+dist_cntrlr_output_loop(DHandle, Socket) ->
+ receive
+ dist_data ->
+ %% Outgoing data from this node...
+ dist_cntrlr_send_data(DHandle, Socket);
+ _ ->
+ ok %% Drop garbage message...
+ end,
+ dist_cntrlr_output_loop(DHandle, Socket).
+
monitor_node_state() ->
erts_debug:set_internal_state(available_internal_state, true),
MonitoringNodes = erts_debug:get_internal_state(monitoring_nodes),
@@ -1429,25 +1593,25 @@ print_my_messages() ->
sleep(T) -> receive after T * 1000 -> ok end.
-start_node(Name, Param, this) ->
+start_node(DCfg, Name, Param, this) ->
NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
test_server:start_node(Name, peer, [{args, NewParam}, {erl, [this]}]);
-start_node(Name, Param, "this") ->
- NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
+start_node(DCfg, Name, Param, "this") ->
+ NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)) ++ " " ++ DCfg,
test_server:start_node(Name, peer, [{args, NewParam}, {erl, [this]}]);
-start_node(Name, Param, Rel) when is_atom(Rel) ->
- NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
+start_node(DCfg, Name, Param, Rel) when is_atom(Rel) ->
+ NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)) ++ " " ++ DCfg,
test_server:start_node(Name, peer, [{args, NewParam}, {erl, [{release, atom_to_list(Rel)}]}]);
-start_node(Name, Param, Rel) when is_list(Rel) ->
- NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
+start_node(DCfg, Name, Param, Rel) when is_list(Rel) ->
+ NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)) ++ " " ++ DCfg,
test_server:start_node(Name, peer, [{args, NewParam}, {erl, [{release, Rel}]}]).
-start_node(Name, Param) ->
- NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
+start_node(DCfg, Name, Param) ->
+ NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)) ++ " " ++ DCfg,
test_server:start_node(Name, slave, [{args, NewParam}]).
-start_node(Name) ->
- start_node(Name, "").
+start_node(DCfg, Name) ->
+ start_node(DCfg, Name, "").
stop_node(Node) ->
test_server:stop_node(Node).
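
The dist_cntrlr_output_test/2 callback added above toggles the distribution controller's
get_size option with erlang:dist_ctrl_set_opt/3; when that option is enabled,
erlang:dist_ctrl_get_data/1 hands out {Size, Data} tuples rather than bare data. A minimal
sketch of a consumer that copes with both shapes, assuming DHandle is a handle owned by the
calling distribution controller process and send_to_peer/2 stands in for a
transport-specific send (it is not a real API):

    %% Sketch only: send_to_peer/2 is a placeholder.
    drain(DHandle, Socket) ->
        case erlang:dist_ctrl_get_data(DHandle) of
            none ->
                %% Nothing queued; request a 'dist_data' message when
                %% more outgoing data becomes available.
                erlang:dist_ctrl_get_data_notification(DHandle);
            {Size, Data} ->
                %% Shape returned while get_size is enabled.
                Size = erlang:iolist_size(Data),
                send_to_peer(Socket, Data),
                drain(DHandle, Socket);
            Data ->
                %% Shape returned while get_size is disabled (the default).
                send_to_peer(Socket, Data),
                drain(DHandle, Socket)
        end.
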
diff --git a/lib/kernel/test/file_SUITE.erl b/lib/kernel/test/file_SUITE.erl
index 23913ac56a..3bc8e6e828 100644
--- a/lib/kernel/test/file_SUITE.erl
+++ b/lib/kernel/test/file_SUITE.erl
@@ -100,7 +100,7 @@
-export([unicode_mode/1]).
--export([volume_relative_paths/1]).
+-export([volume_relative_paths/1,unc_paths/1]).
-export([tiny_writes/1, tiny_writes_delayed/1,
large_writes/1, large_writes_delayed/1,
@@ -129,7 +129,7 @@ suite() ->
all() ->
[unicode, altname, read_write_file, {group, dirs},
- {group, files}, delete, rename, names, volume_relative_paths,
+ {group, files}, delete, rename, names, volume_relative_paths, unc_paths,
{group, errors}, {group, compression}, {group, links}, copy,
delayed_write, read_ahead, segment_read, segment_write,
ipread, pid2name, interleaved_read_write, otp_5814, otp_10852,
@@ -1367,6 +1367,10 @@ file_info_basic_file(Config) when is_list(Config) ->
io:put_chars(Fd1, "foo bar"),
ok = ?FILE_MODULE:close(Fd1),
+ %% Don't crash the file server when passing incorrect arguments.
+ {error,badarg} = ?FILE_MODULE:read_file_info(Name, [{time, gurka}]),
+ {error,badarg} = ?FILE_MODULE:read_file_info([#{} | gaffel]),
+
%% Test that the file has the expected attributes.
%% The times are tricky, so we will save them to a separate test case.
{ok,FileInfo} = ?FILE_MODULE:read_file_info(Name),
@@ -2178,6 +2182,33 @@ volume_relative_paths(Config) when is_list(Config) ->
{skip, "This test is Windows-specific."}
end.
+unc_paths(Config) when is_list(Config) ->
+ case os:type() of
+ {win32, _} ->
+ %% We assume administrative shares are set up and reachable, and we
+ %% settle for testing presence as some of the returned data is
+ %% different.
+ {ok, _} = file:read_file_info("C:\\Windows\\explorer.exe"),
+ {ok, _} = file:read_file_info("\\\\localhost\\c$\\Windows\\explorer.exe"),
+
+ {ok, Files} = file:list_dir("C:\\Windows\\"),
+ {ok, Files} = file:list_dir("\\\\localhost\\c$\\Windows\\"),
+
+ {ok, Cwd} = file:get_cwd(),
+
+ try
+ ok = file:set_cwd("\\\\localhost\\c$\\Windows\\"),
+ {ok, _} = file:read_file_info("explorer.exe")
+ after
+ file:set_cwd(Cwd)
+ end,
+
+ [] = flush(),
+ ok;
+ _ ->
+ {skip, "This test is Windows-specific."}
+ end.
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2206,7 +2237,8 @@ e_delete(Config) when is_list(Config) ->
case os:type() of
{win32, _} ->
%% Remove a character device.
- {error, eacces} = ?FILE_MODULE:delete("nul");
+ expect({error, eacces}, {error, einval},
+ ?FILE_MODULE:delete("nul"));
_ ->
?FILE_MODULE:write_file_info(
Base, #file_info {mode=0}),
@@ -3712,19 +3744,33 @@ otp_10852(Config) when is_list(Config) ->
ok = rpc_call(Node, read_file, [B]),
ok = rpc_call(Node, make_link, [B,B]),
case rpc_call(Node, make_symlink, [B,B]) of
- ok -> ok;
- {error, E} when (E =:= enotsup) or (E =:= eperm) ->
- {win32,_} = os:type()
+ {error, eilseq} ->
+ %% Some versions of OS X refuse to create files with illegal names.
+ {unix,darwin} = os:type();
+ {error, eperm} ->
+ %% The test user might not have permission to create symlinks.
+ {win32,_} = os:type();
+ ok ->
+ ok
end,
ok = rpc_call(Node, delete, [B]),
- ok = rpc_call(Node, make_dir, [B]),
+ case rpc_call(Node, make_dir, [B]) of
+ {error, eilseq} ->
+ {unix,darwin} = os:type();
+ ok ->
+ ok
+ end,
ok = rpc_call(Node, del_dir, [B]),
- ok = rpc_call(Node, write_file, [B,B]),
- {ok, Fd} = rpc_call(Node, open, [B,[read]]),
- ok = rpc_call(Node, close, [Fd]),
- {ok,0} = rpc_call(Node, copy, [B,B]),
- {ok, Fd2, B} = rpc_call(Node, path_open, [["."], B, [read]]),
- ok = rpc_call(Node, close, [Fd2]),
+ case rpc_call(Node, write_file, [B,B]) of
+ {error, eilseq} ->
+ {unix,darwin} = os:type();
+ ok ->
+ {ok, Fd} = rpc_call(Node, open, [B,[read]]),
+ ok = rpc_call(Node, close, [Fd]),
+ {ok,0} = rpc_call(Node, copy, [B,B]),
+ {ok, Fd2, B} = rpc_call(Node, path_open, [["."], B, [read]]),
+ ok = rpc_call(Node, close, [Fd2])
+ end,
true = test_server:stop_node(Node),
ok.
@@ -4468,15 +4514,18 @@ run_large_file_test(Config, Run, Name) ->
{{unix,sunos},OsVersion} when OsVersion < {5,5,1} ->
{skip,"Only supported on Win32, Unix or SunOS >= 5.5.1"};
{{unix,_},_} ->
- N = disc_free(proplists:get_value(priv_dir, Config)),
- io:format("Free disk: ~w KByte~n", [N]),
- if N < 5 * (1 bsl 20) ->
- %% Less than 5 GByte free
- {skip,"Less than 5 GByte free"};
- true ->
- do_run_large_file_test(Config, Run, Name)
- end;
- _ ->
+ case disc_free(proplists:get_value(priv_dir, Config)) of
+ error ->
+ {skip, "Failed to query disk space for priv_dir. "
+ "Is it on a remote file system?~n"};
+ N when N >= 5 * (1 bsl 20) ->
+ ct:pal("Free disk: ~w KByte~n", [N]),
+ do_run_large_file_test(Config, Run, Name);
+ N when N < 5 * (1 bsl 20) ->
+ ct:pal("Free disk: ~w KByte~n", [N]),
+ {skip,"Less than 5 GByte free"}
+ end;
+ _ ->
{skip,"Only supported on Win32, Unix or SunOS >= 5.5.1"}
end.
@@ -4510,12 +4559,18 @@ do_run_large_file_test(Config, Run, Name0) ->
disc_free(Path) ->
Data = disksup:get_disk_data(),
- {_,Tot,Perc} = hd(lists:filter(
- fun({P,_Size,_Full}) ->
- lists:prefix(filename:nativename(P),
- filename:nativename(Path))
- end, lists:reverse(lists:sort(Data)))),
- round(Tot * (1-(Perc/100))).
+
+ %% Which partitions could Path be mounted on?
+ Partitions =
+ [D || {P, _Tot, _Perc}=D <- Data,
+ lists:prefix(filename:nativename(P), filename:nativename(Path))],
+
+ %% Sorting in descending order places the partition with the most specific
+ %% path first.
+ case lists:sort(fun erlang:'>='/2, Partitions) of
+ [{_,Tot, Perc} | _] -> round(Tot * (1-(Perc/100)));
+ [] -> error
+ end.
memsize() ->
{Tot,_Used,_} = memsup:get_memory_data(),
diff --git a/lib/kernel/test/file_name_SUITE.erl b/lib/kernel/test/file_name_SUITE.erl
index 3afc647081..26cfd187c7 100644
--- a/lib/kernel/test/file_name_SUITE.erl
+++ b/lib/kernel/test/file_name_SUITE.erl
@@ -632,10 +632,13 @@ make_icky_dir(Mod, IckyDirName) ->
hopeless_darwin() ->
case {os:type(),os:version()} of
- {{unix,darwin},{Major,_,_}} when Major < 9 ->
- true;
- _ ->
- false
+ {{unix,darwin},{Major,_,_}} ->
+ %% icky file names worked between 10 and 17, but started returning
+ %% EILSEQ in 18. The check against 18 is exact in case newer
+ %% versions of Darwin support them again.
+ Major < 9 orelse Major =:= 18;
+ _ ->
+ false
end.
make_very_icky_dir(Mod, DirName) ->
diff --git a/lib/kernel/test/gen_sctp_SUITE.erl b/lib/kernel/test/gen_sctp_SUITE.erl
index a0ae792ba9..e4c489bd10 100644
--- a/lib/kernel/test/gen_sctp_SUITE.erl
+++ b/lib/kernel/test/gen_sctp_SUITE.erl
@@ -1459,11 +1459,11 @@ do_open_and_connect(ServerAddresses, AddressToConnectTo) ->
do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun).
%%
do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun) ->
- ServerFamily = get_family_by_addrs(ServerAddresses),
+ {ServerFamily, ServerOpts} = get_family_by_addrs(ServerAddresses),
io:format("Serving ~p addresses: ~p~n",
[ServerFamily, ServerAddresses]),
S1 = ok(gen_sctp:open(0, [{ip,Addr} || Addr <- ServerAddresses] ++
- [ServerFamily])),
+ [ServerFamily|ServerOpts])),
ok = gen_sctp:listen(S1, true),
P1 = ok(inet:port(S1)),
ClientFamily = get_family_by_addr(AddressToConnectTo),
@@ -1493,9 +1493,9 @@ do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun) ->
%% If at least one of the addresses is an ipv6 address, return inet6, else inet.
get_family_by_addrs(Addresses) ->
case lists:usort([get_family_by_addr(Addr) || Addr <- Addresses]) of
- [inet, inet6] -> inet6;
- [inet] -> inet;
- [inet6] -> inet6
+ [inet, inet6] -> {inet6, [{ipv6_v6only, false}]};
+ [inet] -> {inet, []};
+ [inet6] -> {inet6, []}
end.
get_family_by_addr(Addr) when tuple_size(Addr) =:= 4 -> inet;
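
The change above makes do_open_and_connect/3 open the server socket as inet6 with
{ipv6_v6only, false} whenever the address list mixes IPv4 and IPv6, so a single SCTP socket
can be bound to addresses of both families. A minimal sketch, assuming the host has both
loopback addresses configured and the OS permits dual-family binding (the concrete
addresses are only examples):

    %% Sketch only: bind one dual-stack SCTP socket to an IPv4 and an
    %% IPv6 address by disabling the v6-only restriction.
    {ok, S} = gen_sctp:open(0, [{ip, {127,0,0,1}},
                                {ip, {0,0,0,0,0,0,0,1}},
                                inet6, {ipv6_v6only, false}]),
    ok = gen_sctp:listen(S, true),
    ok = gen_sctp:close(S).
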
diff --git a/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c b/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
index b91dca61d4..96938f9071 100644
--- a/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
+++ b/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
@@ -30,6 +30,7 @@
#define sock_close(s) closesocket(s)
#else
#include <sys/socket.h>
+#include <unistd.h>
#define sock_close(s) close(s)
#endif
diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl
index 950f5bea6f..edf30448c4 100644
--- a/lib/kernel/test/gen_tcp_misc_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl
@@ -41,6 +41,7 @@
busy_send/1, busy_disconnect_passive/1, busy_disconnect_active/1,
fill_sendq/1, partial_recv_and_close/1,
partial_recv_and_close_2/1,partial_recv_and_close_3/1,so_priority/1,
+ recvtos/1, recvttl/1, recvtosttl/1, recvtclass/1,
%% Accept tests
primitive_accept/1,multi_accept_close_listen/1,accept_timeout/1,
accept_timeouts_in_order/1,accept_timeouts_in_order2/1,
@@ -51,7 +52,8 @@
several_accepts_in_one_go/1, accept_system_limit/1,
active_once_closed/1, send_timeout/1, send_timeout_active/1,
otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1,
- wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1]).
+ wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1,
+ otp_12242/1, delay_send_error/1]).
%% Internal exports.
-export([sender/3, not_owner/1, passive_sockets_server/2, priority_server/1,
@@ -83,7 +85,8 @@ all() ->
busy_disconnect_passive, busy_disconnect_active,
fill_sendq, partial_recv_and_close,
partial_recv_and_close_2, partial_recv_and_close_3,
- so_priority, primitive_accept,
+ so_priority, recvtos, recvttl, recvtosttl,
+ recvtclass, primitive_accept,
multi_accept_close_listen, accept_timeout,
accept_timeouts_in_order, accept_timeouts_in_order2,
accept_timeouts_in_order3, accept_timeouts_in_order4,
@@ -93,7 +96,8 @@ all() ->
killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit,
active_once_closed, send_timeout, send_timeout_active, otp_7731,
wrapping_oct,
- zombie_sockets, otp_7816, otp_8102, otp_9389].
+ zombie_sockets, otp_7816, otp_8102, otp_9389,
+ otp_12242, delay_send_error].
groups() ->
[].
@@ -1914,6 +1918,264 @@ so_priority(Config) when is_list(Config) ->
end
end.
+
+
+%% IP_RECVTOS and IP_RECVTCLASS for IP_PKTOPTIONS
+%% do not seem to be implemented in Linux until kernel 3.1.
+%%
+%% It seems pktoptions does not return valid values
+%% for IPv4 connect sockets. On the accept socket
+%% we get valid values, but on the connect socket we get
+%% the default values for TOS and TTL.
+%%
+%% Therefore the argument CheckConnect enables checking
+%% the returned values for the connect socket.
+%% It is only used for recvtclass, which is an IPv6 option,
+%% and there we get valid values from both socket ends.
+
+recvtos(_Config) ->
+ test_pktoptions(
+ inet, [{recvtos,tos,96}],
+ fun recvtos_ok/2,
+ false).
+
+recvtosttl(_Config) ->
+ test_pktoptions(
+ inet, [{recvtos,tos,96},{recvttl,ttl,33}],
+ fun (OSType, OSVer) ->
+ recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer)
+ end,
+ false).
+
+recvttl(_Config) ->
+ test_pktoptions(
+ inet, [{recvttl,ttl,33}],
+ fun recvttl_ok/2,
+ false).
+
+recvtclass(_Config) ->
+ {ok,IFs} = inet:getifaddrs(),
+ case
+ [Name ||
+ {Name,Opts} <- IFs,
+ lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)]
+ of
+ [_] ->
+ test_pktoptions(
+ inet6, [{recvtclass,tclass,224}],
+ fun recvtclass_ok/2,
+ true);
+ [] ->
+ {skip,{ipv6_not_supported,IFs}}
+ end.
+
+%% These version numbers are above the highest noted
+%% in daily tests where the test fails for a plausible reason,
+%% so we skip on platforms of a lower version, i.e. future
+%% versions where it is possible that the test might not fail.
+%%
+%% When machines with newer versions get installed,
+%% if the test still fails for a plausible reason these
+%% version numbers should simply be increased.
+%% Or maybe we should change to only test on known good
+%% platforms - change {unix,_} to false?
+
+%% pktoptions is not supported for IPv4
+recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
+recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Does not return any value - not implemented for pktoptions
+recvtos_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0});
+%%
+recvtos_ok({unix,_}, _) -> true;
+recvtos_ok(_, _) -> false.
+
+%% pktoptions is not supported for IPv4
+recvttl_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+recvttl_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+%% Using the option returns einval, so it is not implemented.
+recvttl_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
+recvttl_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Does not return any value - not implemented for pktoptions
+recvttl_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,7,0});
+%%
+recvttl_ok({unix,_}, _) -> true;
+recvttl_ok(_, _) -> false.
+
+%% pktoptions is not supported for IPv6
+recvtclass_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+recvtclass_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Using the option returns einval, so it is not implemented.
+recvtclass_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0});
+%% Does not return any value - not implemented for pktoptions
+recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0});
+%%
+recvtclass_ok({unix,_}, _) -> true;
+recvtclass_ok(_, _) -> false.
+
+semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) ->
+ if
+ X1 > X2 -> false;
+ X1 < X2 -> true;
+ Y1 > Y2 -> false;
+ Y1 < Y2 -> true;
+ Z1 > Z2 -> false;
+ Z1 < Z2 -> true;
+ true -> false
+ end;
+semver_lt(_, {_,_,_}) -> false.
+
+test_pktoptions(Family, Spec, OSFilter, CheckConnect) ->
+ OSType = os:type(),
+ OSVer = os:version(),
+ case OSFilter(OSType, OSVer) of
+ true ->
+ io:format("Os: ~p, ~p~n", [OSType,OSVer]),
+ test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer);
+ false ->
+ {skip,{not_supported_for_os_version,{OSType,OSVer}}}
+ end.
+%%
+test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer) ->
+ Timeout = 5000,
+ RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec],
+ TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec],
+ FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec],
+ Opts = [Opt || {_,Opt,_} <- Spec],
+ OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec],
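+    %% For example, with Spec = [{recvtos,tos,96}] the bindings above
+    %% become (shown for illustration only):
+    %%   RecvOpts      = [recvtos]
+    %%   TrueRecvOpts  = [{recvtos,true}]
+    %%   FalseRecvOpts = [{recvtos,false}]
+    %%   Opts          = [tos]
+    %%   OptsVals      = [{tos,96}]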
+ Address =
+ case Family of
+ inet ->
+ {127,0,0,1};
+ inet6 ->
+ {0,0,0,0,0,0,0,1}
+ end,
+ %%
+ %% Set RecvOpts on listen socket
+ {ok,L} =
+ gen_tcp:listen(
+ 0,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts]),
+ {ok,P} = inet:port(L),
+ {ok,TrueRecvOpts} = inet:getopts(L, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(L, Opts),
+ %%
+ %% Set RecvOpts and Option values on connect socket
+ {ok,S2} =
+ gen_tcp:connect(
+ Address, P,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts ++ OptsVals],
+ Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts),
+ {ok,OptsVals} = inet:getopts(S2, Opts),
+ %%
+ %% Accept socket inherits the options from listen socket
+ {ok,S1} = gen_tcp:accept(L, Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(S1, Opts),
+%%% %%
+%%% %% Handshake
+%%% ok = gen_tcp:send(S1, <<"hello">>),
+%%% {ok,<<"hello">>} = gen_tcp:recv(S2, 5, Timeout),
+%%% ok = gen_tcp:send(S2, <<"hi">>),
+%%% {ok,<<"hi">>} = gen_tcp:recv(S1, 2, Timeout),
+ %%
+ %% Verify returned remote options
+ VerifyRemOpts =
+ fun(S, Role) ->
+ case inet:getopts(S, [pktoptions]) of
+ {ok, [{pktoptions, PktOpts1}]} ->
+ PktOpts1;
+ {ok, UnexpOK1} ->
+ io:format("Unexpected OK (~w): "
+ "~n ~p"
+ "~n", [Role, UnexpOK1]),
+ exit({unexpected_getopts_ok,
+ Role,
+ Spec,
+ TrueRecvOpts,
+ OptsVals,
+ OptsValsDefault,
+ UnexpOK1});
+ {error, UnexpERR1} ->
+ io:format("Unexpected ERROR (~w): "
+ "~n ~p"
+ "~n", [Role, UnexpERR1]),
+ exit({unexpected_getopts_failure,
+ Role,
+ Spec,
+ TrueRecvOpts,
+ OptsVals,
+ OptsValsDefault,
+ UnexpERR1})
+ end
+ end,
+ OptsVals1 = VerifyRemOpts(S1, dest),
+ OptsVals2 = VerifyRemOpts(S2, orig),
+ %% {ok,[{pktoptions,OptsVals1}]} = inet:getopts(S1, [pktoptions]),
+ %% {ok,[{pktoptions,OptsVals2}]} = inet:getopts(S2, [pktoptions]),
+ (Result1 = sets_eq(OptsVals1, OptsVals))
+ orelse io:format(
+ "Accept differs: ~p neq ~p~n", [OptsVals1,OptsVals]),
+ (Result2 = sets_eq(OptsVals2, OptsValsDefault))
+ orelse io:format(
+ "Connect differs: ~p neq ~p~n",
+ [OptsVals2,OptsValsDefault]),
+ %%
+ ok = gen_tcp:close(S2),
+ ok = gen_tcp:close(S1),
+ %%
+ %%
+ %% Clear RecvOpts on listen socket and set Option values
+ ok = inet:setopts(L, FalseRecvOpts ++ OptsVals),
+ {ok,FalseRecvOpts} = inet:getopts(L, RecvOpts),
+ {ok,OptsVals} = inet:getopts(L, Opts),
+ %%
+ %% Set RecvOpts on connecting socket
+ %%
+ {ok,S4} =
+ gen_tcp:connect(
+ Address, P,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts],
+ Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S4, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(S4, Opts),
+ %%
+ %% Accept socket inherits the options from listen socket
+ {ok,S3} = gen_tcp:accept(L, Timeout),
+ {ok,FalseRecvOpts} = inet:getopts(S3, RecvOpts),
+ {ok,OptsVals} = inet:getopts(S3, Opts),
+ %%
+ %% Verify returned remote options
+ {ok,[{pktoptions,[]}]} = inet:getopts(S3, [pktoptions]),
+ {ok,[{pktoptions,OptsVals4}]} = inet:getopts(S4, [pktoptions]),
+ (Result3 = sets_eq(OptsVals4, OptsVals))
+ orelse io:format(
+ "Accept2 differs: ~p neq ~p~n", [OptsVals4,OptsVals]),
+ %%
+ ok = gen_tcp:close(S4),
+ ok = gen_tcp:close(S3),
+ ok = gen_tcp:close(L),
+ (Result1 and ((not CheckConnect) or (Result2 and Result3)))
+ orelse
+ exit({failed,
+ [{OptsVals1,OptsVals4,OptsVals},
+ {OptsVals2,OptsValsDefault}],
+ {OSType,OSVer}}),
+%% exit({{OSType,OSVer},success}), % In search for the truth
+ ok.
+
+sets_eq(L1, L2) ->
+ lists:sort(L1) == lists:sort(L2).
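+%% E.g. sets_eq([{tos,96},{ttl,33}], [{ttl,33},{tos,96}]) -> true,
+%% i.e. equality regardless of the order of the option tuples
+%% (the example call is an illustration, not part of the test).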
+
+
+
%% Accept test utilities (suites are below)
millis() ->
@@ -2205,7 +2467,7 @@ wait_until_accepting(Proc,0) ->
exit({timeout_waiting_for_accepting,Proc});
wait_until_accepting(Proc,N) ->
case process_info(Proc,current_function) of
- {current_function,{prim_inet,accept0,2}} ->
+ {current_function,{prim_inet,accept0,3}} ->
case process_info(Proc,status) of
{status,waiting} ->
ok;
@@ -3056,3 +3318,172 @@ otp_13939(Config) when is_list(Config) ->
exit(Pid, normal),
ct:fail("Server process blocked on send.")
end.
+
+otp_12242(Config) when is_list(Config) ->
+ case os:type() of
+ {win32,_} ->
+            %% Even if we set sndbuf and recbuf to small sizes,
+            %% Windows either happily accepts GBytes of data
+            %% in no time - so the second send below, which is supposed
+            %% to time out, just succeeds - or the first send, which is
+            %% supposed to fill the inet_drv I/O queue and wait until
+            %% more data can be sent, instead sends all data but
+            %% suffers a send failure that closes the socket.
+ {skipped,backpressure_broken_on_win32};
+ _ ->
+ %% Find the IPv4 address of an up and running interface
+ %% that is not loopback nor pointtopoint
+ {ok,IFList} = inet:getifaddrs(),
+ ct:pal("IFList ~p~n", [IFList]),
+ case
+ lists:flatten(
+ [lists:filtermap(
+ fun ({addr,Addr}) when tuple_size(Addr) =:= 4 ->
+ {true,Addr};
+ (_) ->
+ false
+ end, Opts)
+ || {_,Opts} <- IFList,
+ case lists:keyfind(flags, 1, Opts) of
+ {_,Flags} ->
+ lists:member(up, Flags)
+ andalso
+ lists:member(running, Flags)
+ andalso
+ not lists:member(loopback, Flags)
+ andalso
+ not lists:member(pointtopoint, Flags);
+ false ->
+ false
+ end])
+ of
+ [Addr|_] ->
+ otp_12242(Addr);
+ Other ->
+ {skipped,{no_external_address,Other}}
+ end
+ end;
+%%
+otp_12242(Addr) when tuple_size(Addr) =:= 4 ->
+ ct:timetrap(30000),
+ ct:pal("Using address ~p~n", [Addr]),
+ Bufsize = 16 * 1024,
+ Datasize = 128 * 1024 * 1024, % At least 1 s on GBit interface
+ Blob = binary:copy(<<$x>>, Datasize),
+ LOpts =
+ [{backlog,4},{reuseaddr,true},{ip,Addr},
+ binary,{active,false},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ COpts =
+ [binary,{active,false},{ip,Addr},
+ {linger,{true,1}}, % 1 s
+ {send_timeout,500},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ Dir = filename:dirname(code:which(?MODULE)),
+ {ok,ListenerNode} =
+ test_server:start_node(
+ ?UNIQ_NODE_NAME, slave, [{args,"-pa " ++ Dir}]),
+ Tester = self(),
+ Listener =
+ spawn(
+ ListenerNode,
+ fun () ->
+ {ok,L} = gen_tcp:listen(0, LOpts),
+ {ok,LPort} = inet:port(L),
+ Tester ! {self(),port,LPort},
+ {ok,A} = gen_tcp:accept(L),
+ ok = gen_tcp:close(L),
+ receive
+ {Tester,stop} ->
+ ok = gen_tcp:close(A)
+ end
+ end),
+ ListenerMref = monitor(process, Listener),
+ LPort = receive {Listener,port,P} -> P end,
+ {ok,C} = gen_tcp:connect(Addr, LPort, COpts, infinity),
+ {ok,ReadCOpts} = inet:getopts(C, [recbuf,sndbuf,buffer]),
+ ct:pal("ReadCOpts ~p~n", [ReadCOpts]),
+ %%
+ %% Fill the buffers
+ ct:pal("Sending ~p bytes~n", [Datasize]),
+ ok = gen_tcp:send(C, Blob),
+ ct:pal("Sent ~p bytes~n", [Datasize]),
+ %% Spawn the Closer,
+ %% try to ensure that the close call is in progress
+ %% before the owner proceeds with sending
+ Owner = self(),
+ {_Closer,CloserMref} =
+ spawn_opt(
+ fun () ->
+ Owner ! {tref, erlang:start_timer(50, Owner, closing)},
+ ct:pal("Calling gen_tcp:close(C)~n"),
+ try gen_tcp:close(C) of
+ Result ->
+ ct:pal("gen_tcp:close(C) -> ~p~n", [Result]),
+ ok = Result
+ catch
+ Class:Reason:Stacktrace ->
+ ct:pal(
+ "gen_tcp:close(C) >< ~p:~p~n ~p~n",
+ [Class,Reason,Stacktrace]),
+ erlang:raise(Class, Reason, Stacktrace)
+ end
+ end, [link,monitor]),
+ receive
+ {tref,Tref} ->
+ receive {timeout,Tref,_} -> ok end,
+ ct:pal("Sending ~p bytes again~n", [Datasize]),
+            %% By now the close should be in progress...
+            %% All buffers are full, the remote end is not reading,
+            %% and the send timeout is 0.5 s, so this send will time out:
+ {error,timeout} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes again timed out~n", [Datasize]),
+ ok = inet:setopts(C, [{send_timeout,10000}]),
+ %% There is a hidden timeout here. Port close is sampled
+ %% every 5 s by prim_inet:send_recv_reply.
+            %% Linger is 1 s so the Closer will finish this send:
+ ct:pal("Sending ~p bytes with 10 s timeout~n", [Datasize]),
+ {error,closed} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes with 10 s timeout was closed~n",
+ [Datasize]),
+ normal = wait(CloserMref),
+ ct:pal("The Closer has exited~n"),
+ Listener ! {Tester,stop},
+ receive {'DOWN',ListenerMref,_,_,_} -> ok end,
+ ct:pal("The Listener has exited~n"),
+ test_server:stop_node(ListenerNode),
+ ok
+ end.
+
+wait(Mref) ->
+ receive {'DOWN',Mref,_,_,Reason} -> Reason end.
+
+%% OTP-15536
+%% Test that send error works correctly for delay_send
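+%%
+%% Background (summary, not part of the test): with {delay_send,true}
+%% the inet driver queues outgoing data and sends it lazily instead of
+%% sending from within gen_tcp:send/2, so a send error may only become
+%% visible on a later send. E.g. a socket opened with
+%%   gen_tcp:connect(Host, Port, [{delay_send,true},{active,false}])
+%% (Host/Port being placeholders) behaves like socket S below.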
+delay_send_error(_Config) ->
+ {ok, LS} = gen_tcp:listen(0, [{reuseaddr, true}, {packet, 1}, {active, false}]),
+ {ok,{{0,0,0,0},PortNum}}=inet:sockname(LS),
+ P = spawn_link(
+ fun() ->
+ {ok, S} = gen_tcp:accept(LS),
+ receive die -> gen_tcp:close(S) end
+ end),
+ erlang:monitor(process, P),
+ {ok, S} = gen_tcp:connect("localhost", PortNum,
+ [{packet, 1}, {active, false}, {delay_send, true}]),
+
+ %% Do a couple of sends first to see that it works
+ ok = gen_tcp:send(S, "hello"),
+ ok = gen_tcp:send(S, "hello"),
+ ok = gen_tcp:send(S, "hello"),
+
+ %% Make the receiver close
+ P ! die,
+ receive _Down -> ok end,
+
+ ok = gen_tcp:send(S, "hello"),
+ timer:sleep(500), %% Sleep in order for delay_send to have time to trigger
+
+ %% This used to result in a double free
+ {error, closed} = gen_tcp:send(S, "hello").
diff --git a/lib/kernel/test/gen_udp_SUITE.erl b/lib/kernel/test/gen_udp_SUITE.erl
index b39399b18a..af9985de45 100644
--- a/lib/kernel/test/gen_udp_SUITE.erl
+++ b/lib/kernel/test/gen_udp_SUITE.erl
@@ -34,8 +34,9 @@
-export([init_per_testcase/2, end_per_testcase/2]).
-export([send_to_closed/1, active_n/1,
- buffer_size/1, binary_passive_recv/1, bad_address/1,
+ buffer_size/1, binary_passive_recv/1, max_buffer_size/1, bad_address/1,
read_packets/1, open_fd/1, connect/1, implicit_inet6/1,
+ recvtos/1, recvtosttl/1, recvttl/1, recvtclass/1,
local_basic/1, local_unbound/1,
local_fdopen/1, local_fdopen_unbound/1, local_abstract/1]).
@@ -44,9 +45,10 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [send_to_closed, buffer_size, binary_passive_recv,
+ [send_to_closed, buffer_size, binary_passive_recv, max_buffer_size,
bad_address, read_packets, open_fd, connect,
implicit_inet6, active_n,
+ recvtos, recvtosttl, recvttl, recvtclass,
{group, local}].
groups() ->
@@ -237,6 +239,14 @@ buffer_size_server_recv(Socket, IP, Port, Cnt) ->
end.
+%%-------------------------------------------------------------
+%% OTP-15206: Keep buffer small for udp
+%%-------------------------------------------------------------
+max_buffer_size(Config) when is_list(Config) ->
+ {ok, Socket} = gen_udp:open(0, [binary]),
+ ok = inet:setopts(Socket,[{recbuf, 1 bsl 20}]),
+ {ok, [{buffer, 65536}]} = inet:getopts(Socket,[buffer]),
+ gen_udp:close(Socket).
%%-------------------------------------------------------------
%% OTP-3823 gen_udp:recv does not return address in binary mode
@@ -564,6 +574,168 @@ active_n(Config) when is_list(Config) ->
ok.
+
+recvtos(_Config) ->
+ test_recv_opts(
+ inet, [{recvtos,tos,96}],
+ fun recvtos_ok/2).
+
+recvtosttl(_Config) ->
+ test_recv_opts(
+ inet, [{recvtos,tos,96},{recvttl,ttl,33}],
+ fun (OSType, OSVer) ->
+ recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer)
+ end).
+
+recvttl(_Config) ->
+ test_recv_opts(
+ inet, [{recvttl,ttl,33}],
+ fun recvttl_ok/2).
+
+recvtclass(_Config) ->
+ {ok,IFs} = inet:getifaddrs(),
+ case
+ [Name ||
+ {Name,Opts} <- IFs,
+ lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)]
+ of
+ [_] ->
+ test_recv_opts(
+ inet6, [{recvtclass,tclass,224}],
+ fun recvtclass_ok/2);
+ [] ->
+            {skip,{ipv6_not_supported,IFs}}
+ end.
+
+%% These version numbers are just above the highest noted in daily tests
+%% where the test fails for a plausible reason, that is, the lowest
+%% versions on which we can expect the test might succeed, so we
+%% skip on platforms lower than this.
+%%
+%% On newer versions it might be fixed, but we'll see about that
+%% when machines with newer versions get installed...
+%% If the test still fails there for a plausible reason, these
+%% version numbers should simply be increased.
+%% Or maybe we should change to only test on known good platforms?
+
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%%
+recvtos_ok({unix,_}, _) -> true;
+recvtos_ok(_, _) -> false.
+
+recvttl_ok({unix,_}, _) -> true;
+recvttl_ok(_, _) -> false.
+
+%% Using the option returns einval, so it is not implemented.
+recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {9,9,0});
+recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,6,11});
+%%
+recvtclass_ok({unix,_}, _) -> true;
+recvtclass_ok(_, _) -> false.
+
+semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) ->
+ if
+ X1 > X2 -> false;
+ X1 < X2 -> true;
+ Y1 > Y2 -> false;
+ Y1 < Y2 -> true;
+ Z1 > Z2 -> false;
+ Z1 < Z2 -> true;
+ true -> false
+ end;
+semver_lt(_, {_,_,_}) -> false.
+
+test_recv_opts(Family, Spec, OSFilter) ->
+ OSType = os:type(),
+ OSVer = os:version(),
+ case OSFilter(OSType, OSVer) of
+ true ->
+ io:format("Os: ~p, ~p~n", [OSType,OSVer]),
+ test_recv_opts(Family, Spec, OSType, OSVer);
+ false ->
+ {skip,{not_supported_for_os_version,{OSType,OSVer}}}
+ end.
+%%
+test_recv_opts(Family, Spec, _OSType, _OSVer) ->
+ Timeout = 5000,
+ RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec],
+ TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec],
+ FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec],
+ Opts = [Opt || {_,Opt,_} <- Spec],
+ OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec],
+ TrueRecvOpts_OptsVals = TrueRecvOpts ++ OptsVals,
+ Addr =
+ case Family of
+ inet ->
+ {127,0,0,1};
+ inet6 ->
+ {0,0,0,0,0,0,0,1}
+ end,
+ %%
+ {ok,S1} =
+ gen_udp:open(0, [Family,binary,{active,false}|TrueRecvOpts]),
+ {ok,P1} = inet:port(S1),
+ {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S1, FalseRecvOpts),
+ {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S1, TrueRecvOpts_OptsVals),
+ {ok,TrueRecvOpts_OptsVals} = inet:getopts(S1, RecvOpts ++ Opts),
+ %%
+ {ok,S2} =
+ gen_udp:open(0, [Family,binary,{active,true}|FalseRecvOpts]),
+ {ok,P2} = inet:port(S2),
+ {ok,FalseRecvOpts_OptsVals2} = inet:getopts(S2, RecvOpts ++ Opts),
+ OptsVals2 = FalseRecvOpts_OptsVals2 -- FalseRecvOpts,
+ %%
+ ok = gen_udp:send(S2, Addr, P1, <<"abcde">>),
+ ok = gen_udp:send(S1, Addr, P2, <<"fghij">>),
+ {ok,{_,P2,OptsVals3,<<"abcde">>}} = gen_udp:recv(S1, 0, Timeout),
+ verify_sets_eq(OptsVals3, OptsVals2),
+ receive
+ {udp,S2,_,P1,<<"fghij">>} ->
+ ok;
+ Other1 ->
+ exit({unexpected,Other1})
+ after Timeout ->
+ exit(timeout)
+ end,
+ %%
+ ok = inet:setopts(S1, FalseRecvOpts),
+ {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S2, TrueRecvOpts),
+ {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts),
+ %%
+ ok = gen_udp:send(S2, Addr, P1, <<"klmno">>),
+ ok = gen_udp:send(S1, Addr, P2, <<"pqrst">>),
+ {ok,{_,P2,<<"klmno">>}} = gen_udp:recv(S1, 0, Timeout),
+ receive
+ {udp,S2,_,P1,OptsVals4,<<"pqrst">>} ->
+ verify_sets_eq(OptsVals4, OptsVals);
+ Other2 ->
+ exit({unexpected,Other2})
+ after Timeout ->
+ exit(timeout)
+ end,
+ ok = gen_udp:close(S1),
+ ok = gen_udp:close(S2),
+%% exit({{OSType,OSVer},success}), % In search for the truth
+ ok.
+
+verify_sets_eq(L1, L2) ->
+ L = lists:sort(L1),
+ case lists:sort(L2) of
+ L ->
+ ok;
+ _ ->
+ exit({sets_neq,L1,L2})
+ end.
+
+
local_basic(_Config) ->
SFile = local_filename(server),
SAddr = {local,bin_filename(SFile)},
diff --git a/lib/kernel/test/global_SUITE.erl b/lib/kernel/test/global_SUITE.erl
index 0e7b7adc47..8eab36e308 100644
--- a/lib/kernel/test/global_SUITE.erl
+++ b/lib/kernel/test/global_SUITE.erl
@@ -1383,7 +1383,7 @@ ring(Config) when is_list(Config) ->
rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]),
%% sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -1466,7 +1466,7 @@ simple_ring(Config) when is_list(Config) ->
rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
%% sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -1542,7 +1542,7 @@ line(Config) when is_list(Config) ->
rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]),
%% Sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -1626,7 +1626,7 @@ simple_line(Config) when is_list(Config) ->
rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
%% sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -3555,7 +3555,7 @@ single_node(Time, Node, Config) ->
lists:foreach(fun(N) -> _ = erlang:disconnect_node(N) end, nodes()),
?UNTIL(get_known(node()) =:= [node()]),
spawn(?MODULE, init_2, []),
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
net_adm:ping(Node).
init_2() ->
@@ -4009,13 +4009,6 @@ collect_nodes(N, Max) ->
[Node | collect_nodes(N+1, Max)]
end.
-only_element(_E, []) ->
- true;
-only_element(E, [E|R]) ->
- only_element(E, R);
-only_element(_E, _) ->
- false.
-
exit_p(Pid) ->
Ref = erlang:monitor(process, Pid),
Pid ! die,
@@ -4038,6 +4031,11 @@ wait_for_exit_fast(Pid) ->
ok
end.
+sleep(Time) when Time > 0 ->
+ ct:sleep(Time);
+sleep(_Time) ->
+ ok.
+
check_everywhere(Nodes, Name, Config) ->
?UNTIL(begin
case rpc:multicall(Nodes, global, whereis_name, [Name]) of
@@ -4162,10 +4160,10 @@ rpc_cast(Node, Module, Function, Args, File) ->
%% The emulator now ensures that the node has been removed from
%% nodes().
-rpc_disconnect_node(Node, DisconnectedNode, _Config) ->
- True = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]),
- False = lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, [])),
- {true, false} = {True, False}.
+rpc_disconnect_node(Node, DisconnectedNode, Config) ->
+ true = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]),
+ ?UNTIL
+ (not lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, []))).
%%%
%%% Utility
diff --git a/lib/kernel/test/inet_SUITE.erl b/lib/kernel/test/inet_SUITE.erl
index 2e5f8c7d2c..44ec7e7076 100644
--- a/lib/kernel/test/inet_SUITE.erl
+++ b/lib/kernel/test/inet_SUITE.erl
@@ -21,6 +21,7 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/inet.hrl").
+-include_lib("kernel/src/inet_res.hrl").
-include_lib("kernel/src/inet_dns.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -34,7 +35,7 @@
ipv4_to_ipv6/0, ipv4_to_ipv6/1,
host_and_addr/0, host_and_addr/1,
t_gethostnative/1,
- gethostnative_parallell/1, cname_loop/1,
+ gethostnative_parallell/1, cname_loop/1, missing_hosts_reload/1,
gethostnative_soft_restart/0, gethostnative_soft_restart/1,
gethostnative_debug_level/0, gethostnative_debug_level/1,
lookup_bad_search_option/1,
@@ -56,7 +57,7 @@ all() ->
[t_gethostbyaddr, t_gethostbyname, t_getaddr,
t_gethostbyaddr_v6, t_gethostbyname_v6, t_getaddr_v6,
ipv4_to_ipv6, host_and_addr, {group, parse},
- t_gethostnative, gethostnative_parallell, cname_loop,
+ t_gethostnative, gethostnative_parallell, cname_loop, missing_hosts_reload,
gethostnative_debug_level, gethostnative_soft_restart,
lookup_bad_search_option,
getif, getif_ifr_name_overflow, getservbyname_overflow,
@@ -97,7 +98,7 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(lookup_bad_search_option, Config) ->
Db = inet_db,
Key = res_lookup,
- %% The bad option can not enter through inet_db:set_lookup/1,
+ %% The bad option cannot enter through inet_db:set_lookup/1,
%% but through e.g .inetrc.
Prev = ets:lookup(Db, Key),
ets:delete(Db, Key),
@@ -840,6 +841,32 @@ cname_loop(Config) when is_list(Config) ->
ok.
+%% Test that the hosts file gets reloaded correctly in case it
+%% was missing during initial startup
+missing_hosts_reload(Config) when is_list(Config) ->
+ RootDir = proplists:get_value(priv_dir,Config),
+ HostsFile = filename:join(RootDir, atom_to_list(?MODULE) ++ ".hosts"),
+ InetRc = filename:join(RootDir, "inetrc"),
+ ok = file:write_file(InetRc, "{hosts_file, \"" ++ HostsFile ++ "\"}.\n"),
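+    %% The written inetrc now contains a single term such as
+    %% (path shown for illustration): {hosts_file, "/.../inet_SUITE.hosts"}.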
+ {error, enoent} = file:read_file_info(HostsFile),
+ % start a node
+ Pa = filename:dirname(code:which(?MODULE)),
+ {ok, TestNode} = test_server:start_node(?MODULE, slave,
+ [{args, "-pa " ++ Pa ++ " -kernel inetrc '\"" ++ InetRc ++ "\"'"}]),
+ % ensure it has our RC
+ Rc = rpc:call(TestNode, inet_db, get_rc, []),
+ {hosts_file, HostsFile} = lists:keyfind(hosts_file, 1, Rc),
+ % ensure it does not resolve
+ {error, nxdomain} = rpc:call(TestNode, inet_hosts, gethostbyname, ["somehost"]),
+ % write hosts file
+ ok = file:write_file(HostsFile, "1.2.3.4 somehost"),
+ % wait for cached timestamp to expire
+ timer:sleep(?RES_FILE_UPDATE_TM * 1000 + 100),
+ % ensure it DOES resolve
+ {ok,{hostent,"somehost",[],inet,4,[{1,2,3,4}]}} =
+ rpc:call(TestNode, inet_hosts, gethostbyname, ["somehost"]),
+ % cleanup
+ true = test_server:stop_node(TestNode).
%% These must be run in the whole suite since they need
%% the host list and require inet_gethost_native to be started.
@@ -1060,28 +1087,26 @@ getservbyname_overflow(Config) when is_list(Config) ->
getifaddrs(Config) when is_list (Config) ->
{ok,IfAddrs} = inet:getifaddrs(),
io:format("IfAddrs = ~p.~n", [IfAddrs]),
- case
- {os:type(),
- [If ||
- {If,Opts} <- IfAddrs,
- lists:keymember(hwaddr, 1, Opts)]} of
- {{unix,sunos},[]} -> ok;
- {OT,[]} ->
- ct:fail({should_have_hwaddr,OT});
- _ -> ok
+ case [If || {If,Opts} <- IfAddrs, lists:keymember(hwaddr, 1, Opts)] of
+ [] ->
+ case os:type() of
+ {unix,sunos} -> ok;
+ OT ->
+ ct:fail({should_have_hwaddr,OT})
+ end;
+ [_|_] -> ok
end,
- Addrs =
- [element(1, A) || A <- ifaddrs(IfAddrs)],
+ Addrs = ifaddrs(IfAddrs),
io:format("Addrs = ~p.~n", [Addrs]),
[check_addr(Addr) || Addr <- Addrs],
ok.
-check_addr({addr,Addr})
+check_addr(Addr)
when tuple_size(Addr) =:= 8,
element(1, Addr) band 16#FFC0 =:= 16#FE80 ->
io:format("Addr: ~p link local; SKIPPED!~n", [Addr]),
ok;
-check_addr({addr,Addr}) ->
+check_addr(Addr) ->
io:format("Addr: ~p.~n", [Addr]),
Ping = "ping",
Pong = "pong",
@@ -1097,78 +1122,86 @@ check_addr({addr,Addr}) ->
ok = gen_tcp:close(S2),
ok = gen_tcp:close(L).
--record(ifopts, {name,flags,addrs=[],hwaddr}).
-
-ifaddrs([]) -> [];
-ifaddrs([{If,Opts}|IOs]) ->
- #ifopts{flags=F} = Ifopts = check_ifopts(Opts, #ifopts{name=If}),
- case F of
- {flags,Flags} ->
- case lists:member(running, Flags) of
- true -> Ifopts#ifopts.addrs;
- false -> []
- end ++ ifaddrs(IOs);
- undefined ->
- ifaddrs(IOs)
+ifaddrs(IfOpts) ->
+ IfMap = collect_ifopts(IfOpts),
+ ChkFun =
+ fun Self({{_,Flags} = Key, Opts}, ok) ->
+ Broadcast = lists:member(broadcast, Flags),
+ P2P = lists:member(pointtopoint, Flags),
+ case Opts of
+ [{addr,_},{netmask,_},{broadaddr,_}|Os]
+ when Broadcast ->
+ Self({Key, Os}, ok);
+ [{addr,_},{netmask,_},{dstaddr,_}|Os]
+ when P2P ->
+ Self({Key, Os}, ok);
+ [{addr,_},{netmask,_}|Os] ->
+ Self({Key, Os}, ok);
+ [{hwaddr,_}|Os] ->
+ Self({Key, Os}, ok);
+ [] ->
+ ok
+ end
+ end,
+ fold_ifopts(ChkFun, ok, IfMap),
+ AddrsFun =
+ fun ({{_,Flags}, Opts}, Acc) ->
+ case
+ lists:member(running, Flags)
+ andalso (not lists:member(pointtopoint, Flags))
+ of
+ true ->
+ lists:reverse(
+ [Addr || {addr,Addr} <- Opts],
+ Acc);
+ false ->
+ Acc
+ end
+ end,
+ fold_ifopts(AddrsFun, [], IfMap).
+
+collect_ifopts(IfOpts) ->
+ collect_ifopts(IfOpts, #{}).
+%%
+collect_ifopts(IfOpts, IfMap) ->
+ case IfOpts of
+ [{If,[{flags,Flags}|Opts]}|IfOs] ->
+ Key = {If,Flags},
+ case maps:is_key(Key, IfMap) of
+ true ->
+ ct:fail({unexpected_ifopts,IfOpts,IfMap});
+ false ->
+ collect_ifopts(IfOs, IfMap, Opts, Key, [])
+ end;
+ [] ->
+ IfMap;
+ _ ->
+ ct:fail({unexpected_ifopts,IfOpts,IfMap})
+ end.
+%%
+collect_ifopts(IfOpts, IfMap, Opts, Key, R) ->
+ case Opts of
+ [{flags,_}|_] ->
+ {If,_} = Key,
+ collect_ifopts(
+ [{If,Opts}|IfOpts], maps:put(Key, lists:reverse(R), IfMap));
+ [OptVal|Os] ->
+ collect_ifopts(IfOpts, IfMap, Os, Key, [OptVal|R]);
+ [] ->
+ collect_ifopts(IfOpts, maps:put(Key, lists:reverse(R), IfMap))
end.
-check_ifopts([], #ifopts{flags=F,addrs=Raddrs}=Ifopts) ->
- Addrs = lists:reverse(Raddrs),
- R = Ifopts#ifopts{addrs=Addrs},
- io:format("~p.~n", [R]),
- %% See how we did...
- {flags,Flags} = F,
- case lists:member(broadcast, Flags) of
- true ->
- [case A of
- {{addr,_},{netmask,_},{broadaddr,_}} ->
- A;
- {{addr,T},{netmask,_}} when tuple_size(T) =:= 8 ->
- A
- end || A <- Addrs];
- false ->
- case lists:member(pointtopoint, Flags) of
- true ->
- [case A of
- {{addr,_},{netmask,_},{dstaddr,_}} ->
- A
- end || A <- Addrs];
- false ->
- [case A of
- {{addr,_},{netmask,_}} ->
- A
- end || A <- Addrs]
- end
- end,
- R;
-check_ifopts([{flags,_}=F|Opts], #ifopts{flags=undefined}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{flags=F});
-check_ifopts([{flags,_}=F|Opts], #ifopts{flags=Flags}=Ifopts) ->
- case F of
- Flags ->
- check_ifopts(Opts, Ifopts);
- _ ->
- ct:fail({multiple_flags,F,Ifopts})
- end;
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N,{dstaddr,_}=D|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,D}|Addrs]});
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N,{broadaddr,_}=B|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,B}|Addrs]});
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N}|Addrs]});
-check_ifopts([{addr,_}=A|Opts], #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A}|Addrs]});
-check_ifopts([{hwaddr,Hwaddr}=H|Opts], #ifopts{hwaddr=undefined}=Ifopts)
- when is_list(Hwaddr) ->
- check_ifopts(Opts, Ifopts#ifopts{hwaddr=H});
-check_ifopts([{hwaddr,_}=H|_], #ifopts{}=Ifopts) ->
- ct:fail({multiple_hwaddrs,H,Ifopts}).
+fold_ifopts(Fun, Acc, IfMap) ->
+ fold_ifopts(Fun, Acc, IfMap, maps:keys(IfMap)).
+%%
+fold_ifopts(Fun, Acc, IfMap, Keys) ->
+ case Keys of
+ [Key|Ks] ->
+ Opts = maps:get(Key, IfMap),
+ fold_ifopts(Fun, Fun({Key,Opts}, Acc), IfMap, Ks);
+ [] ->
+ Acc
+ end.
%% Works just like lists:member/2, except that any {127,_,_,_} tuple
%% matches any other {127,_,_,_}. We do this to handle Linux systems
diff --git a/lib/kernel/test/inet_res_SUITE.erl b/lib/kernel/test/inet_res_SUITE.erl
index df6e48abae..6b545fa414 100644
--- a/lib/kernel/test/inet_res_SUITE.erl
+++ b/lib/kernel/test/inet_res_SUITE.erl
@@ -46,15 +46,16 @@
%% a temporary local nameserver BIND 8 or 9 that must be installed
%% on your machine.
%%
-%% For example, on Ubuntu 14.04, as root:
+%% For example, on Ubuntu 16.04 / 18.04, as root:
%% apt-get install bind9
%% Now, that is not enough since Apparmor will not allow
%% the nameserver daemon /usr/sbin/named to read from the test directory.
%% Assuming that you run tests in /ldisk/daily_build, and still on
-%% Ubuntu 14.04, make /usr/apparmor.d/local/usr.sbin.named contain:
+%% Ubuntu 16.04 / 18.04, make /etc/apparmor.d/local/usr.sbin.named contain:
%% /ldisk/daily_build/** r,
%% And yes; the trailing comma must be there...
-
+%% And yes; create the file if it does not exist.
+%% And yes; restart the apparmor daemon using "service apparmor restart"
suite() ->
@@ -531,7 +532,7 @@ edns0(Config) when is_list(Config) ->
case os:version() of
{M,V,_} when M < 5; M == 5, V =< 8 ->
%% In our test park only known platform
- %% with an DNS resolver that can not do
+ %% with an DNS resolver that cannot do
%% EDNS0.
{comment,"No EDNS0"}
end
diff --git a/lib/kernel/test/inet_sockopt_SUITE.erl b/lib/kernel/test/inet_sockopt_SUITE.erl
index ada9c2689c..27ff74e309 100644
--- a/lib/kernel/test/inet_sockopt_SUITE.erl
+++ b/lib/kernel/test/inet_sockopt_SUITE.erl
@@ -110,9 +110,14 @@ simple(Config) when is_list(Config) ->
{S1,S2} = create_socketpair(Opt, Opt),
{ok,Opt} = inet:getopts(S1,OptTags),
{ok,Opt} = inet:getopts(S2,OptTags),
- COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- Opt],
+ NoPushOpt = case os:type() of
+ {unix, Osname} when Osname =:= linux; Osname =:= freebsd -> {nopush, true};
+ {_,_} -> {nopush, false}
+ end,
+ COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- [NoPushOpt|Opt]],
+ COptTags = [X || {X,_} <- COpt],
inet:setopts(S1,COpt),
- {ok,COpt} = inet:getopts(S1,OptTags),
+ {ok,COpt} = inet:getopts(S1,COptTags),
{ok,Opt} = inet:getopts(S2,OptTags),
gen_tcp:close(S1),
gen_tcp:close(S2),
diff --git a/lib/kernel/test/init_SUITE.erl b/lib/kernel/test/init_SUITE.erl
index 6a006cdc01..a0154b2694 100644
--- a/lib/kernel/test/init_SUITE.erl
+++ b/lib/kernel/test/init_SUITE.erl
@@ -295,7 +295,7 @@ is_real_system(KernelVsn, StdlibVsn) ->
%% before restart.
%% ------------------------------------------------
many_restarts() ->
- [{timetrap,{minutes,8}}].
+ [{timetrap,{minutes,16}}].
many_restarts(Config) when is_list(Config) ->
{ok, Node} = loose_node:start(init_test, "", ?DEFAULT_TIMEOUT_SEC),
@@ -315,7 +315,7 @@ loop_restart(N,Node,EHPid) ->
loose_node:stop(Node),
ct:fail(not_stopping)
end,
- ok = wait_for(30, Node, EHPid),
+ ok = wait_for(60, Node, EHPid),
loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[logger])).
wait_for(0,Node,_) ->
@@ -367,7 +367,8 @@ restart(Config) when is_list(Config) ->
SysProcs0 = rpc:call(Node, ?MODULE, find_system_processes, []),
io:format("SysProcs0=~p~n", [SysProcs0]),
[InitPid, PurgerPid, LitCollectorPid,
- DirtySigNPid, DirtySigHPid, DirtySigMPid] = SysProcs0,
+ DirtySigNPid, DirtySigHPid, DirtySigMPid,
+ PrimFilePid] = SysProcs0,
InitPid = rpc:call(Node, erlang, whereis, [init]),
PurgerPid = rpc:call(Node, erlang, whereis, [erts_code_purger]),
Procs = rpc:call(Node, erlang, processes, []),
@@ -385,7 +386,8 @@ restart(Config) when is_list(Config) ->
SysProcs1 = rpc:call(Node, ?MODULE, find_system_processes, []),
io:format("SysProcs1=~p~n", [SysProcs1]),
[InitPid1, PurgerPid1, LitCollectorPid1,
- DirtySigNPid1, DirtySigHPid1, DirtySigMPid1] = SysProcs1,
+ DirtySigNPid1, DirtySigHPid1, DirtySigMPid1,
+ PrimFilePid1] = SysProcs1,
%% Still the same init process!
InitPid1 = rpc:call(Node, erlang, whereis, [init]),
@@ -411,6 +413,10 @@ restart(Config) when is_list(Config) ->
DirtySigMP = pid_to_list(DirtySigMPid),
DirtySigMP = pid_to_list(DirtySigMPid1),
+ %% and same prim_file helper process!
+ PrimFileP = pid_to_list(PrimFilePid),
+ PrimFileP = pid_to_list(PrimFilePid1),
+
NewProcs0 = rpc:call(Node, erlang, processes, []),
NewProcs = NewProcs0 -- SysProcs1,
case check_processes(NewProcs, MaxPid) of
@@ -437,7 +443,8 @@ restart(Config) when is_list(Config) ->
literal_collector,
dirty_sig_handler_normal,
dirty_sig_handler_high,
- dirty_sig_handler_max}).
+ dirty_sig_handler_max,
+ prim_file}).
find_system_processes() ->
find_system_procs(processes(), #sys_procs{}).
@@ -448,10 +455,11 @@ find_system_procs([], SysProcs) ->
SysProcs#sys_procs.literal_collector,
SysProcs#sys_procs.dirty_sig_handler_normal,
SysProcs#sys_procs.dirty_sig_handler_high,
- SysProcs#sys_procs.dirty_sig_handler_max];
+ SysProcs#sys_procs.dirty_sig_handler_max,
+ SysProcs#sys_procs.prim_file];
find_system_procs([P|Ps], SysProcs) ->
case process_info(P, [initial_call, priority]) of
- [{initial_call,{otp_ring0,start,2}},_] ->
+ [{initial_call,{erl_init,start,2}},_] ->
undefined = SysProcs#sys_procs.init,
find_system_procs(Ps, SysProcs#sys_procs{init = P});
[{initial_call,{erts_code_purger,start,0}},_] ->
@@ -472,6 +480,9 @@ find_system_procs([P|Ps], SysProcs) ->
{priority,max}] ->
undefined = SysProcs#sys_procs.dirty_sig_handler_max,
find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_max = P});
+ [{initial_call,{prim_file,start,0}},_] ->
+ undefined = SysProcs#sys_procs.prim_file,
+ find_system_procs(Ps, SysProcs#sys_procs{prim_file = P});
_ ->
find_system_procs(Ps, SysProcs)
end.
diff --git a/lib/kernel/test/kernel.spec b/lib/kernel/test/kernel.spec
index 62afc9f97b..eaa17f3a59 100644
--- a/lib/kernel/test/kernel.spec
+++ b/lib/kernel/test/kernel.spec
@@ -2,3 +2,4 @@
{config, "../test_server/ts.unix.config"}.
{suites,"../kernel_test", all}.
+{skip_suites,"../kernel_test",[logger_stress_SUITE],"Benchmarks only"}.
diff --git a/lib/kernel/test/kernel_bench.spec b/lib/kernel/test/kernel_bench.spec
index 4de133f21b..898ceb59e0 100644
--- a/lib/kernel/test/kernel_bench.spec
+++ b/lib/kernel/test/kernel_bench.spec
@@ -1,2 +1,3 @@
{groups,"../kernel_test",zlib_SUITE,[bench]}.
{groups,"../kernel_test",file_SUITE,[bench]}.
+{suites,"../kernel_test",[logger_stress_SUITE]}.
diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl
index 9207025a2c..57c44c2498 100644
--- a/lib/kernel/test/kernel_config_SUITE.erl
+++ b/lib/kernel/test/kernel_config_SUITE.erl
@@ -21,7 +21,8 @@
-include_lib("common_test/include/ct.hrl").
--export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2, sync/1]).
+-export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2,
+ start_distribution_false/1, sync/1]).
-export([init_per_suite/1, end_per_suite/1]).
@@ -30,7 +31,7 @@ suite() ->
{timetrap,{minutes,2}}].
all() ->
- [sync].
+ [sync, start_distribution_false].
groups() ->
[].
@@ -59,12 +60,9 @@ from(H, [H | T]) -> T;
from(H, [_ | T]) -> from(H, T);
from(_, []) -> [].
-%%-----------------------------------------------------------------
-%% Test suite for sync_nodes. This is quite tricky.
-%%
+%% Test sync_nodes. This is quite tricky.
%% Should be started in a CC view with:
%% erl -sname XXX where XX not in [cp1, cp2]
-%%-----------------------------------------------------------------
sync(Conf) when is_list(Conf) ->
%% Write a config file
Dir = proplists:get_value(priv_dir,Conf),
@@ -106,9 +104,18 @@ wait_for_node(Node) ->
_Other -> wait_for_node(Node)
end.
-
stop_node(Node) ->
M = list_to_atom(lists:concat([Node,
[$@],
from($@,atom_to_list(node()))])),
rpc:cast(M, erlang, halt, []).
+
+start_distribution_false(Config) when is_list(Config) ->
+ %% When distribution is disabled, -sname/-name has no effect
+ Str = os:cmd(ct:get_progname()
+ ++ " -kernel start_distribution false"
+ ++ " -sname no_distribution"
+ ++ " -eval \"erlang:display(node())\""
+ ++ " -noshell -s erlang halt"),
+ "'nonode@nohost'" ++ _ = Str,
+ ok.
diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover
index 960bc0abff..9691aa295e 100644
--- a/lib/kernel/test/logger.cover
+++ b/lib/kernel/test/logger.cover
@@ -4,9 +4,12 @@
logger_backend,
logger_config,
logger_disk_log_h,
- logger_h_common,
logger_filters,
logger_formatter,
+ logger_handler_watcher,
+ logger_h_common,
+ logger_olp,
+ logger_proxy,
logger_server,
logger_simple_h,
logger_std_h,
diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec
index 1ab90b3e93..3aec37951d 100644
--- a/lib/kernel/test/logger.spec
+++ b/lib/kernel/test/logger.spec
@@ -7,5 +7,7 @@
logger_filters_SUITE,
logger_formatter_SUITE,
logger_legacy_SUITE,
+ logger_olp_SUITE,
+ logger_proxy_SUITE,
logger_simple_h_SUITE,
logger_std_h_SUITE]}.
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
index b367b4dd54..035e5d8974 100644
--- a/lib/kernel/test/logger_SUITE.erl
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -37,7 +37,8 @@
suite() ->
- [{timetrap,{seconds,30}}].
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
init_per_suite(Config) ->
case logger:get_handler_config(?STANDARD_HANDLER) of
@@ -87,6 +88,7 @@ all() ->
macros,
set_level,
set_module_level,
+ set_application_level,
cache_module_level,
format_report,
filter_failed,
@@ -97,7 +99,10 @@ all() ->
via_logger_process,
other_node,
compare_levels,
- process_metadata].
+ process_metadata,
+ app_config,
+ kernel_config,
+ pretty_print].
start_stop(_Config) ->
S = whereis(logger),
@@ -210,6 +215,7 @@ add_remove_filter(cleanup,_Config) ->
change_config(_Config) ->
%% Overwrite handler config - check that defaults are added
+ {error,{not_found,h1}} = logger:set_handler_config(h1,#{}),
ok = logger:add_handler(h1,?MODULE,#{level=>notice,custom=>custom}),
{ok,#{module:=?MODULE,level:=notice,filter_default:=log,custom:=custom}} =
logger:get_handler_config(h1),
@@ -241,6 +247,18 @@ change_config(_Config) ->
{ok,C4} = logger:get_handler_config(h1),
C4 = C3#{custom:=new_custom},
+    %% Change handler config: Id and module cannot be changed
+ {error,{illegal_config_change,Old,New}} =
+ logger:set_handler_config(h1,id,newid),
+ %% Check that only the faulty field is included in return
+ [{id,h1}] = maps:to_list(Old),
+ [{id,newid}] = maps:to_list(New),
+ %% Check that both fields are included when both are changed
+ {error,{illegal_config_change,
+ #{id:=h1,module:=?MODULE},
+ #{id:=newid,module:=newmodule}}} =
+ logger:set_handler_config(h1,#{id=>newid,module=>newmodule}),
+
%% Change primary config: Single key
PConfig0 = logger:get_primary_config(),
ok = logger:set_primary_config(level,warning),
@@ -258,7 +276,7 @@ change_config(_Config) ->
logger:get_primary_config(),
3 = maps:size(PC1),
%% Check that internal 'handlers' field has not been changed
- MS = [{{{?HANDLER_KEY,'$1'},'_','_','_'},[],['$1']}],
+ MS = [{{{?HANDLER_KEY,'$1'},'_','_'},[],['$1']}],
HIds1 = lists:sort(ets:select(?LOGGER_TABLE,MS)), % dirty, internal data
HIds2 = lists:sort(logger:get_handler_ids()),
HIds1 = HIds2,
@@ -355,6 +373,7 @@ log_all_levels_api(cleanup,_Config) ->
macros(_Config) ->
ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
test_macros(emergency),
+ test_log_macro(alert),
ok.
macros(cleanup,_Config) ->
@@ -387,6 +406,8 @@ set_module_level(_Config) ->
{error,{invalid_level,bad}} = logger:set_module_level(?MODULE,bad),
{error,{not_a_list_of_modules,{bad}}} =
logger:set_module_level({bad},warning),
+ {error,{not_a_list_of_modules,[{bad}]}} =
+ logger:set_module_level([{bad}],warning),
ok = logger:set_module_level(?MODULE,warning),
[{?MODULE,warning}] = logger:get_module_level([?MODULE,other]),
[{?MODULE,warning}] = logger:get_module_level(?MODULE),
@@ -403,6 +424,7 @@ set_module_level(_Config) ->
ok = check_logged(notice,M2,?MY_LOC(1)),
{error,{not_a_list_of_modules,{bad}}} = logger:unset_module_level({bad}),
+ {error,{not_a_list_of_modules,[{bad}]}} = logger:unset_module_level([{bad}]),
ok = logger:unset_module_level(?MODULE),
[] = logger:get_module_level([?MODULE,other]),
[] = logger:get_module_level(?MODULE),
@@ -422,6 +444,39 @@ set_module_level(cleanup,_Config) ->
logger:unset_module_level(?MODULE),
ok.
+set_application_level(_Config) ->
+
+ {error,{not_loaded,mnesia}} = logger:set_application_level(mnesia, warning),
+ {error,{not_loaded,mnesia}} = logger:unset_application_level(mnesia),
+
+ case application:load(mnesia) of
+ ok ->
+ {ok, Modules} = application:get_key(mnesia, modules),
+ [] = logger:get_module_level(Modules),
+
+ {error,{invalid_level,warn}} =
+ logger:set_application_level(mnesia, warn),
+
+ ok = logger:set_application_level(mnesia, debug),
+ DebugModules = lists:sort([{M,debug} || M <- Modules]),
+ DebugModules = lists:sort(logger:get_module_level(Modules)),
+
+ ok = logger:set_application_level(mnesia, warning),
+
+ WarnModules = lists:sort([{M,warning} || M <- Modules]),
+ WarnModules = lists:sort(logger:get_module_level(Modules)),
+
+ ok = logger:unset_application_level(mnesia),
+ [] = logger:get_module_level(Modules);
+ {error,{"no such file or directory","mnesia.app"}} ->
+ {skip, "Cannot load mnesia, does not exist"}
+ end.
+
+set_application_level(cleanup,_Config) ->
+ _ = logger:unset_application_level(mnesia),
+ _ = application:unload(mnesia),
+ ok.
+
cache_module_level(_Config) ->
ok = logger:unset_module_level(?MODULE),
[] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config?
@@ -469,7 +524,11 @@ filter_failed(_Config) ->
%% Logger filters
{error,{invalid_filter,_}} =
logger:add_primary_filter(lf,{fun(_) -> ok end,args}),
- ok = logger:add_primary_filter(lf,{fun(_,_) -> a=b end,args}),
+ ok = logger:add_primary_filter(lf,
+ {fun(_,_) ->
+ erlang:error({badmatch,b})
+ end,
+ args}),
#{filters:=[_]} = logger:get_primary_config(),
ok = logger:notice(M1=?map_rep),
ok = check_logged(notice,M1,#{}),
@@ -487,7 +546,11 @@ filter_failed(_Config) ->
{error,{not_found,h0}} = logger:remove_handler_filter(h0,hf),
{error,{invalid_filter,_}} =
logger:add_handler_filter(h1,hf,{fun(_) -> ok end,args}),
- ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> a=b end,args}),
+ ok = logger:add_handler_filter(h1,hf,
+ {fun(_,_) ->
+ erlang:error({badmatch,b})
+ end,
+ args}),
{ok,#{filters:=[_]}} = logger:get_handler_config(h1),
ok = logger:notice(M3=?map_rep),
ok = check_logged(notice,M3,#{}),
@@ -509,7 +572,7 @@ handler_failed(_Config) ->
register(callback_receiver,self()),
{error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}),
{error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}),
- {error,{invalid_handler_config,bad}} = logger:add_handler(h1,?MODULE,bad),
+ {error,{invalid_config,bad}} = logger:add_handler(h1,?MODULE,bad),
{error,{invalid_filters,false}} =
logger:add_handler(h1,?MODULE,#{filters=>false}),
{error,{invalid_filter_default,true}} =
@@ -523,7 +586,11 @@ handler_failed(_Config) ->
false = lists:search(fun(#{id:=h1}) -> true; (_) -> false end,H1),
{error,{not_found,h1}} = logger:remove_handler(h1),
- ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,log_call=>fun() -> a = b end}),
+ ok = logger:add_handler(h2,?MODULE,
+ #{filter_default => log,
+ log_call => fun() ->
+ erlang:error({badmatch,b})
+ end}),
{error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}),
[add] = test_server:messages_get(),
@@ -534,7 +601,7 @@ handler_failed(_Config) ->
{error,{not_found,h2}} = logger:remove_handler(h2),
CallAddHandler = fun() -> logger:add_handler(h2,?MODULE,#{}) end,
- CrashHandler = fun() -> a = b end,
+ CrashHandler = fun() -> erlang:error({badmatch,b}) end,
KillHandler = fun() -> exit(self(), die) end,
{error,{handler_not_added,{attempting_syncronous_call_to_self,_}}} =
@@ -580,7 +647,8 @@ handler_failed(cleanup,_Config) ->
ok.
config_sanity_check(_Config) ->
- %% Logger config
+ %% Primary config
+ {error,{invalid_config,bad}} = logger:set_primary_config(bad),
{error,{invalid_filter_default,bad}} =
logger:set_primary_config(filter_default,bad),
{error,{invalid_level,bad}} = logger:set_primary_config(level,bad),
@@ -831,14 +899,14 @@ process_metadata(_Config) ->
undefined = logger:get_process_metadata(),
{error,badarg} = ?TRY(logger:set_process_metadata(bad)),
ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
- Time = erlang:system_time(microsecond),
+ Time = logger:timestamp(),
ProcMeta = #{time=>Time,line=>0,custom=>proc},
ok = logger:set_process_metadata(ProcMeta),
S1 = ?str,
?LOG_NOTICE(S1,#{custom=>macro}),
check_logged(notice,S1,#{time=>Time,line=>0,custom=>macro}),
- Time2 = erlang:system_time(microsecond),
+ Time2 = logger:timestamp(),
S2 = ?str,
?LOG_NOTICE(S2,#{time=>Time2,line=>1,custom=>macro}),
check_logged(notice,S2,#{time=>Time2,line=>1,custom=>macro}),
@@ -853,12 +921,286 @@ process_metadata(_Config) ->
ok = logger:unset_process_metadata(),
undefined = logger:get_process_metadata(),
+ ok = logger:update_process_metadata(#{custom=>added_again}),
+ {error,badarg} = ?TRY(logger:update_process_metadata(bad)),
+ #{custom:=added_again} = logger:get_process_metadata(),
+
ok.
process_metadata(cleanup,_Config) ->
logger:remove_handler(h1),
ok.
+app_config(Config) ->
+ %% Start a node with default configuration
+ {ok,_,Node} = logger_test_lib:setup(Config,[]),
+
+ App1Name = app1,
+ App1 = {application, App1Name,
+ [{description, "Test of app with logger config"},
+ {applications, [kernel]}]},
+ ok = rpc:call(Node,application,load,[App1]),
+ ok = rpc:call(Node,application,set_env,
+ [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+
+    %% Try to add our own default handler
+ {error,{bad_config,{handler,{app1,{already_exist,default}}}}} =
+ rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ %% Add a different handler
+ ok = rpc:call(Node,application,set_env,[App1Name,logger,
+ [{handler,myh,logger_std_h,#{}}]]),
+ ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ {ok,#{filters:=DF}} = rpc:call(Node,logger,get_handler_config,[default]),
+ {ok,#{filters:=[]}} = rpc:call(Node,logger,get_handler_config,[myh]),
+
+ true = test_server:stop_node(Node),
+
+    %% Start a node with no default handler, then add our own default handler
+ {ok,#{handlers:=[#{id:=simple}]},Node} =
+ logger_test_lib:setup(Config,[{logger,[{handler,default,undefined}]}]),
+
+ ok = rpc:call(Node,application,load,[App1]),
+ ok = rpc:call(Node,application,set_env,
+ [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+ ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ #{handlers:=[#{id:=default,filters:=DF}]} =
+ rpc:call(Node,logger,get_config,[]),
+
+ true = test_server:stop_node(Node),
+
+    %% Start a silent node, then add our own default handler
+ {ok,#{handlers:=[]},Node} =
+ logger_test_lib:setup(Config,[{error_logger,silent}]),
+
+ {error,{bad_config,{handler,[{some,bad,config}]}}} =
+ rpc:call(Node,logger,add_handlers,[[{some,bad,config}]]),
+ ok = rpc:call(Node,logger,add_handlers,
+ [[{handler,default,logger_std_h,#{}}]]),
+
+ #{handlers:=[#{id:=default,filters:=DF}]} =
+ rpc:call(Node,logger,get_config,[]),
+
+ ok.
+
+%% This test case is mainly for code coverage. Note that
+%% logger_env_var_SUITE tests much of the same functionality and
+%% checks it more thoroughly, but since it all happens at node
+%% start, it is not possible to get code coverage from that test.
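+%%
+%% For reference, the kernel 'logger' parameter manipulated below has
+%% the same shape as it would have in a sys.config file, e.g. (file
+%% name and level are illustrative only):
+%%   [{kernel,
+%%     [{logger,
+%%       [{handler, default, logger_std_h,
+%%         #{config => #{type => {file, "log/erlang.log"}}}}]},
+%%      {logger_level, info}]}].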
+kernel_config(Config) ->
+ %% Start a node with simple handler only, then simulate kernel
+ %% start by calling internally exported
+ %% internal_init_logger(). This is to test all variants of kernel
+ %% config, including bad config, and see the code coverage.
+ {ok,#{handlers:=[#{id:=simple,filters:=DF}]}=LC,Node} =
+ logger_test_lib:setup(Config,[{error_logger,false}]),
+
+ %% Same once more, to get coverage
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ LC = rpc:call(Node,logger,get_config,[]),
+
+ %% This shall mean the same as above, but using 'logger' parameter
+ %% instead of 'error_logger'
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{handler,default,undefined}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ LC = rpc:call(Node,logger,get_config,[]),
+
+ %% Silent
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,silent]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Default
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% error_logger=tty (same as default)
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,tty]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% error_logger={file,File}
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ F = filename:join(?config(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,{file,F}]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+ [append,delayed_write,raw] = lists:sort(Modes),
+
+
+ %% Same, but using 'logger' parameter instead of 'error_logger'
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,F}}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Same, but with type={file,File,Modes}
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ M = [raw,write],
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,F,M}}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=[delayed_write|M]}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Same, but with disk_log handler
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_disk_log_h,
+ #{config=>#{file=>F}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{file:=F}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Set primary filters and module level. No default handler.
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{handler,default,undefined},
+ {filters,stop,[{f1,{fun(_,_) -> log end,ok}}]},
+ {module_level,debug,[?MODULE]}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=stop,filters:=[_]},
+ handlers:=[],
+ module_levels:=[{?MODULE,debug}]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Bad config
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,bad]),
+ {error,{bad_config,{kernel,{error_logger,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger_level,bad]),
+ {error,{bad_config,{kernel,{logger_level,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,unset_env,[kernel,logger_level]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[bad]}]]),
+ {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[bad]}]]),
+ {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[{f1,bad}]}]]),
+ {error,{bad_config,{kernel,{invalid_filter,{f1,bad}}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,MF=[{filters,stop,[]},{filters,log,[]}]]),
+ {error,{bad_config,{kernel,{multiple_filters,MF}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{module_level,bad,[?MODULE]}]]),
+ {error,{bad_config,{kernel,{invalid_level,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok.
+
+pretty_print(Config) ->
+ ok = logger:add_handler(?FUNCTION_NAME,logger_std_h,#{}),
+ ok = logger:set_module_level([module1,module2],debug),
+
+ ct:capture_start(),
+ logger:i(),
+ ct:capture_stop(),
+ I0 = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(primary),
+ ct:capture_stop(),
+ IPrim = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(handlers),
+ ct:capture_stop(),
+ IHs = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(proxy),
+ ct:capture_stop(),
+ IProxy = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(modules),
+ ct:capture_stop(),
+ IMs = ct:capture_get(),
+
+ I02 = lists:append([IPrim,IHs,IProxy,IMs]),
+ %% ct:log("~p~n",[I0]),
+ %% ct:log("~p~n",[I02]),
+ I0 = I02,
+
+ ct:capture_start(),
+ logger:i(handlers),
+ ct:capture_stop(),
+ IHs = ct:capture_get(),
+
+ Ids = logger:get_handler_ids(),
+ IHs2 =
+ lists:append(
+ [begin
+ ct:capture_start(),
+ logger:i(Id),
+ ct:capture_stop(),
+ [_|IH] = ct:capture_get(),
+ IH
+ end || Id <- Ids]),
+
+ %% ct:log("~p~n",[IHs]),
+ %% ct:log("~p~n",[["Handler configuration: \n"|IHs2]]),
+ IHs = ["Handler configuration: \n"|IHs2],
+ ok.
+
%%%-----------------------------------------------------------------
%%% Internal
check_logged(Level,Format,Args,Meta) ->
@@ -1009,6 +1351,34 @@ test_macros(emergency=Level) ->
[{F2,x},{error,fun_that_crashes}],#{}),
ok.
+test_log_macro(Level) ->
+ ?LOG(Level,#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)),
+ ?LOG(Level,#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}),
+ ?LOG(Level,"~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)),
+ ?LOG(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}),
+ ?LOG(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],
+ (?MY_LOC(3))#{my=>meta}),
+ ?LOG(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},
+ (?MY_LOC(2))#{my=>meta}),
+ ?LOG(Level,fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)),
+ F1=fun(x) -> {fun_to_bad} end,
+ ?LOG(Level,F1,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ F2=fun(x) -> erlang:error(fun_that_crashes) end,
+ ?LOG(Level,F2,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
%%%-----------------------------------------------------------------
%%% Called by macro ?TRY(X)
my_try(Fun) ->
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
index 7e5b574869..36d98eaa25 100644
--- a/lib/kernel/test/logger_disk_log_h_SUITE.erl
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -24,6 +24,7 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/logger.hrl").
-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_olp.hrl").
-include_lib("kernel/src/logger_h_common.hrl").
-include_lib("stdlib/include/ms_transform.hrl").
-include_lib("kernel/include/file.hrl").
@@ -92,11 +93,11 @@ all() ->
disk_log_opts,
default_formatter,
logging,
+ filter_config,
errors,
formatter_fail,
config_fail,
bad_input,
- info_and_reset,
reconfig,
sync,
disk_log_full,
@@ -292,7 +293,7 @@ logging(Config) ->
ok = start_and_add(Name, #{filter_default=>log,
formatter=>{?MODULE,self()}},
#{file => LogFile}),
- MsgFormatter = fun(Term) -> {io_lib:format("Term:~p",[Term]),[]} end,
+ MsgFormatter = fun(Term) -> {"Term:~p",[Term]} end,
logger:notice([{x,y}], #{report_cb => MsgFormatter}),
logger:notice([{x,y}], #{}),
ct:pal("Checking contents of ~p", [?log_no(LogFile,1)]),
@@ -302,6 +303,20 @@ logging(cleanup, _Config) ->
Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
remove_and_stop(Name).
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([olp],HConfig),
+
+ FakeFullHConfig = HConfig#{olp=>{regname,self(),erlang:make_ref()}},
+ #{config:=HConfig} =
+ logger_disk_log_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
errors(Config) ->
PrivDir = ?config(priv_dir,Config),
Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
@@ -316,13 +331,29 @@ errors(Config) ->
%%! TODO:
%%! Check how bad log_opts are handled!
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(Name1,
- config,
- #{file=>LogFile1,
- type=>halt}),
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(Name1,id,new),
+ {error,{illegal_config_change,
+ logger_disk_log_h,
+ #{type:=wrap},
+ #{type:=halt}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{type=>halt,
+ file=>LogFile1}),
+
+ {error,{illegal_config_change,
+ logger_disk_log_h,
+ #{file:=LogFile1},
+ #{file:="newfilename"}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{file=>"newfilename"}),
+
+    %% Read-only fields may (accidentally) be included in the change,
+    %% but they won't take effect
+ {ok,C} = logger:get_handler_config(Name1),
+ ok = logger:set_handler_config(Name1,config,#{olp=>dummyvalue}),
+ {ok,C} = logger:get_handler_config(Name1),
+
ok = logger:remove_handler(Name1),
{error,{not_found,Name1}} = logger:remove_handler(Name1),
@@ -367,7 +398,7 @@ formatter_fail(Config) ->
ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}),
logger:notice(?msg,?domain),
try_match_file(?log_no(LogFile,1),
- escape(Got3)++"FORMATTER ERROR: bad_return_value",
+ escape(Got3)++"FORMATTER ERROR: bad return value",
5000),
%% Check that handler is still alive and was never dead
@@ -380,20 +411,22 @@ formatter_fail(cleanup,_Config) ->
ok.
config_fail(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_disk_log_h,{bad,bad}}}} =
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,#{bad:=bad}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{bad => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{handler_not_added,{invalid_levels,{_,1,_}}}} =
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=1}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{drop_mode_qlen=>1}}),
- {error,{handler_not_added,{invalid_levels,{43,42,_}}}} =
+ {error,{handler_not_added,{invalid_olp_levels,#{sync_mode_qlen:=43,
+ drop_mode_qlen:=42}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{sync_mode_qlen=>43,
drop_mode_qlen=>42}}),
- {error,{handler_not_added,{invalid_levels,{_,43,42}}}} =
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=43,
+ flush_qlen:=42}}}} =
logger:add_handler(?MODULE,logger_disk_log_h,
#{config => #{drop_mode_qlen=>43,
flush_qlen=>42}}),
@@ -402,40 +435,26 @@ config_fail(_Config) ->
#{filter_default=>log,
formatter=>{?MODULE,self()}}),
%% can't change the disk log options for a log already in use
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,config,
- #{max_no_files=>2}),
- %% can't change name of an existing handler
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,id,bad),
+ {error,{illegal_config_change,logger_disk_log_h,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{max_no_files=>2}),
%% incorrect values of OP params
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
- {error,{invalid_levels,_}} =
- logger:set_handler_config(?MODULE,config,
- HConfig#{sync_mode_qlen=>100,
- flush_qlen=>99}),
+ {error,{invalid_olp_levels,_}} =
+ logger:update_handler_config(?MODULE,config,
+ HConfig#{sync_mode_qlen=>100,
+ flush_qlen=>99}),
%% invalid name of config parameter
- {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} =
- logger:set_handler_config(?MODULE, config,
- HConfig#{filesync_rep_int => 2000}),
+ {error,{invalid_config,logger_disk_log_h,#{filesync_rep_int:=2000}}} =
+ logger:update_handler_config(?MODULE, config,
+ HConfig#{filesync_rep_int => 2000}),
ok.
config_fail(cleanup,_Config) ->
logger:remove_handler(?MODULE).
bad_input(_Config) ->
{error,{badarg,{filesync,["BadType"]}}} =
- logger_disk_log_h:filesync("BadType"),
- {error,{badarg,{info,["BadType"]}}} = logger_disk_log_h:info("BadType"),
- {error,{badarg,{reset,["BadType"]}}} = logger_disk_log_h:reset("BadType").
-
-info_and_reset(_Config) ->
- ok = logger:add_handler(?MODULE,logger_disk_log_h,
- #{filter_default=>log,
- formatter=>{?MODULE,self()}}),
- #{id := ?MODULE} = logger_disk_log_h:info(?MODULE),
- ok = logger_disk_log_h:reset(?MODULE).
-info_and_reset(cleanup,_Config) ->
- logger:remove_handler(?MODULE).
+ logger_disk_log_h:filesync("BadType").
reconfig(Config) ->
Dir = ?config(priv_dir,Config),
@@ -444,7 +463,7 @@ reconfig(Config) ->
#{filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
- #{id := ?MODULE,
+ #{%id := ?MODULE,
sync_mode_qlen := ?SYNC_MODE_QLEN,
drop_mode_qlen := ?DROP_MODE_QLEN,
flush_qlen := ?FLUSH_QLEN,
@@ -455,14 +474,32 @@ reconfig(Config) ->
overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
- log_opts := #{type := ?DISK_LOG_TYPE,
- max_no_files := ?DISK_LOG_MAX_NO_FILES,
- max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
- file := _DiskLogFile}} =
- logger_disk_log_h:info(?MODULE),
-
- {ok,#{config := HConfig0}} = logger:get_handler_config(?MODULE),
+ cb_state :=
+ #{handler_state :=
+ #{log_opts := #{type := ?DISK_LOG_TYPE,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ file := DiskLogFile}},
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config :=
+ #{sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
+ file := DiskLogFile,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ type := wrap} = HConfig0}} =
+ logger:get_handler_config(?MODULE),
+
HConfig1 = HConfig0#{sync_mode_qlen => 1,
drop_mode_qlen => 2,
flush_qlen => 3,
@@ -475,7 +512,7 @@ reconfig(Config) ->
overload_kill_restart_after => infinity,
filesync_repeat_interval => no_repeat},
ok = logger:set_handler_config(?MODULE, config, HConfig1),
- #{id := ?MODULE,
+ #{%id := ?MODULE,
sync_mode_qlen := 1,
drop_mode_qlen := 2,
flush_qlen := 3,
@@ -486,8 +523,31 @@ reconfig(Config) ->
overload_kill_qlen := 100000,
overload_kill_mem_size := 10000000,
overload_kill_restart_after := infinity,
- filesync_repeat_interval := no_repeat} =
- logger_disk_log_h:info(?MODULE),
+ cb_state := #{filesync_repeat_interval := no_repeat}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config:=HConfig1}} = logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConfig1#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = HConfig0#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = HConfig0#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = HConfig0#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
ok = logger:remove_handler(?MODULE),
@@ -502,11 +562,50 @@ reconfig(Config) ->
max_no_files => 1,
max_no_bytes => 1024,
file => File}}),
- #{log_opts := #{type := halt,
- max_no_files := 1,
- max_no_bytes := 1024,
- file := File}} =
- logger_disk_log_h:info(?MODULE),
+ #{cb_state :=
+ #{handler_state :=
+ #{log_opts := #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config :=
+ #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}=HaltHConfig} = Config2} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, level, notice),
+ {ok,C5} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = Config2#{level => notice},
+
+ ok = logger:set_handler_config(?MODULE, level, info),
+ {ok,C6} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = Config2#{level => info},
+
+    %% You are not allowed to actively set the write-once fields
+    %% (type, max_no_files, max_no_bytes, file) at runtime.
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>wrap}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_files=>2}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}),
+ {ok,C7} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = C6,
+
+    %% ... but if you don't specify the write-once fields, then
+    %% set_handler_config shall NOT reset them to their default values
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C8}} = logger:get_handler_config(?MODULE),
+ ct:log("C8: ~p",[C8]),
+ C8 = HaltHConfig#{sync_mode_qlen=>1},
ok.
reconfig(cleanup, _Config) ->
@@ -523,67 +622,55 @@ sync(Config) ->
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,nl}}),
- start_tracer([{disk_log,blog,2},
+ start_tracer([{logger_disk_log_h,disk_log_write,3},
{disk_log,sync,1}],
- [{disk_log,blog,<<"first\n">>},
+ [{logger_disk_log_h,disk_log_write,<<"first\n">>},
{disk_log,sync}]),
logger:notice("first", ?domain),
%% wait for automatic disk_log_sync
check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
- start_tracer([{disk_log,blog,2},
- {disk_log,sync,1}],
- [{disk_log,blog,<<"second\n">>},
- {disk_log,blog,<<"third\n">>},
- {disk_log,sync}]),
- %% two log requests in fast succession will make the handler skip
- %% an automatic disk log sync
- logger:notice("second", ?domain),
- logger:notice("third", ?domain),
- %% do explicit sync
- logger_disk_log_h:filesync(?MODULE),
- check_tracer(100),
-
- %% check that if there's no repeated disk_log_sync active,
+ %% check that if there's no repeated filesync active,
%% a disk_log_sync is still performed when handler goes idle
{ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
- ok = logger:set_handler_config(?MODULE, config, HConfig1),
-
+ ok = logger:update_handler_config(?MODULE, config, HConfig1),
no_repeat = maps:get(filesync_repeat_interval,
- logger_disk_log_h:info(?MODULE)),
+ maps:get(cb_state,logger_olp:info(h_proc_name()))),
- start_tracer([{disk_log,blog,2},
+ start_tracer([{logger_disk_log_h,disk_log_write,3},
{disk_log,sync,1}],
- [{disk_log,blog,<<"fourth\n">>},
- {disk_log,blog,<<"fifth\n">>},
+ [{logger_disk_log_h,disk_log_write,<<"second\n">>},
+ {disk_log,sync},
+ {logger_disk_log_h,disk_log_write,<<"third\n">>},
{disk_log,sync}]),
- logger:notice("fourth", ?domain),
- timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
- logger:notice("fifth", ?domain),
+ logger:notice("second", ?domain),
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ logger:notice("third", ?domain),
%% wait for automatic disk_log_sync
- check_tracer(?IDLE_DETECT_TIME_MSEC*2),
+ check_tracer(?IDLE_DETECT_TIME*2),
- try_read_file(Log, {ok,<<"first\nsecond\nthird\nfourth\nfifth\n">>}, 1000),
+ try_read_file(Log, {ok,<<"first\nsecond\nthird\n">>}, 1000),
- %% switch repeated disk_log_sync on and verify that the looping works
+ %% switch repeated filesync on and verify that the looping works
SyncInt = 1000,
WaitT = 4500,
- OneSync = {logger_disk_log_h,handle_cast,repeated_disk_log_sync},
- %% receive 1 initial repeated_disk_log_sync, then 1 per sec
- start_tracer([{logger_disk_log_h,handle_cast,2}],
- [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{{logger_h_common,handle_cast,2},
+ [{[repeated_filesync,'_'],[],[{message,{caller}}]}]}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
HConfig2 = HConfig#{filesync_repeat_interval => SyncInt},
- ok = logger:set_handler_config(?MODULE, config, HConfig2),
+ ok = logger:update_handler_config(?MODULE, config, HConfig2),
SyncInt = maps:get(filesync_repeat_interval,
- logger_disk_log_h:info(?MODULE)),
+ maps:get(cb_state,logger_olp:info(h_proc_name()))),
timer:sleep(WaitT),
HConfig3 = HConfig#{filesync_repeat_interval => no_repeat},
- ok = logger:set_handler_config(?MODULE, config, HConfig3),
+ ok = logger:update_handler_config(?MODULE, config, HConfig3),
check_tracer(100),
ok.
sync(cleanup,_Config) ->
@@ -617,7 +704,7 @@ disk_log_wrap(Config) ->
end,
{ok,_} = dbg:tracer(process, {TraceFun, Tester}),
{ok,_} = dbg:p(whereis(h_proc_name()), [c]),
- {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)],
ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]),
@@ -635,7 +722,7 @@ disk_log_wrap(Config) ->
timer:sleep(1000),
dbg:stop_clear(),
Received = lists:flatmap(fun({trace,_M,handle_info,
- [{disk_log,_Node,_Name,What},_]}) ->
+ [_,{disk_log,_Node,_Name,What},_]}) ->
[{trace,What}];
({log,_}) ->
[]
@@ -671,7 +758,7 @@ disk_log_full(Config) ->
end,
{ok,_} = dbg:tracer(process, {TraceFun, Tester}),
{ok,_} = dbg:p(whereis(h_proc_name()), [c]),
- {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
NoOfChars = 5,
Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)],
@@ -681,20 +768,24 @@ disk_log_full(Config) ->
timer:sleep(2000),
dbg:stop_clear(),
Received = lists:flatmap(fun({trace,_M,handle_info,
- [{disk_log,_Node,_Name,What},_]}) ->
+ [_,{disk_log,_Node,_Name,What},_]}) ->
[{trace,What}];
({log,_}) ->
[]
end, test_server:messages_get()),
ct:pal("Trace =~n~p", [Received]),
- [{trace,full},
- {trace,{error_status,{error,{full,_}}}}] = Received,
+
+    %% The tail here could be an error_status notification if the
+    %% last write was synchronous, but in most cases it will not be.
+ [{trace,full}|_] = Received,
+ %% [{trace,full},
+ %% {trace,{error_status,{error,{full,_}}}}] = Received,
ok.
disk_log_full(cleanup, _Config) ->
dbg:stop_clear(),
logger:remove_handler(?MODULE).
-disk_log_events(Config) ->
+disk_log_events(_Config) ->
Node = node(),
Log = ?MODULE,
ok = logger:add_handler(?MODULE,
@@ -720,14 +811,14 @@ disk_log_events(Config) ->
end,
{ok,_} = dbg:tracer(process, {TraceFun, Tester}),
{ok,_} = dbg:p(whereis(h_proc_name()), [c]),
- {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
[whereis(h_proc_name()) ! E || E <- Events],
%% wait for trace messages
timer:sleep(2000),
dbg:stop_clear(),
Received = lists:map(fun({trace,_M,handle_info,
- [Got,_]}) -> Got
+ [_,Got,_]}) -> Got
end, test_server:messages_get()),
ct:pal("Trace =~n~p", [Received]),
NoOfEvents = length(Events),
@@ -750,13 +841,17 @@ write_failure(Config) ->
false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
- HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
- ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]),
-
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]),
+ HState = rpc:call(Node, logger_olp, info, [h_proc_name(?STANDARD_HANDLER)]),
+ LogOpts = maps:get(log_opts,
+ maps:get(handler_state,
+ maps:get(cb_state,HState))),
+ ct:pal("LogOpts = ~p", [LogOpts]),
+
+ %% ?check and ?check_no_log in this test only check for internal log events
ok = log_on_remote_node(Node, "Logged1"),
rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
- ?check_no_log,
+ ?check_no_log, % no internal log when write ok
SyncRepInt = case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
true -> 5500;
@@ -765,24 +860,26 @@ write_failure(Config) ->
try_read_file(Log, {ok,<<"Logged1\n">>}, SyncRepInt),
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,{error,no_such_log}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
+ %% this should have caused an internal log
?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}),
-
+
ok = log_on_remote_node(Node, "No second error printout"),
- ?check_no_log,
+ ?check_no_log, % but don't log same error twice
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,
{error,{full,?STANDARD_HANDLER}}]),
ok = log_on_remote_node(Node, "Cause simple error printout"),
+ %% this was a different error, so it should be logged
?check({error,{?STANDARD_HANDLER,log,LogOpts,
{error,{full,?STANDARD_HANDLER}}}}),
- rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]),
ok = log_on_remote_node(Node, "Logged2"),
rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
- ?check_no_log,
+ ?check_no_log, % no internal log when write ok
try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, SyncRepInt),
ok.
write_failure(cleanup, _Config) ->
@@ -801,15 +898,16 @@ sync_failure(Config) ->
rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
- HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
- LogOpts = maps:get(log_opts, HState),
+ HState = rpc:call(Node, logger_olp, info, [h_proc_name(?STANDARD_HANDLER)]),
+ LogOpts = maps:get(log_opts, maps:get(handler_state,
+ maps:get(cb_state,HState))),
SyncInt = 500,
- ok = rpc:call(Node, logger, set_handler_config,
+ ok = rpc:call(Node, logger, update_handler_config,
[?STANDARD_HANDLER, config,
#{filesync_repeat_interval => SyncInt}]),
- Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
- SyncInt = maps:get(filesync_repeat_interval, Info),
+ Info = rpc:call(Node, logger_olp, info, [h_proc_name(?STANDARD_HANDLER)]),
+ SyncInt = maps:get(filesync_repeat_interval, maps:get(cb_state, Info)),
ok = log_on_remote_node(Node, "Logged1"),
?check_no_log,
@@ -880,7 +978,7 @@ op_switch_to_sync(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Lines = count_lines(Log),
NumOfReqs = Lines,
@@ -905,7 +1003,7 @@ op_switch_to_drop(Config) ->
drop_mode_qlen => 2,
flush_qlen => Procs*NumOfReqs*Bursts,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% It sometimes happens that the handler either gets
     %% the requests at a slow enough pace so that dropping
     %% never occurs. Therefore, let's generate a number of
@@ -951,7 +1049,7 @@ op_switch_to_flush(Config) ->
drop_mode_qlen => 300,
flush_qlen => 300,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1500,
Procs = 10,
Bursts = 10,
@@ -993,7 +1091,7 @@ limit_burst_disabled(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1013,7 +1111,7 @@ limit_burst_enabled_one(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1034,7 +1132,7 @@ limit_burst_enabled_period(Config) ->
burst_limit_window_time => BurstTWin,
drop_mode_qlen => 20000,
flush_qlen => 20001}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Windows = 3,
Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
@@ -1054,7 +1152,7 @@ kill_disabled(Config) ->
HConfig#{config=>DLHConfig#{overload_kill_enable=>false,
overload_kill_qlen=>10,
overload_kill_mem_size=>100}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -1076,7 +1174,7 @@ qlen_kill_new(Config) ->
overload_kill_qlen=>10,
overload_kill_mem_size=>Mem0+50000,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1085,7 +1183,7 @@ qlen_kill_new(Config) ->
receive
{'DOWN', MRef, _, _, Info} ->
case Info of
- {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ {shutdown,{overloaded,QLen,Mem}} ->
ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
killed ->
ct:pal("Slow shutdown, handler process was killed!", [])
@@ -1095,7 +1193,7 @@ qlen_kill_new(Config) ->
ok
after
5000 ->
- Info = logger_disk_log_h:info(?MODULE),
+ Info = logger_olp:info(h_proc_name()),
ct:pal("Handler state = ~p", [Info]),
ct:fail("Handler not dead! It should not have survived this!")
end.
@@ -1113,7 +1211,7 @@ mem_kill_new(Config) ->
overload_kill_qlen=>50000,
overload_kill_mem_size=>Mem0+500,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1122,7 +1220,7 @@ mem_kill_new(Config) ->
receive
{'DOWN', MRef, _, _, Info} ->
case Info of
- {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ {shutdown,{overloaded,QLen,Mem}} ->
ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
killed ->
ct:pal("Slow shutdown, handler process was killed!", [])
@@ -1132,7 +1230,7 @@ mem_kill_new(Config) ->
ok
after
5000 ->
- Info = logger_disk_log_h:info(?MODULE),
+ Info = logger_olp:info(h_proc_name()),
ct:pal("Handler state = ~p", [Info]),
ct:fail("Handler not dead! It should not have survived this!")
end.
@@ -1145,12 +1243,12 @@ restart_after(Config) ->
{Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
NewHConfig1 =
HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
- overload_kill_qlen=>10,
+ overload_kill_qlen=>4,
overload_kill_restart_after=>infinity}},
- ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
MRef1 = erlang:monitor(process, whereis(h_proc_name())),
%% kill handler
- send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ send_burst({n,100}, {spawn,5,0}, {chars,79}, notice),
receive
{'DOWN', MRef1, _, _, _Reason1} ->
file_delete(Log),
@@ -1158,7 +1256,7 @@ restart_after(Config) ->
ok
after
5000 ->
- Info1 = logger_std_h:info(?MODULE),
+ Info1 = logger_olp:info(h_proc_name()),
ct:pal("Handler state = ~p", [Info1]),
ct:fail("Handler not dead! It should not have survived this!")
end,
@@ -1167,13 +1265,13 @@ restart_after(Config) ->
RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
NewHConfig2 =
HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
- overload_kill_qlen=>10,
+ overload_kill_qlen=>4,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
Pid0 = whereis(h_proc_name()),
MRef2 = erlang:monitor(process, Pid0),
%% kill handler
- send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ send_burst({n,100}, {spawn,5,0}, {chars,79}, notice),
receive
{'DOWN', MRef2, _, _, _Reason2} ->
file_delete(Log),
@@ -1182,7 +1280,7 @@ restart_after(Config) ->
ok
after
5000 ->
- Info2 = logger_std_h:info(?MODULE),
+ Info2 = logger_olp:info(h_proc_name()),
ct:pal("Handler state = ~p", [Info2]),
ct:fail("Handler not dead! It should not have survived this!")
end,
@@ -1194,7 +1292,7 @@ restart_after(cleanup, _Config) ->
%% during high load to verify that sync, dropping and flushing is
%% handled correctly.
handler_requests_under_load() ->
- [{timetrap,{minutes,3}}].
+ [{timetrap,{minutes,5}}].
handler_requests_under_load(Config) ->
{Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
NewHConfig =
@@ -1202,12 +1300,16 @@ handler_requests_under_load(Config) ->
drop_mode_qlen => 1000,
flush_qlen => 2000,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
- Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
- {info,[]},
- {reset,[]},
- {change_config,[]}])
- end),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(
+ fun() -> send_requests(1,[{logger_disk_log_h,filesync,[?MODULE],[]},
+ {logger_olp,info,[h_proc_name()],[]},
+ {logger_olp,reset,[h_proc_name()],[]},
+ {logger,update_handler_config,
+ [?MODULE, config,
+ #{overload_kill_enable => false}],
+ []}])
+ end),
Procs = 100,
Sent = Procs * send_burst({n,5000}, {spawn,Procs,10}, {chars,79}, notice),
Pid ! {self(),finish},
@@ -1219,29 +1321,22 @@ handler_requests_under_load(Config) ->
[E || E <- Res,
is_tuple(E) andalso (element(1,E) == error)]
end,
- Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult],
- NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
+ Errors = [{Func,FindError(Res)} || {_,Func,_,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,_,_,Res}, N) -> N + length(Res) end,
+ 0, ReqResult),
ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
ok = file_delete(Log).
handler_requests_under_load(cleanup, _Config) ->
ok = stop_handler(?MODULE).
-send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
+send_requests(TO, Reqs = [{Mod,Func,Args,Res}|Rs]) ->
receive
{From,finish} ->
From ! {self(),Reqs}
after
TO ->
- Result =
- case Req of
- change_config ->
- logger:set_handler_config(HName, logger_disk_log_h,
- #{overload_kill_enable =>
- false});
- Func ->
- logger_disk_log_h:Func(HName)
- end,
- send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}])
+ Result = apply(Mod,Func,Args),
+ send_requests(TO, Rs ++ [{Mod,Func,Args,[Result|Res]}])
end.
%%%-----------------------------------------------------------------
@@ -1359,15 +1454,6 @@ format(Msg,Tag) ->
erlang:display(Error),
exit(Error).
-remove(Handler, LogName) ->
- logger_disk_log_h:remove(Handler, LogName),
- HState = #{log_names := Logs} = logger_disk_log_h:info(),
- false = maps:is_key(LogName, HState),
- false = lists:member(LogName, Logs),
- false = logger_config:exist(?LOGGER_TABLE, LogName),
- {error,no_such_log} = disk_log:info(LogName),
- ok.
-
start_and_add(Name, Config, LogOpts) ->
HConfig = maps:get(config, Config, #{}),
HConfig1 = maps:merge(HConfig, LogOpts),
@@ -1494,7 +1580,9 @@ start_tracer(Trace,Expected) ->
ok.
tpl([{M,F,A}|Trace]) ->
- {ok,Match} = dbg:tpl(M,F,A,[]),
+ tpl([{{M,F,A},c}|Trace]);
+tpl([{{M,F,A},MS}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,MS),
case lists:keyfind(matched,1,Match) of
{_,_,1} ->
ok;
@@ -1507,23 +1595,23 @@ tpl([{M,F,A}|Trace]) ->
tpl([]) ->
ok.
-tracer({trace,_,call,{logger_disk_log_h,handle_cast,[Op|_]}},
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]},Caller},
{Pid,[{Mod,Func,Op}|Expected]}) ->
- maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
-tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]}}, {Pid,[{Mod,Func,Data}|Expected]}) ->
- maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
-tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
- maybe_tracer_done(Pid,Expected,{Mod,Func});
-tracer({trace,_,call,Call}, {Pid,Expected}) ->
- ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]),
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op},Caller);
+tracer({trace,_,call,{Mod=logger_disk_log_h,Func=disk_log_write,[_,_,Data]},Caller}, {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data},Caller);
+tracer({trace,_,call,{Mod,Func,_},Caller}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func},Caller);
+tracer({trace,_,call,Call,Caller}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nCaller: ~p~nExpected: ~p~n",[Call,Caller,Expected]),
Pid ! {tracer_got_unexpected,Call,Expected},
{Pid,Expected}.
-maybe_tracer_done(Pid,[],Got) ->
- ct:log("Tracer got: ~p~n",[Got]),
+maybe_tracer_done(Pid,[],Got,Caller) ->
+ ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]),
Pid ! tracer_done;
-maybe_tracer_done(Pid,Expected,Got) ->
- ct:log("Tracer got: ~p~n",[Got]),
+maybe_tracer_done(Pid,Expected,Got,Caller) ->
+ ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]),
{Pid,Expected}.
check_tracer(T) ->
@@ -1551,25 +1639,30 @@ h_proc_name() ->
h_proc_name(Name) ->
list_to_atom(lists:concat([logger_disk_log_h,"_",Name])).
-file_delete(Log) ->
- file:delete(Log).
-
wait_for_process_up(T) ->
- wait_for_process_up(h_proc_name(),T).
+ wait_for_process_up(?MODULE, h_proc_name(), T).
-wait_for_process_up(Name,T) ->
+wait_for_process_up(Name, RegName, T) ->
N = (T div 500) + 1,
- wait_for_process_up1(Name,N).
+ wait_for_process_up1(Name, RegName, N).
-wait_for_process_up1(Name,0) ->
+wait_for_process_up1(_Name, _RegName, 0) ->
error;
-wait_for_process_up1(Name,N) ->
+wait_for_process_up1(Name, RegName, N) ->
timer:sleep(500),
- case whereis(Name) of
+ case whereis(RegName) of
Pid when is_pid(Pid) ->
- %% ct:pal("Process ~p up (~p tries left)",[Name,N]),
- {ok,Pid};
+ case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% ct:pal("Process ~p up (~p tries left)",[Name,N]),
+ {ok,Pid};
+ _ ->
+ wait_for_process_up1(Name, RegName, N-1)
+ end;
undefined ->
%% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]),
- wait_for_process_up1(Name,N-1)
+ wait_for_process_up1(Name, RegName, N-1)
end.
+
+file_delete(Log) ->
+ file:delete(Log).
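
%% For illustration only: a minimal sketch of the set/update distinction the
%% reconfig test above relies on, assuming my_h is an already-added
%% logger_disk_log_h (or logger_std_h) handler. set_handler_config/3 replaces
%% the writable part of 'config' (unspecified keys fall back to their
%% defaults, write-once fields are kept), while update_handler_config/3
%% merges into the existing 'config'.
-module(handler_config_sketch).
-export([tune/0]).

tune() ->
    %% my_h is a hypothetical handler id
    ok = logger:set_handler_config(my_h, config, #{drop_mode_qlen => 100}),
    ok = logger:update_handler_config(my_h, config, #{sync_mode_qlen => 1}),
    {ok, #{config := #{sync_mode_qlen := 1,
                       drop_mode_qlen := 100}}} =
        logger:get_handler_config(my_h),
    ok.
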
diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl
index 04a4364947..9d2ad11be8 100644
--- a/lib/kernel/test/logger_env_var_SUITE.erl
+++ b/lib/kernel/test/logger_env_var_SUITE.erl
@@ -59,7 +59,8 @@ groups() ->
logger_undefined,
logger_many_handlers_default_first,
logger_many_handlers_default_last,
- logger_many_handlers_default_last_broken_filter
+ logger_many_handlers_default_last_broken_filter,
+ logger_proxy
]},
{bad,[],[bad_error_logger,
bad_level,
@@ -71,6 +72,7 @@ all() ->
sasl_compatible_false,
sasl_compatible_false_no_progress,
sasl_compatible,
+ all_logger_level,
{group,bad},
{group,error_logger},
{group,logger}
@@ -540,6 +542,19 @@ logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) ->
ok.
+logger_proxy(Config) ->
+    %% assume the current node runs with default settings
+ DefOpts = logger_olp:get_opts(logger_proxy),
+ {ok,_,Node} = setup(Config,
+ [{logger,[{proxy,#{sync_mode_qlen=>0,
+ drop_mode_qlen=>2}}]}]),
+ Expected = DefOpts#{sync_mode_qlen:=0,
+ drop_mode_qlen:=2},
+ Expected = rpc:call(Node,logger_olp,get_opts,[logger_proxy]),
+ Expected = rpc:call(Node,logger,get_proxy_config,[]),
+
+ ok.
+
sasl_compatible_false(Config) ->
Log = file(Config,?FUNCTION_NAME),
{ok,_,Node} = setup(Config,
@@ -572,6 +587,24 @@ sasl_compatible(Config) ->
0),% progress in std logger
ok.
+all_logger_level(Config) ->
+ [all_logger_level(Config,Level) || Level <- [none,
+ emergency,
+ alert,
+ critical,
+ error,
+ warning,
+ notice,
+ info,
+ debug,
+ all]],
+ ok.
+
+all_logger_level(Config,Level) ->
+ {ok,#{primary:=#{level:=Level}},Node} = setup(Config,[{logger_level,Level}]),
+ true = test_server:stop_node(Node),
+ ok.
+
bad_error_logger(Config) ->
error = setup(Config,[{error_logger,baddest}]).
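
%% For illustration only: a minimal sys.config sketch (the values are just
%% examples) of the two kernel parameters exercised above; logger_level sets
%% the primary log level and the proxy entry under logger configures the
%% logger proxy's overload protection.
[{kernel,
  [{logger_level, info},
   {logger, [{proxy, #{sync_mode_qlen => 0,
                       drop_mode_qlen => 2}}]}]}].
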
diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl
index 8fe8d5199b..83e3e6c40a 100644
--- a/lib/kernel/test/logger_formatter_SUITE.erl
+++ b/lib/kernel/test/logger_formatter_SUITE.erl
@@ -166,6 +166,56 @@ single_line(_Config) ->
" info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>bad}),
+
+
+ %% Test that no extra commas/spaces are added when removing
+ %% newlines, especially not after "=>" in a map association (as
+ %% was the case in OTP-21.0, when the only single_line adjustment
+ %% was done by regexp replacement of "\n" by ", ").
+ Prefix =
+ "Some characters to fill the line ------------------------------------- ",
+ String3 = format(info,{"~s~p~n~s~p~n",[Prefix,
+ lists:seq(1,10),
+ Prefix,
+                                           #{a=>map,with=>a,few=>associations}]},
+ #{time=>Time},
+ #{single_line=>true}),
+ ct:log(String3),
+ match = re:run(String3,"\\[1,2,3,4,5,6,7,8,9,10\\]",[{capture,none}]),
+ match = re:run(String3,
+                   "#{a => map,few => associations,with => a}",
+ [{capture,none}]),
+
+ %% This part is added to make sure that the previous test made
+ %% sense, i.e. that there would actually be newlines inside the
+ %% list and map.
+ String4 = format(info,{"~s~p~n~s~p~n",[Prefix,
+ lists:seq(1,10),
+ Prefix,
+                                           #{a=>map,with=>a,few=>associations}]},
+ #{time=>Time},
+ #{single_line=>false}),
+ ct:log(String4),
+ match = re:run(String4,"\\[1,2,3,\n",[global,{capture,none}]),
+ {match,Match4} = re:run(String4,"=>\n",[global,{capture,all}]),
+ 3 = length(Match4),
+
+ %% Test that big metadata fields do not get line breaks
+ String5 = format(info,"",
+ #{mymeta=>lists:seq(1,100)},
+ #{single_line=>true,template=>[mymeta,"\n"]}),
+ ct:log(String5),
+ [_] = string:lexemes(String5,"\n"),
+
+ %% Ensure that the previous test made sense, i.e. that the
+ %% metadata field does produce multiple lines if
+ %% single_line==false.
+ String6 = format(info,"",
+ #{mymeta=>lists:seq(1,100)},
+ #{single_line=>false,template=>[mymeta,"\n"]}),
+ ct:log(String6),
+ [_,_|_] = string:lexemes(String6,"\n"),
+
ok.
template(_Config) ->
@@ -312,30 +362,48 @@ format_msg(_Config) ->
#{report_cb=>fun(_)-> faulty_return end},
#{template=>Template}),
ct:log(String5),
- "REPORT_CB ERROR: term; Returned: faulty_return" = String5,
+ "REPORT_CB/1 ERROR: term; Returned: faulty_return" = String5,
String6 = format(info,{report,term},
#{report_cb=>fun(_)-> erlang:error(fun_crashed) end},
#{template=>Template}),
ct:log(String6),
- "REPORT_CB CRASH: term; Reason: {error,fun_crashed}" = String6,
+ "REPORT_CB/1 CRASH: term; Reason: {error,fun_crashed,"++_ = String6,
+
+ String7 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> ['not',a,string] end},
+ #{template=>Template}),
+ ct:log(String7),
+ "REPORT_CB/2 ERROR: term; Returned: ['not',a,string]" = String7,
+
+ String8 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> faulty_return end},
+ #{template=>Template}),
+ ct:log(String8),
+ "REPORT_CB/2 ERROR: term; Returned: faulty_return" = String8,
+
+ String9 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> erlang:error(fun_crashed) end},
+ #{template=>Template}),
+ ct:log(String9),
+ "REPORT_CB/2 CRASH: term; Reason: {error,fun_crashed,"++_ = String9,
%% strings are not formatted
- String7 = format(info,{string,"string"},
+ String10 = format(info,{string,"string"},
#{report_cb=>fun(_)-> {"formatted",[]} end},
#{template=>Template}),
- ct:log(String7),
- "string" = String7,
+ ct:log(String10),
+ "string" = String10,
- String8 = format(info,{string,['not',printable,list]},
+ String11 = format(info,{string,['not',printable,list]},
#{report_cb=>fun(_)-> {"formatted",[]} end},
#{template=>Template}),
- ct:log("~ts",[String8]), % avoiding ct_log crash
- "FORMAT ERROR: \"~ts\" - [['not',printable,list]]" = String8,
+ ct:log("~ts",[String11]), % avoiding ct_log crash
+ "FORMAT ERROR: \"~ts\" - [['not',printable,list]]" = String11,
- String9 = format(info,{string,"string"},#{},#{template=>Template}),
- ct:log(String9),
- "string" = String9,
+ String12 = format(info,{string,"string"},#{},#{template=>Template}),
+ ct:log(String12),
+ "string" = String12,
ok.
@@ -639,8 +707,10 @@ check_config(_Config) ->
?cfgerr({max_size,bad}) =
logger_formatter:check_config(#{max_size => bad}),
+ ok =
+ logger_formatter:check_config(#{report_cb => fun(_,_) -> "" end}),
?cfgerr({report_cb,F}) =
- logger_formatter:check_config(#{report_cb => F=fun(_,_) -> {"",[]} end}),
+ logger_formatter:check_config(#{report_cb => F=fun(_,_,_) -> {"",[]} end}),
?cfgerr({report_cb,bad}) =
logger_formatter:check_config(#{report_cb => bad}),
@@ -695,6 +765,8 @@ check_config(_Config) ->
%% Test that formatter config can be changed, and that the default
%% template is updated accordingly
update_config(_Config) ->
+ {error,{not_found,?MODULE}} = logger:update_formatter_config(?MODULE,#{}),
+
logger:add_handler_filter(default,silence,{fun(_,_) -> stop end,ok}),
ok = logger:add_handler(?MODULE,?MODULE,#{}),
D = lists:seq(1,1000),
@@ -747,6 +819,11 @@ update_config(_Config) ->
ct:log("~p",[C6]),
["=NOTICE REPORT==== "++_,_D6] = Lines6,
+ {error,{invalid_formatter_config,bad}} =
+ logger:update_formatter_config(?MODULE,bad),
+ {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} =
+ logger:update_formatter_config(?MODULE,depth,bad),
+
ok.
update_config(cleanup,_Config) ->
@@ -790,7 +867,7 @@ my_try(Fun) ->
try Fun() catch C:R:S -> {C,R,hd(S)} end.
timestamp() ->
- erlang:system_time(microsecond).
+ logger:timestamp().
%% necessary?
add_time(#{time:=_}=Meta) ->
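
%% For illustration only: a minimal sketch of the 2-arity report callback
%% accepted by logger_formatter:check_config/1 above. Unlike report_cb/1,
%% which returns {Format,Args}, the fun/2 variant gets the report plus a map
%% of formatting options and must return chardata directly. The report shape
%% is made up for the example.
-module(report_cb_sketch).
-export([config/0]).

config() ->
    #{report_cb => fun(Report, _FormatOpts) ->
                           %% must return chardata, not {Format,Args}
                           io_lib:format("event: ~tp", [Report])
                   end}.
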
diff --git a/lib/kernel/test/logger_olp_SUITE.erl b/lib/kernel/test/logger_olp_SUITE.erl
new file mode 100644
index 0000000000..ea3eec89f5
--- /dev/null
+++ b/lib/kernel/test/logger_olp_SUITE.erl
@@ -0,0 +1,90 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_olp_SUITE).
+
+-compile(export_all).
+
+-include_lib("kernel/src/logger_olp.hrl").
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [idle_timer].
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+idle_timer(_Config) ->
+ {ok,_Pid,Olp} = logger_olp:start_link(?MODULE,?MODULE,self(),#{}),
+ [logger_olp:load(Olp,{msg,N}) || N<-lists:seq(1,3)],
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ [{load,{msg,1}},
+ {load,{msg,2}},
+ {load,{msg,3}},
+ {notify,idle}] = test_server:messages_get(),
+ logger_olp:cast(Olp,hello),
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ [{cast,hello}] = test_server:messages_get(),
+ ok.
+idle_timer(cleanup,_Config) ->
+ unlink(whereis(?MODULE)),
+ logger_olp:stop(?MODULE),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Olp callbacks
+init(P) ->
+ {ok,P}.
+
+handle_load(M,P) ->
+ P ! {load,M},
+ P.
+
+handle_cast(M,P) ->
+ P ! {cast,M},
+ {noreply,P}.
+
+notify(N,P) ->
+ P ! {notify,N},
+ P.
diff --git a/lib/kernel/test/logger_proxy_SUITE.erl b/lib/kernel/test/logger_proxy_SUITE.erl
new file mode 100644
index 0000000000..777531e4ed
--- /dev/null
+++ b/lib/kernel/test/logger_proxy_SUITE.erl
@@ -0,0 +1,274 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_proxy_SUITE).
+
+-compile(export_all).
+
+%% -include_lib("common_test/include/ct.hrl").
+%% -include_lib("kernel/include/logger.hrl").
+%% -include_lib("kernel/src/logger_internal.hrl").
+
+%% -define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+%% ":"++integer_to_list(?LINE)).
+%% -define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+%% -define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+%% -define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+%% file=>?FILE, line=>?LINE-N}).
+
+%% -define(TRY(X), my_try(fun() -> X end)).
+
+
+-define(HNAME,list_to_atom(lists:concat([?MODULE,"_",?FUNCTION_NAME]))).
+-define(LOC,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},line=>?LINE}).
+-define(ENSURE_TIME,5000).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [basic,
+ emulator,
+ remote,
+ remote_emulator,
+ config,
+ restart_after,
+ terminate].
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+basic(_Config) ->
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ logger_proxy ! {log,notice,"Log from: ~p; ~p",[?FUNCTION_NAME,?LINE],L1=?LOC},
+ ok = ensure(L1),
+ logger_proxy ! {log,notice,[{test_case,?FUNCTION_NAME},{line,?LINE}],L2=?LOC},
+ ok = ensure(L2),
+ logger_proxy:log({remote,node(),{log,notice,
+ "Log from: ~p; ~p",
+ [?FUNCTION_NAME,?LINE],
+ L3=?LOC}}),
+ ok = ensure(L3),
+ logger_proxy:log({remote,node(),{log,notice,
+ [{test_case,?FUNCTION_NAME},
+ {line,?LINE}],
+ L4=?LOC}}),
+ ok = ensure(L4),
+ ok.
+basic(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+emulator(_Config) ->
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ Pid = spawn(fun() -> erlang:error(some_reason) end),
+ ok = ensure(#{pid=>Pid}),
+ ok.
+emulator(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+remote(Config) ->
+ {ok,_,Node} = logger_test_lib:setup(Config,[{logger,[{proxy,#{}}]}]),
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ L1 = ?LOC, spawn(Node,fun() -> logger:notice("Log from ~p; ~p",[?FUNCTION_NAME,?LINE],L1) end),
+ ok = ensure(L1),
+ L2 = ?LOC, spawn(Node,fun() -> logger:notice([{test_case,?FUNCTION_NAME},{line,?LINE}],L2) end),
+ ok = ensure(L2),
+ ok.
+remote(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+remote_emulator(Config) ->
+ {ok,_,Node} = logger_test_lib:setup(Config,[{logger,[{proxy,#{}}]}]),
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ Pid = spawn(Node,fun() -> erlang:error(some_reason) end),
+ ok = ensure(#{pid=>Pid}),
+ ok.
+remote_emulator(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+config(_Config) ->
+ C1 = #{sync_mode_qlen:=SQ,
+ drop_mode_qlen:=DQ} = logger:get_proxy_config(),
+ C1 = logger_olp:get_opts(logger_proxy),
+
+ %% Update the existing config with these two values
+ SQ1 = SQ+1,
+ DQ1 = DQ+1,
+ ok = logger:update_proxy_config(#{sync_mode_qlen=>SQ1,
+ drop_mode_qlen=>DQ1}),
+ C2 = logger:get_proxy_config(), % reads from ets table
+ C2 = logger_olp:get_opts(logger_proxy), % ensure consistency with process opts
+ C2 = C1#{sync_mode_qlen:=SQ1,
+ drop_mode_qlen:=DQ1},
+
+    %% Update the existing config again with only one value
+ SQ2 = SQ+2,
+ ok = logger:update_proxy_config(#{sync_mode_qlen=>SQ2}),
+ C3 = logger:get_proxy_config(),
+ C3 = logger_olp:get_opts(logger_proxy),
+ C3 = C2#{sync_mode_qlen:=SQ2},
+
+ %% Set the config, i.e. merge with defaults
+ ok = logger:set_proxy_config(#{sync_mode_qlen=>SQ1}),
+ C4 = logger:get_proxy_config(),
+ C4 = logger_olp:get_opts(logger_proxy),
+ C4 = C1#{sync_mode_qlen:=SQ1},
+
+ %% Reset to default
+ ok = logger:set_proxy_config(#{}),
+ C5 = logger:get_proxy_config(),
+ C5 = logger_olp:get_opts(logger_proxy),
+ C5 = logger_proxy:get_default_config(),
+
+ %% Errors
+ {error,{invalid_olp_config,_}} =
+ logger:set_proxy_config(#{faulty_key=>1}),
+ {error,{invalid_olp_config,_}} =
+ logger:set_proxy_config(#{sync_mode_qlen=>infinity}),
+ {error,{invalid_config,[]}} = logger:set_proxy_config([]),
+
+ {error,{invalid_olp_config,_}} =
+ logger:update_proxy_config(#{faulty_key=>1}),
+ {error,{invalid_olp_config,_}} =
+ logger:update_proxy_config(#{sync_mode_qlen=>infinity}),
+ {error,{invalid_config,[]}} = logger:update_proxy_config([]),
+
+ C5 = logger:get_proxy_config(),
+ C5 = logger_olp:get_opts(logger_proxy),
+
+ ok.
+config(cleanup,_Config) ->
+    _ = logger:set_proxy_config(logger_proxy:get_default_config()),
+ ok.
+
+restart_after(_Config) ->
+ Restart = 3000,
+ ok = logger:update_proxy_config(#{overload_kill_enable => true,
+ overload_kill_qlen => 10,
+ overload_kill_restart_after => Restart}),
+ Proxy = whereis(logger_proxy),
+ Proxy = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ Ref = erlang:monitor(process,Proxy),
+ spawn(fun() ->
+ [logger_proxy ! {log,debug,
+ [{test_case,?FUNCTION_NAME},
+ {line,?LINE}],
+ ?LOC} || _ <- lists:seq(1,100)]
+ end),
+ receive
+ {'DOWN',Ref,_,_,_Reason} ->
+ undefined = erlang:system_info(system_logger),
+ timer:sleep(Restart),
+ poll_restarted(10)
+ after 5000 ->
+ ct:fail(proxy_not_terminated)
+ end,
+
+ Proxy1 = whereis(logger_proxy),
+ Proxy1 = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ ok.
+restart_after(cleanup,_Config) ->
+    _ = logger:set_proxy_config(logger_proxy:get_default_config()),
+ ok.
+
+%% Test that the system_logger flag is set to the logger process if
+%% logger_proxy terminates for a reason other than being overloaded.
+terminate(_Config) ->
+ Logger = whereis(logger),
+ Proxy = whereis(logger_proxy),
+ Proxy = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ Ref = erlang:monitor(process,Proxy),
+ ok = logger_olp:stop(Proxy),
+ receive
+ {'DOWN',Ref,_,_,_Reason} ->
+ Logger = erlang:system_info(system_logger),
+ logger_proxy:restart(),
+ poll_restarted(10)
+ after 5000 ->
+ ct:fail(proxy_not_terminated)
+ end,
+
+ Proxy1 = whereis(logger_proxy),
+ Proxy1 = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
+poll_restarted(0) ->
+ ct:fail(proxy_not_restarted);
+poll_restarted(N) ->
+ timer:sleep(1000),
+ case whereis(logger_proxy) of
+ undefined ->
+ poll_restarted(N-1);
+ _Pid ->
+ ok
+ end.
+
+%% Logger handler callback
+log(#{meta:=Meta},#{config:=Pid}) ->
+ Pid ! {logged,Meta}.
+
+%% Check that the log event from the handler callback function log/2 is received
+ensure(Match) ->
+ receive {logged,Meta} ->
+ case maps:with(maps:keys(Match),Meta) of
+ Match -> ok;
+ _NoMatch -> {error,Match,Meta,test_server:messages_get()}
+ end
+ after ?ENSURE_TIME -> {error,Match,test_server:messages_get()}
+ end.
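
%% For illustration only: a minimal sketch (queue lengths are just examples)
%% of the proxy configuration API exercised by config/1 above;
%% update_proxy_config/1 merges into the current settings, set_proxy_config/1
%% starts over from the defaults, and get_proxy_config/0 reads the result.
-module(proxy_config_sketch).
-export([raise_limits/0]).

raise_limits() ->
    ok = logger:update_proxy_config(#{sync_mode_qlen => 500,
                                      drop_mode_qlen => 1000}),
    #{sync_mode_qlen := 500, drop_mode_qlen := 1000} =
        logger:get_proxy_config(),
    ok.
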
diff --git a/lib/kernel/test/logger_simple_h_SUITE.erl b/lib/kernel/test/logger_simple_h_SUITE.erl
index 79e5c057ad..e0ad792bdb 100644
--- a/lib/kernel/test/logger_simple_h_SUITE.erl
+++ b/lib/kernel/test/logger_simple_h_SUITE.erl
@@ -117,8 +117,7 @@ replace_default(Config) ->
log(Node, critical, [?str,[?keyval_rep]]),
log(Node, notice, [["fake",string,"line:",?LINE]]),
- Env = rpc:call(Node, application, get_env, [kernel, logger, []]),
- ok = rpc:call(Node, logger, add_handlers, [Env]),
+ ok = rpc:call(Node, logger, add_handlers, [kernel]),
ok.
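
%% For illustration only: a minimal sketch of logger:add_handlers/1 as called
%% above; given an application name it reads that application's logger
%% configuration from the application environment and adds the handlers
%% declared there, so the caller no longer has to fetch the environment
%% itself.
-module(add_handlers_sketch).
-export([install/0]).

install() ->
    ok = logger:add_handlers(kernel).
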
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
index ca54458ac1..16ab0e97fc 100644
--- a/lib/kernel/test/logger_std_h_SUITE.erl
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -25,10 +25,15 @@
-include_lib("kernel/include/logger.hrl").
-include_lib("kernel/src/logger_internal.hrl").
-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("kernel/src/logger_olp.hrl").
-include_lib("stdlib/include/ms_transform.hrl").
-include_lib("kernel/include/file.hrl").
--define(check_no_log, [] = test_server:messages_get()).
+-define(check_no_log,
+ begin
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ [] = test_server:messages_get()
+ end).
-define(check(Expected),
receive
{log,Expected} ->
@@ -66,6 +71,14 @@ init_per_group(_Group, Config) ->
end_per_group(_Group, _Config) ->
ok.
+init_per_testcase(reopen_changed_log=TC, Config) ->
+ case os:type() of
+ {win32,_} ->
+ {skip,"This test can only work with inodes, i.e. not on Windows"};
+ _ ->
+ ct:print("********** ~w **********", [TC]),
+ Config
+ end;
init_per_testcase(TestHooksCase, Config) when
TestHooksCase == write_failure;
TestHooksCase == sync_failure ->
@@ -107,14 +120,16 @@ all() ->
add_remove_instance_standard_error,
add_remove_instance_file1,
add_remove_instance_file2,
+ add_remove_instance_file3,
+ add_remove_instance_file4,
default_formatter,
+ filter_config,
errors,
formatter_fail,
config_fail,
crash_std_h_to_file,
crash_std_h_to_disk_log,
bad_input,
- info_and_reset,
reconfig,
file_opts,
sync,
@@ -135,11 +150,18 @@ all() ->
mem_kill_new,
mem_kill_std,
restart_after,
- handler_requests_under_load
+ handler_requests_under_load,
+ recreate_deleted_log,
+ reopen_changed_log,
+ rotate_size,
+ rotate_size_compressed,
+ rotate_size_reopen,
+ rotation_opts,
+ rotation_opts_restart_handler
].
add_remove_instance_tty(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_std_h,{type,tty}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{type:=tty}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{type => tty},
filter_default=>log,
@@ -172,10 +194,27 @@ add_remove_instance_file2(Config) ->
add_remove_instance_file2(cleanup,_Config) ->
logger_std_h_remove().
-add_remove_instance_file(Log, Type) ->
+add_remove_instance_file3(_Config) ->
+ Log = atom_to_list(?MODULE),
+ StdHConfig = #{type=>file},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file3(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file4(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog4.txt"),
+ StdHConfig = #{file=>Log,modes=>[]},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file4(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file(Log, Type) when not is_map(Type) ->
+ add_remove_instance_file(Log,#{type=>Type});
+add_remove_instance_file(Log, StdHConfig) when is_map(StdHConfig) ->
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => Type},
+ #{config => StdHConfig,
filter_default=>stop,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
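
%% For illustration only: a minimal sketch (handler id and path are made up)
%% of the file-handler configuration variants exercised by
%% add_remove_instance_file3/4 above; either type => file without an explicit
%% name, or an explicit file plus an optional modes list for the open modes.
-module(std_h_file_sketch).
-export([add/0]).

add() ->
    ok = logger:add_handler(my_file_h, logger_std_h,
                            #{config => #{file => "log/example.log",
                                          modes => [append]}}).
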
@@ -204,6 +243,20 @@ default_formatter(_Config) ->
match = re:run(Msg,"=NOTICE REPORT====.*\n"++M1,[{capture,none}]),
ok.
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([olp],HConfig),
+
+ FakeFullHConfig = HConfig#{olp=>{regname,self(),erlang:make_ref()}},
+ #{config:=HConfig} =
+ logger_std_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
errors(Config) ->
Dir = ?config(priv_dir,Config),
Log = filename:join(Dir,?FUNCTION_NAME),
@@ -219,7 +272,7 @@ errors(Config) ->
{error,
{handler_not_added,
- {invalid_config,logger_std_h,{type,faulty_type}}}} =
+ {invalid_config,logger_std_h,#{type:=faulty_type}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{type => faulty_type}}),
@@ -230,15 +283,16 @@ errors(Config) ->
_ ->
NoDir = lists:concat(["/",?MODULE,"_dir"]),
{error,
- {handler_not_added,{{open_failed,NoDir,eacces},_}}} =
+ {handler_not_added,{open_failed,NoDir,eacces}}} =
logger:add_handler(myh2,logger_std_h,
#{config=>#{type=>{file,NoDir}}})
end,
{error,
- {handler_not_added,{{open_failed,Log,_},_}}} =
+ {handler_not_added,
+ {invalid_config,logger_std_h,#{modes:=bad_file_opt}}}} =
logger:add_handler(myh3,logger_std_h,
- #{config=>#{type=>{file,Log,[bad_file_opt]}}}),
+ #{config=>#{type=>{file,Log,bad_file_opt}}}),
ok = logger:notice(?msg).
@@ -280,7 +334,7 @@ formatter_fail(Config) ->
ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}),
logger:notice(?msg,?domain),
try_match_file(Log,
- escape(Got3)++"FORMATTER ERROR: bad_return_value",
+ escape(Got3)++"FORMATTER ERROR: bad return value",
5000),
%% Check that handler is still alive and was never dead
@@ -293,25 +347,27 @@ formatter_fail(cleanup,_Config) ->
logger:remove_handler(?MODULE).
config_fail(_Config) ->
- {error,{handler_not_added,{invalid_config,logger_std_h,{bad,bad}}}} =
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{bad:=bad}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{bad => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
{error,{handler_not_added,{invalid_config,logger_std_h,
- {restart_type,bad}}}} =
+ #{restart_type:=bad}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{restart_type => bad},
filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{handler_not_added,{invalid_levels,{_,1,_}}}} =
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=1}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{drop_mode_qlen=>1}}),
- {error,{handler_not_added,{invalid_levels,{43,42,_}}}} =
+ {error,{handler_not_added,{invalid_olp_levels,#{sync_mode_qlen:=43,
+ drop_mode_qlen:=42}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{sync_mode_qlen=>43,
drop_mode_qlen=>42}}),
- {error,{handler_not_added,{invalid_levels,{_,43,42}}}} =
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=43,
+ flush_qlen:=42}}}} =
logger:add_handler(?MODULE,logger_std_h,
#{config => #{drop_mode_qlen=>43,
flush_qlen=>42}}),
@@ -319,18 +375,24 @@ config_fail(_Config) ->
ok = logger:add_handler(?MODULE,logger_std_h,
#{filter_default=>log,
formatter=>{?MODULE,self()}}),
- {error,{illegal_config_change,_,_}} =
+ {error,{illegal_config_change,logger_std_h,#{type:=_},#{type:=_}}} =
logger:set_handler_config(?MODULE,config,
#{type=>{file,"file"}}),
- {error,{illegal_config_change,_,_}} =
- logger:set_handler_config(?MODULE,id,bad),
- {error,{invalid_levels,_}} =
+
+ {error,{invalid_olp_levels,_}} =
logger:set_handler_config(?MODULE,config,
#{sync_mode_qlen=>100,
flush_qlen=>99}),
- {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} =
+ {error,{invalid_config,logger_std_h,#{filesync_rep_int:=2000}}} =
logger:set_handler_config(?MODULE, config,
#{filesync_rep_int => 2000}),
+
+    %% Read-only fields may (accidentally) be included in the change,
+    %% but they won't take effect
+ {ok,C} = logger:get_handler_config(?MODULE),
+ ok = logger:set_handler_config(?MODULE,config,#{olp=>dummyvalue}),
+ {ok,C} = logger:get_handler_config(?MODULE),
+
ok.
config_fail(cleanup,_Config) ->
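Since the error reasons in this hunk change from tuples to maps, callers that match on them need map patterns. A minimal sketch, with an illustrative handler id and not taken from the suite:

%% The overload-protection levels must satisfy
%% sync_mode_qlen =< drop_mode_qlen =< flush_qlen.
case logger:add_handler(my_h, logger_std_h,
                        #{config => #{drop_mode_qlen => 1}}) of
    ok ->
        ok;
    {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen := 1}}}} ->
        bad_olp_levels
end.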
@@ -396,10 +458,13 @@ crash_std_h(Config,Func,Var,Type,Log) ->
%% logger would send the log event to the logger process here instead
%% of logging it itself.
log_on_remote_node(Node,Msg) ->
+ Pid = self(),
_ = spawn_link(Node,
fun() -> erlang:group_leader(whereis(user),self()),
- logger:notice(Msg)
+ logger:notice(Msg),
+ Pid ! done
end),
+ receive done -> ok end,
ok.
@@ -427,14 +492,7 @@ sync_and_read(Node,file,Log) ->
end.
bad_input(_Config) ->
- {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType"),
- {error,{badarg,{info,["BadType"]}}} = logger_std_h:info("BadType"),
- {error,{badarg,{reset,["BadType"]}}} = logger_std_h:reset("BadType").
-
-
-info_and_reset(_Config) ->
- #{id := ?STANDARD_HANDLER} = logger_std_h:info(?STANDARD_HANDLER),
- ok = logger_std_h:reset(?STANDARD_HANDLER).
+ {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType").
reconfig(Config) ->
Dir = ?config(priv_dir,Config),
@@ -444,9 +502,10 @@ reconfig(Config) ->
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
- #{id := ?MODULE,
- type := standard_io,
- file_ctrl_pid := FileCtrlPid,
+ #{%id := ?MODULE,
+ cb_state:=#{handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
+ filesync_repeat_interval := no_repeat},
sync_mode_qlen := ?SYNC_MODE_QLEN,
drop_mode_qlen := ?DROP_MODE_QLEN,
flush_qlen := ?FLUSH_QLEN,
@@ -456,9 +515,25 @@ reconfig(Config) ->
overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
- overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
- filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
- logger_std_h:info(?MODULE),
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER} =
+ logger_olp:info(h_proc_name()),
+
+ {ok,
+ #{config:=
+ #{type := standard_io,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := no_repeat} =
+ DefaultHConf}}
+ = logger:get_handler_config(?MODULE),
ok = logger:set_handler_config(?MODULE, config,
#{sync_mode_qlen => 1,
@@ -471,10 +546,11 @@ reconfig(Config) ->
overload_kill_qlen => 100000,
overload_kill_mem_size => 10000000,
overload_kill_restart_after => infinity,
- filesync_repeat_interval => no_repeat}),
- #{id := ?MODULE,
- type := standard_io,
- file_ctrl_pid := FileCtrlPid,
+ filesync_repeat_interval => 5000}),
+ #{%id := ?MODULE,
+ cb_state := #{handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
+ filesync_repeat_interval := no_repeat},
sync_mode_qlen := 1,
drop_mode_qlen := 2,
flush_qlen := 3,
@@ -484,8 +560,77 @@ reconfig(Config) ->
overload_kill_enable := true,
overload_kill_qlen := 100000,
overload_kill_mem_size := 10000000,
- overload_kill_restart_after := infinity,
- filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE),
+ overload_kill_restart_after := infinity} = logger_olp:info(h_proc_name()),
+
+ {ok,#{config :=
+ #{type := standard_io,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ filesync_repeat_interval := no_repeat} = HConf}} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConf#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = DefaultHConf#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = DefaultHConf#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = DefaultHConf#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir,lists:concat([?FUNCTION_NAME,".log"])),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => {file,File}},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ {ok,#{config:=#{filesync_repeat_interval:=FSI}=FileHConfig}} =
+ logger:get_handler_config(?MODULE),
+ ok = logger:update_handler_config(?MODULE,config,
+ #{filesync_repeat_interval=>FSI+2000}),
+ {ok,#{config:=C5}} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = FileHConfig#{filesync_repeat_interval=>FSI+2000},
+
+ %% You are not allowed to actively set 'type' at runtime, since
+ %% this is a write-once field.
+ {error, {illegal_config_change,logger_std_h,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>standard_io}),
+ {ok,#{config:=C6}} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = C5,
+
+ %% ... but if you don't specify 'type', then set_handler_config shall
+ %% NOT reset it to its default value
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C7}} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = FileHConfig#{sync_mode_qlen=>1},
ok.
reconfig(cleanup, _Config) ->
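The C1-C4 and C7 assertions above rely on the semantic difference between logger:update_handler_config/3 and logger:set_handler_config/3. A minimal sketch of that difference, assuming a logger_std_h handler named my_h is already installed (the handler id is illustrative):

%% update/3 merges the given map into the current handler config;
%% keys that are not mentioned keep their current values.
ok = logger:update_handler_config(my_h, config, #{sync_mode_qlen => 1}),

%% set/3 replaces the config map; unspecified writable keys fall back
%% to their defaults, while write-once fields (type/file/modes) keep
%% their existing values.
ok = logger:set_handler_config(my_h, config, #{sync_mode_qlen => 1}).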
@@ -495,22 +640,51 @@ reconfig(cleanup, _Config) ->
file_opts(Config) ->
Dir = ?config(priv_dir,Config),
Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
- BadFileOpts = [raw],
- BadType = {file,Log,BadFileOpts},
- {error,{handler_not_added,{{open_failed,Log,enoent},_}}} =
- logger:add_handler(?MODULE, logger_std_h,
- #{config => #{type => BadType}}),
+ MissingOpts = [raw],
+ Type1 = {file,Log,MissingOpts},
+ ok = logger:add_handler(?MODULE, logger_std_h,
+ #{config => #{type => Type1}}),
+ {ok,#{config:=#{type:=file,file:=Log,modes:=Modes1}}} =
+ logger:get_handler_config(?MODULE),
+ [append,delayed_write,raw] = lists:sort(Modes1),
+ ok = logger:remove_handler(?MODULE),
OkFileOpts = [raw,append],
OkType = {file,Log,OkFileOpts},
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => OkType},
+ #{config => #{type => OkType}, % old format
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ ModOpts = [delayed_write|OkFileOpts],
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => file,
+ file => Log,
+ modes => OkFileOpts}, % new format
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,self()}}),
- #{type := OkType} = logger_std_h:info(?MODULE),
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger:get_handler_config(?MODULE),
logger:notice(M1=?msg,?domain),
?check(M1),
B1 = ?bin(M1),
@@ -526,17 +700,16 @@ sync(Config) ->
Type = {file,Log},
ok = logger:add_handler(?MODULE,
logger_std_h,
- #{config => #{type => Type},
+ #{config => #{type => Type,
+ file_check => 10000},
filter_default=>log,
filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
formatter=>{?MODULE,nl}}),
%% check repeated filesync happens
- start_tracer([{logger_std_h, write_to_dev, 5},
- {logger_std_h, sync_dev, 4},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"first\n">>},
- {logger_std_h, sync_dev},
{file,datasync}]),
logger:notice("first", ?domain),
@@ -544,11 +717,9 @@ sync(Config) ->
check_tracer(filesync_rep_int()*2),
%% check that explicit filesync is only done once
- start_tracer([{logger_std_h, write_to_dev, 5},
- {logger_std_h, sync_dev, 4},
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"second\n">>},
- {logger_std_h, sync_dev},
{file,datasync},
{no_more,500}
]),
@@ -561,43 +732,39 @@ sync(Config) ->
%% check that if there's no repeated filesync active,
%% a filesync is still performed when handler goes idle
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => no_repeat}),
- no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
- %% The following timer is to make sure the time from last log
- %% ("second") to next ("third") is long enough, so the a flush is
- %% triggered by the idle timeout between "thrid" and "fourth".
- timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
- start_tracer([{logger_std_h, write_to_dev, 5},
- {logger_std_h, sync_dev, 4},
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
+ no_repeat = maps:get(filesync_repeat_interval,
+ maps:get(cb_state, logger_olp:info(h_proc_name()))),
+ start_tracer([{logger_std_h, write_to_dev, 2},
{file, datasync, 1}],
[{logger_std_h, write_to_dev, <<"third\n">>},
- {logger_std_h, sync_dev},
{file,datasync},
{logger_std_h, write_to_dev, <<"fourth\n">>},
- {logger_std_h, sync_dev},
{file,datasync}]),
logger:notice("third", ?domain),
%% wait for automatic filesync
- timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ timer:sleep(?IDLE_DETECT_TIME*2),
logger:notice("fourth", ?domain),
%% wait for automatic filesync
- check_tracer(?IDLE_DETECT_TIME_MSEC*2),
+ check_tracer(?IDLE_DETECT_TIME*2),
%% switch repeated filesync on and verify that the looping works
SyncInt = 1000,
WaitT = 4500,
- OneSync = {logger_std_h,handle_cast,repeated_filesync},
- %% receive 1 initial repeated_filesync, then 1 per sec
- start_tracer([{logger_std_h,handle_cast,2}],
- [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
-
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => SyncInt}),
- SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{{logger_h_common,handle_cast,2},
+ [{[repeated_filesync,'_'],[],[]}]}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => SyncInt}),
+ SyncInt = maps:get(filesync_repeat_interval,
+ maps:get(cb_state,logger_olp:info(h_proc_name()))),
timer:sleep(WaitT),
- logger:set_handler_config(?MODULE, config,
- #{filesync_repeat_interval => no_repeat}),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
check_tracer(100),
ok.
sync(cleanup, _Config) ->
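The reworked trace setup above passes an optional match specification together with the {M,F,A} tuple, so only the repeated_filesync casts are traced. The underlying dbg call this expands to is equivalent to the following sketch (not part of the suite):

%% Trace logger_h_common:handle_cast/2 only when the first argument
%% is the atom repeated_filesync; the second argument is ignored.
{ok,_} = dbg:tpl(logger_h_common, handle_cast, 2,
                 [{[repeated_filesync,'_'], [], []}]).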
@@ -652,11 +819,9 @@ sync_failure(Config) ->
rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
SyncInt = 500,
- ok = rpc:call(Node, logger, set_handler_config,
+ ok = rpc:call(Node, logger, update_handler_config,
[?STANDARD_HANDLER, config,
#{filesync_repeat_interval => SyncInt}]),
- Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]),
- SyncInt = maps:get(filesync_repeat_interval, Info),
ok = log_on_remote_node(Node, "Logged1"),
?check_no_log,
@@ -718,7 +883,7 @@ op_switch_to_sync_file(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% TRecvPid = start_op_trace(),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Lines = count_lines(Log),
@@ -747,7 +912,7 @@ op_switch_to_sync_tty(Config) ->
drop_mode_qlen => NumOfReqs+1,
flush_qlen => 2*NumOfReqs,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
ok.
op_switch_to_sync_tty(cleanup, _Config) ->
@@ -770,7 +935,7 @@ op_switch_to_drop_file(Config) ->
flush_qlen =>
Procs*NumOfReqs*Bursts,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
%% It sometimes happens that the handler gets the
%% requests in a slow enough pace so that dropping
%% never occurs. Therefore, lets generate a number of
@@ -807,7 +972,7 @@ op_switch_to_drop_tty(Config) ->
flush_qlen =>
Procs*NumOfReqs+1,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
ok.
op_switch_to_drop_tty(cleanup, _Config) ->
@@ -832,7 +997,7 @@ op_switch_to_flush_file(Config) ->
drop_mode_qlen => 300,
flush_qlen => 300,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1500,
Procs = 10,
Bursts = 10,
@@ -879,7 +1044,7 @@ op_switch_to_flush_tty(Config) ->
drop_mode_qlen => 100,
flush_qlen => 100,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 1000,
Procs = 100,
send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
@@ -895,7 +1060,7 @@ limit_burst_disabled(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -915,7 +1080,7 @@ limit_burst_enabled_one(Config) ->
burst_limit_window_time => 2000,
drop_mode_qlen => 200,
flush_qlen => 300}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -936,7 +1101,7 @@ limit_burst_enabled_period(Config) ->
burst_limit_window_time => BurstTWin,
drop_mode_qlen => 20000,
flush_qlen => 20001}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
Windows = 3,
Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
@@ -956,7 +1121,7 @@ kill_disabled(Config) ->
HConfig#{config=>StdHConfig#{overload_kill_enable=>false,
overload_kill_qlen=>10,
overload_kill_mem_size=>100}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
NumOfReqs = 100,
send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
Logged = count_lines(Log),
@@ -977,7 +1142,7 @@ qlen_kill_new(Config) ->
overload_kill_qlen=>10,
overload_kill_mem_size=>Mem0+50000,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -986,7 +1151,7 @@ qlen_kill_new(Config) ->
receive
{'DOWN', MRef, _, _, Info} ->
case Info of
- {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ {shutdown,{overloaded,QLen,Mem}} ->
ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
killed ->
ct:pal("Slow shutdown, handler process was killed!", [])
@@ -996,7 +1161,7 @@ qlen_kill_new(Config) ->
ok
after
5000 ->
- Info = logger_std_h:info(?MODULE),
+ Info = logger_olp:info(h_proc_name()),
ct:pal("Handler state = ~p", [Info]),
ct:fail("Handler not dead! It should not have survived this!")
end.
@@ -1011,7 +1176,7 @@ qlen_kill_std(_Config) ->
%% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
%% Log = filename:join(Dir, File),
%% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
- %% ok = rpc:call(Node, logger, set_handler_config,
+ %% ok = rpc:call(Node, logger, update_handler_config,
%% [?STANDARD_HANDLER, config,
%% #{overload_kill_enable=>true,
%% overload_kill_qlen=>10,
@@ -1028,7 +1193,7 @@ mem_kill_new(Config) ->
overload_kill_qlen=>50000,
overload_kill_mem_size=>Mem0+500,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
MRef = erlang:monitor(process, Pid0),
NumOfReqs = 100,
Procs = 4,
@@ -1037,7 +1202,7 @@ mem_kill_new(Config) ->
receive
{'DOWN', MRef, _, _, Info} ->
case Info of
- {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ {shutdown,{overloaded,QLen,Mem}} ->
ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
killed ->
ct:pal("Slow shutdown, handler process was killed!", [])
@@ -1047,7 +1212,7 @@ mem_kill_new(Config) ->
ok
after
5000 ->
- Info = logger_std_h:info(?MODULE),
+ Info = logger_olp:info(h_proc_name()),
ct:pal("Handler state = ~p", [Info]),
ct:fail("Handler not dead! It should not have survived this!")
end.
@@ -1067,7 +1232,7 @@ restart_after(Config) ->
HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>infinity}},
- ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
MRef1 = erlang:monitor(process, whereis(h_proc_name())),
%% kill handler
send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
@@ -1078,18 +1243,19 @@ restart_after(Config) ->
ok
after
5000 ->
- Info1 = logger_std_h:info(?MODULE),
+ Info1 = logger_olp:info(h_proc_name()),
ct:pal("Handler state = ~p", [Info1]),
ct:fail("Handler not dead! It should not have survived this!")
end,
-
+
{Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+
NewHConfig2 =
HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
overload_kill_qlen=>10,
overload_kill_restart_after=>RestartAfter}},
- ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
Pid0 = whereis(h_proc_name()),
MRef2 = erlang:monitor(process, Pid0),
%% kill handler
@@ -1102,7 +1268,7 @@ restart_after(Config) ->
ok
after
5000 ->
- Info2 = logger_std_h:info(?MODULE),
+ Info2 = logger_olp:info(h_proc_name()),
ct:pal("Handler state = ~p", [Info2]),
ct:fail("Handler not dead! It should not have survived this!")
end,
@@ -1123,12 +1289,16 @@ handler_requests_under_load(Config) ->
drop_mode_qlen => 1000,
flush_qlen => 2000,
burst_limit_enable => false}},
- ok = logger:set_handler_config(?MODULE, NewHConfig),
- Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
- {info,[]},
- {reset,[]},
- {change_config,[]}])
- end),
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(
+ fun() -> send_requests(1,[{logger_std_h,filesync,[?MODULE],[]},
+ {logger_olp,info,[h_proc_name()],[]},
+ {logger_olp,reset,[h_proc_name()],[]},
+ {logger,update_handler_config,
+ [?MODULE, config,
+ #{overload_kill_enable => false}],
+ []}])
+ end),
Sent = send_burst({t,10000}, seq, {chars,79}, notice),
Pid ! {self(),finish},
ReqResult = receive {Pid,Result} -> Result end,
@@ -1139,41 +1309,390 @@ handler_requests_under_load(Config) ->
[E || E <- Res,
is_tuple(E) andalso (element(1,E) == error)]
end,
- Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult],
- NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
+ Errors = [{Func,FindError(Res)} || {_,Func,_,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,_,_,Res}, N) -> N + length(Res) end,
+ 0, ReqResult),
ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
ok = file_delete(Log).
handler_requests_under_load(cleanup, _Config) ->
ok = stop_handler(?MODULE).
-send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
+recreate_deleted_log(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ logger:notice("first",?domain),
+ logger_std_h:filesync(?MODULE),
+ ok = file:rename(Log,Log++".old"),
+ logger:notice("second",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,<<"first\n">>} = file:read_file(Log++".old"),
+ {ok,<<"second\n">>} = file:read_file(Log),
+ ok.
+recreate_deleted_log(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+reopen_changed_log(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ logger:notice("first",?domain),
+ logger_std_h:filesync(?MODULE),
+ ok = file:rename(Log,Log++".old"),
+ ok = file:write_file(Log,""),
+ logger:notice("second",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,<<"first\n">>} = file:read_file(Log++".old"),
+ {ok,<<"second\n">>} = file:read_file(Log),
+ ok.
+reopen_changed_log(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1005}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ ok.
+rotate_size(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size_compressed(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ #{config=>#{max_no_bytes=>1000,
+ max_no_files=>2,
+ compress_on_rotate=>true}}),
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".0.gz"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".1.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=38}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ ok.
+rotate_size_compressed(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotate_size_reopen(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ {ok,HConfig} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(?MODULE,maps:get(module,HConfig),HConfig),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=580}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ ok.
+rotate_size_reopen(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotation_opts(Config) ->
+ {Log,_HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ #{max_no_bytes:=infinity,
+ max_no_files:=0,
+ compress_on_rotate:=false} = StdHConfig,
+
+ %% Test bad rotation config
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_bytes=>0}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_files=>infinity}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{compress_on_rotate=>undefined}),
+
+
+ %% Test good rotation config - start with no rotation
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=200}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+ %% Turn on rotation, check that existing file is rotated since its
+ %% size exceeds max_no_bytes
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0 = Log++".0",
+ {ok,#file_info{size=200}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count and check that nothing changes with existing files
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>200,
+ max_no_files=>3}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Add more log events and see that extended size and count works
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".2"),
+ [_,_,_] = filelib:wildcard(Log++".*"),
+
+ %% Reduce count and check that archive files that exceed the new
+ %% count are removed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count again, and turn on compression. Check
+ %% that archives are compressed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2,
+ compress_on_rotate=>true}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0gz = Log0++".gz",
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ [Log0gz] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".1.gz"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Reduce count and turn off compression. Check that archives that
+ %% exceed the new count are removed, and the rest are
+ %% uncompressed.
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1,
+ compress_on_rotate=>false}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Check that config and handler state agree on the current rotation settings
+ {ok,#{config:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger:get_handler_config(?MODULE),
+ #{cb_state:=#{handler_state:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger_olp:info(h_proc_name()),
+ ok.
+rotation_opts(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+rotation_opts_restart_handler(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+
+ %% Fill all logs
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,15)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Stop/start handler and turn off rotation. Check that archives are removed.
+ {ok,#{config:=StdHConfig1}=HConfig1} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig1#{config=>StdHConfig1#{max_no_bytes=>infinity}}),
+ timer:sleep(100),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+ %% Add some log events and check that file is no longer rotated.
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=260}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+ %% Stop/start handler and turn on rotation. Check that the file is rotated.
+ {ok,#{config:=StdHConfig2}=HConfig2} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig2#{config=>StdHConfig2#{max_no_bytes=>100,
+ max_no_files=>2}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=80}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".1"),
+
+ %% Stop/start handler, reduce count and turn on compression. Check
+ %% that excess archives are removed, and the rest compressed.
+ {ok,#{config:=StdHConfig3}=HConfig3} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig3#{config=>StdHConfig3#{max_no_bytes=>75,
+ max_no_files=>1,
+ compress_on_rotate=>true}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".0.gz"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ %% Stop/start handler and turn off compression. Check that archives
+ %% are decompressed.
+ {ok,#{config:=StdHConfig4}=HConfig4} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig4#{config=>StdHConfig4#{compress_on_rotate=>false}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=80}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ ok.
+rotation_opts_restart_handler(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
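For reference, the rotation options exercised by the test cases above can also be given when the handler is added. A minimal sketch with illustrative handler id, file name, and limits:

ok = logger:add_handler(my_rotating_h, logger_std_h,
                        #{config => #{type => file,
                                      file => "log/my_app.log",
                                      max_no_bytes => 100000,    % rotate when the file exceeds this size
                                      max_no_files => 5,         % keep my_app.log.0 .. my_app.log.4
                                      compress_on_rotate => true % archived files are gzipped (.gz)
                                     }}).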
+%%%-----------------------------------------------------------------
+%%%
+send_requests(TO, Reqs = [{Mod,Func,Args,Res}|Rs]) ->
receive
{From,finish} ->
From ! {self(),Reqs}
after
TO ->
- Result =
- case Req of
- change_config ->
- logger:set_handler_config(HName, config,
- #{overload_kill_enable =>
- false});
- Func ->
- logger_std_h:Func(HName)
- end,
- send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}])
+ Result = apply(Mod,Func,Args),
+ send_requests(TO, Rs ++ [{Mod,Func,Args,[Result|Res]}])
end.
%%%-----------------------------------------------------------------
%%%
-start_handler(Name, TTY, Config) when TTY == standard_io;
- TTY == standard_error->
+start_handler(Name, TTY, _Config) when TTY == standard_io;
+ TTY == standard_error->
ok = logger:add_handler(Name,
logger_std_h,
#{config => #{type => TTY},
- filter_default=>log,
- filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
formatter=>{?MODULE,op}}),
{ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
{HConfig,StdHConfig};
@@ -1187,12 +1706,17 @@ start_handler(Name, FuncName, Config) ->
ok = logger:add_handler(Name,
logger_std_h,
#{config => #{type => Type},
- filter_default=>log,
- filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
formatter=>{?MODULE,op}}),
{ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
{Log,HConfig,StdHConfig}.
+
+filter_only_this_domain(Name) ->
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,super,[Name]}}}].
+
stop_handler(Name) ->
R = logger:remove_handler(Name),
ct:pal("Handler ~p stopped! Result: ~p", [Name,R]),
@@ -1422,7 +1946,7 @@ start_op_trace() ->
{ok,_} = dbg:p(self(), [c]),
MS1 = dbg:fun2ms(fun([_]) -> return_trace() end),
- {ok,_} = dbg:tp(logger_h_common, check_load, 1, MS1),
+ {ok,_} = dbg:tpl(logger_h_common, check_load, 1, MS1),
{ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []),
@@ -1496,7 +2020,10 @@ analyse(Msgs) ->
start_tracer(Trace,Expected) ->
Pid = self(),
- FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)),
+ FileCtrlPid = maps:get(file_ctrl_pid,
+ maps:get(handler_state,
+ maps:get(cb_state,
+ logger_olp:info(h_proc_name())))),
dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
dbg:p(whereis(h_proc_name()),[c]),
dbg:p(FileCtrlPid,[c]),
@@ -1504,7 +2031,9 @@ start_tracer(Trace,Expected) ->
ok.
tpl([{M,F,A}|Trace]) ->
- {ok,Match} = dbg:tpl(M,F,A,[]),
+ tpl([{{M,F,A},[]}|Trace]);
+tpl([{{M,F,A},MS}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,MS),
case lists:keyfind(matched,1,Match) of
{_,_,1} ->
ok;
@@ -1517,10 +2046,10 @@ tpl([{M,F,A}|Trace]) ->
tpl([]) ->
ok.
-tracer({trace,_,call,{logger_std_h,handle_cast,[Op|_]}},
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]}},
{Pid,[{Mod,Func,Op}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
-tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}},
+tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[Data,_]}},
{Pid,[{Mod,Func,Data}|Expected]}) ->
maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
@@ -1570,27 +2099,29 @@ h_proc_name() ->
h_proc_name(Name) ->
?name_to_reg_name(logger_std_h,Name).
-file_delete(Log) ->
- file:delete(Log).
-
wait_for_process_up(T) ->
- wait_for_process_up(h_proc_name(),T).
+ wait_for_process_up(?MODULE, h_proc_name(), T).
-wait_for_process_up(Name,T) ->
+wait_for_process_up(Name, RegName, T) ->
N = (T div 500) + 1,
- wait_for_process_up1(Name,N).
+ wait_for_process_up1(Name, RegName, N).
-wait_for_process_up1(_Name,0) ->
+wait_for_process_up1(_Name, _RegName, 0) ->
error;
-wait_for_process_up1(Name,N) ->
+wait_for_process_up1(Name, RegName, N) ->
timer:sleep(500),
- case whereis(Name) of
+ case whereis(RegName) of
Pid when is_pid(Pid) ->
- %% ct:pal("Process ~p up (~p tries left)",[Name,N]),
- {ok,Pid};
+ case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% ct:pal("Process ~p up (~p tries left)",[Name,N]),
+ {ok,Pid};
+ _ ->
+ wait_for_process_up1(Name, RegName, N-1)
+ end;
undefined ->
%% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]),
- wait_for_process_up1(Name,N-1)
+ wait_for_process_up1(Name, RegName, N-1)
end.
filesync_rep_int() ->
@@ -1598,3 +2129,7 @@ filesync_rep_int() ->
true -> 5500;
false -> ?FILESYNC_REPEAT_INTERVAL + 500
end.
+
+
+file_delete(Log) ->
+ file:delete(Log).
diff --git a/lib/kernel/test/logger_stress_SUITE.erl b/lib/kernel/test/logger_stress_SUITE.erl
new file mode 100644
index 0000000000..1a278fb1b2
--- /dev/null
+++ b/lib/kernel/test/logger_stress_SUITE.erl
@@ -0,0 +1,550 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_stress_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct_event.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+
+-ifdef(SAVE_STATS).
+ -define(COLLECT_STATS(_All_,_Procs_),
+ ct:pal("~p",[stats(_All_,_Procs_)])).
+-else.
+ -define(COLLECT_STATS(_All_,_Procs_), ok).
+-endif.
+
+-define(TEST_DURATION,120). % seconds
+
+suite() ->
+ [{timetrap,{minutes,3}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [allow_events,
+ reject_events,
+ std_handler,
+ disk_log_handler,
+ std_handler_time,
+ std_handler_time_big,
+ disk_log_handler_time,
+ disk_log_handler_time_big,
+ emulator_events,
+ remote_events,
+ remote_to_disk_log,
+ remote_emulator_events,
+ remote_emulator_to_disk_log].
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+%%%-----------------------------------------------------------------
+%% Time from log macro call to handler callback
+allow_events(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,?MODULE,#{}}]},
+ {logger_level,notice}]),
+ N = 100000,
+ {T,_} = timer:tc(fun() -> rpc:call(Node,?MODULE,nlogs,[N]) end),
+ IOPS = N * 1000/T, % log events allowed per millisecond
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+ {comment,io_lib:format("~.2f accepted events per millisecond",
+ [IOPS])}.
+
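For orientation on the metric above (illustrative numbers, not from a real run): timer:tc/1 returns microseconds, so the reported figure is events per millisecond; with N = 100000 and T = 2000000 us it comes out as 100000 * 1000 / 2000000 = 50 accepted events per millisecond.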
+%% Time from log macro call to reject (log level)
+reject_events(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,?MODULE,#{}}]},
+ {logger_level,error}]),
+ N = 1000000,
+ {T,_} = timer:tc(fun() -> rpc:call(Node,?MODULE,nlogs,[N]) end),
+ IOPS = N * 1000/T, % log events rejected per millisecond
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+ {comment,io_lib:format("~.2f rejected events per millisecond",
+ [IOPS])}.
+
+%% Cascading failure that produces gen_server and proc_lib reports -
+%% how many of the produced log events are actually written to a log
+%% with logger_std_h file handler.
+std_handler(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,"default.log"}}}}]}]),
+
+ cascade({Node,{logger_backend,log_allowed,2},[]},
+ {Node,{logger_std_h,write,4},[{default,logger_std_h_default}]},
+ fun otp_cascading/0).
+std_handler(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+%% Disable overload protection and just print a lot - measure time.
+%% The IOPS reported is the number of log events written per millisecond.
+std_handler_time(Config) ->
+ measure_handler_time(logger_std_h,#{type=>{file,"default.log"}},Config).
+std_handler_time(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+std_handler_time_big(Config) ->
+ measure_handler_time_big(logger_std_h,#{type=>{file,"default.log"}},Config).
+std_handler_time_big(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+%% Cascading failure that produces gen_server and proc_lib reports -
+%% how many of the produced log events are actually written to a log
+%% with logger_disk_log_h wrap file handler.
+disk_log_handler(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,logger_disk_log_h,#{}}]}]),
+ cascade({Node,{logger_backend,log_allowed,2},[]},
+ {Node,{logger_disk_log_h,write,4},
+ [{default,logger_disk_log_h_default}]},
+ fun otp_cascading/0).
+disk_log_handler(cleanup,_Config) ->
+ Files = filelib:wildcard("default.log.*"),
+ [_ = file:delete(F) || F <- Files],
+ ok.
+
+%% Disable overload protection and just print a lot - measure time.
+%% The IOPS reported is the number of log events written per millisecond.
+disk_log_handler_time(Config) ->
+ measure_handler_time(logger_disk_log_h,#{type=>halt},Config).
+disk_log_handler_time(cleanup,_Config) ->
+ _ = file:delete("default"),
+ ok.
+
+disk_log_handler_time_big(Config) ->
+ measure_handler_time_big(logger_disk_log_h,#{type=>halt},Config).
+disk_log_handler_time_big(cleanup,_Config) ->
+ _ = file:delete("default"),
+ ok.
+
+%% Cascading failure that produces log events from the emulator - how
+%% many of the produced log events pass through the proxy.
+emulator_events(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,?MODULE,#{}}]}]),
+ cascade({Node,{?MODULE,producer,0},[]},
+ {Node,{?MODULE,log,2},[{proxy,logger_proxy}]},
+ fun em_cascading/0).
+
+%% Cascading failure that produces gen_server and proc_lib reports on a
+%% remote node - how many of the produced log events pass through the
+%% proxy.
+remote_events(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,?MODULE,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{logger_backend,log_allowed,2},[{remote_proxy,logger_proxy}]},
+ {Node1,{?MODULE,log,2},[{local_proxy,logger_proxy}]},
+ fun otp_cascading/0).
+
+%% Cascading failure that produces gen_server and proc_lib reports on a
+%% remote node - how many of the produced log events are actually
+%% written to a log with logger_disk_log_h wrap file handler.
+remote_to_disk_log(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,logger_disk_log_h,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{logger_backend,log_allowed,2},[{remote_proxy,logger_proxy}]},
+ {Node1,{logger_disk_log_h,write,4},
+ [{local_proxy,logger_proxy},
+ {local_default,logger_disk_log_h_default}]},
+ fun otp_cascading/0).
+remote_to_disk_log(cleanup,_Config) ->
+ Files = filelib:wildcard("default.log.*"),
+ [_ = file:delete(F) || F <- Files],
+ ok.
+
+%% Cascading failure that produces log events from the emulator on a
+%% remote node - how many of the produced log events pass through the
+%% proxy.
+remote_emulator_events(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,?MODULE,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{?MODULE,producer,0},[{remote_proxy,logger_proxy}]},
+ {Node1,{?MODULE,log,2},[{local_proxy,logger_proxy}]},
+ fun em_cascading/0).
+
+%% Cascading failure that produces log events from the emulator on a
+%% remote node - how many of the produced log events are actually
+%% written to a log with logger_disk_log_h wrap file handler.
+remote_emulator_to_disk_log(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,logger_disk_log_h,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{?MODULE,producer,0},[{remote_proxy,logger_proxy}]},
+ {Node1,{logger_disk_log_h,write,4},
+ [{local_proxy,logger_proxy},
+ {local_default,logger_disk_log_h_default}]},
+ fun em_cascading/0).
+remote_emulator_to_disk_log(cleanup,_Config) ->
+ Files = filelib:wildcard("default.log.*"),
+ [_ = file:delete(F) || F <- Files],
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+measure_handler_time(Module,HCfg,Config) ->
+ measure_handler_time(Module,100000,small_fa(),millisecond,HCfg,#{},Config).
+
+measure_handler_time_big(Module,HCfg,Config) ->
+ FCfg = #{chars_limit=>4096, max_size=>1024},
+ measure_handler_time(Module,100,big_fa(),second,HCfg,FCfg,Config).
+
+measure_handler_time(Module,N,FA,Unit,HCfg,FCfg,Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,
+ [{handler,default,Module,
+ #{formatter => {logger_formatter,
+ maps:merge(#{legacy_header=>false,
+ single_line=>true},FCfg)},
+ config=>maps:merge(#{sync_mode_qlen => N+1,
+ drop_mode_qlen => N+1,
+ flush_qlen => N+1,
+ burst_limit_enable => false,
+ filesync_repeat_interval => no_repeat},
+ HCfg)}}]}]),
+ %% HPid = rpc:call(Node,erlang,whereis,[?name_to_reg_name(Module,default)]),
+ %% {links,[_,FCPid]} = rpc:call(Node,erlang,process_info,[HPid,links]),
+ T0 = erlang:monotonic_time(millisecond),
+ ok = rpc:call(Node,?MODULE,nlogs_wait,[N,FA,Module]),
+ %% ok = rpc:call(Node,fprof,apply,[fun ?MODULE:nlogs_wait/2,[N div 10,FA,Module],[{procs,[HPid,FCPid]}]]),
+ T1 = erlang:monotonic_time(millisecond),
+ T = T1-T0,
+ M = case Unit of
+ millisecond -> 1;
+ second -> 1000
+ end,
+ IOPS = M*N/T,
+ ct:pal("N: ~p~nT: ~p~nIOPS: ~.2f events per ~w",[N,T,IOPS,Unit]),
+ %% Stats = rpc:call(Node,logger_olp,info,[?name_to_reg_name(Module,default)]),
+ %% ct:pal("Stats: ~p",[Stats]),
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+ {comment,io_lib:format("~.2f events written per ~w",[IOPS,Unit])}.
+
+nlogs_wait(N,{F,A},Module) ->
+ group_leader(whereis(user),self()),
+ [?LOG_NOTICE(F,A) || _ <- lists:seq(1,N)],
+ wait(Module).
+
+wait(Module) ->
+ case Module:filesync(default) of
+ {error,handler_busy} ->
+ wait(Module);
+ ok ->
+ ok
+ end.
+
+small_fa() ->
+ Str = "\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "[\\]^_`abcdefghijklmnopqr",
+ {"~ts",[Str]}.
+
+big_fa() ->
+ {"~p",[lists:duplicate(1048576,"a")]}.
+
+nlogs(N) ->
+ group_leader(whereis(user),self()),
+ Str = "\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "[\\]^_`abcdefghijklmnopqr",
+ [?LOG_NOTICE(Str) || _ <- lists:seq(1,N)],
+ ok.
+
+%% cascade(ProducerInfo,ConsumerInfo,TestFun)
+cascade({PNode,PMFA,_PStatProcs},{CNode,CMFA,_CStatProcs},TestFun) ->
+ Tab = ets:new(counter,[set,public]),
+ ets:insert(Tab,{producer,0}),
+ ets:insert(Tab,{consumer,0}),
+ dbg:tracer(process,{fun tracer/2,{Tab,PNode,CNode}}),
+ dbg:n(PNode),
+ dbg:n(CNode),
+ dbg:cn(node()),
+ dbg:p(all,[call,arity]),
+ dbg:tpl(PMFA,[]),
+ dbg:tpl(CMFA,[]),
+
+ Pid = rpc:call(CNode,?MODULE,wrap_test,[PNode,TestFun]),
+ MRef = erlang:monitor(process,Pid),
+ TO = ?TEST_DURATION*1000,
+ receive {'DOWN',MRef,_,_,Reason} ->
+ ct:fail({remote_pid_down,Reason})
+ after TO ->
+ All = ets:lookup_element(Tab,producer,2),
+ Written = ets:lookup_element(Tab,consumer,2),
+ dbg:stop_clear(),
+ ?COLLECT_STATS(All,
+ [{PNode,P,Id} || {Id,P} <- _PStatProcs] ++
+ [{CNode,P,Id} || {Id,P} <- _CStatProcs]),
+ Ratio = Written/All * 100,
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,Ratio}]}),
+ {comment,io_lib:format("~p % (~p written, ~p produced)",
+ [round(Ratio),Written,All])}
+ end.
+
+wrap_test(Fun) ->
+ wrap_test(node(),Fun).
+wrap_test(Node,Fun) ->
+ reset(),
+ group_leader(whereis(user),self()),
+ rpc:call(Node,?MODULE,do_fun,[Fun]).
+
+do_fun(Fun) ->
+ reset(),
+ Fun().
+
+reset() ->
+ reset([logger_std_h_default, logger_disk_log_h_default, logger_proxy]).
+reset([P|Ps]) ->
+ is_pid(whereis(P)) andalso logger_olp:reset(P),
+ reset(Ps);
+reset([]) ->
+ ok.
+
+
+tracer({trace,_,call,{?MODULE,producer,_}},{Tab,_PNode,_CNode}=S) ->
+ ets:update_counter(Tab,producer,1),
+ S;
+tracer({trace,Pid,call,{logger_backend,log_allowed,_}},{Tab,PNode,_CNode}=S) when node(Pid)=:=PNode ->
+ ets:update_counter(Tab,producer,1),
+ S;
+tracer({trace,_,call,{?MODULE,log,_}},{Tab,_PNode,_CNode}=S) ->
+ ets:update_counter(Tab,consumer,1),
+ S;
+tracer({trace,_,call,{_,write,_}},{Tab,_PNode,_CNode}=S) ->
+ ets:update_counter(Tab,consumer,1),
+ S;
+tracer(_,S) ->
+ S.
+
+
+%%%-----------------------------------------------------------------
+%%% Collect statistics
+-define(STAT_KEYS,
+ [burst_drops,
+ calls,
+ casts,
+ drops,
+ flushed,
+ flushes,
+ freq,
+ last_qlen,
+ max_qlen,
+ time,
+ writes]).
+-define(EVENT_KEYS,
+ [calls,casts,flushed]).
+
+stats(All,Procs) ->
+ NI = [{Id,rpc:call(N,logger_olp,info,[P])} || {N,P,Id}<-Procs],
+ [{all,All}|[stats(Id,I,All) || {Id,I} <- NI]].
+
+stats(Id,Info,All) ->
+ S = maps:with(?STAT_KEYS,Info),
+ AllOnProc = lists:sum(maps:values(maps:with(?EVENT_KEYS,S))),
+ if All>0 ->
+ Writes = maps:get(writes,S),
+ {_,ActiveTime} = maps:get(time,S),
+ Rate = round(100*Writes/All),
+ RateOnProc =
+ if AllOnProc>0 ->
+ round(100*Writes/AllOnProc);
+ true ->
+ 0
+ end,
+ AvFreq =
+ if ActiveTime>0 ->
+ round(Writes/ActiveTime);
+ true ->
+ 0
+ end,
+ {Id,
+ {stats,S},
+ {rate,Rate},
+ {rate_on_proc,RateOnProc},
+ {av_freq,AvFreq}};
+ true ->
+ {Id,none}
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Spawn a lot of processes that crash repeatedly, causing a lot of
+%%% error reports from the emulator.
+em_cascading() ->
+ spawn(fun() -> super() end).
+
+super() ->
+ process_flag(trap_exit,true),
+ spawn_link(fun server/0),
+ [spawn_link(fun client/0) || _<-lists:seq(1,10000)],
+ super_loop().
+
+super_loop() ->
+ receive
+ {'EXIT',_,server} ->
+ spawn_link(fun server/0),
+ super_loop();
+ {'EXIT',_,_} ->
+ _L = lists:sum(lists:seq(1,10000)),
+ spawn_link(fun client/0),
+ super_loop()
+ end.
+
+client() ->
+ receive
+ after 1 ->
+ case whereis(server) of
+ Pid when is_pid(Pid) ->
+ ok;
+ undefined ->
+ producer(),
+ erlang:error(some_exception)
+ end
+ end,
+ client().
+
+server() ->
+ register(server,self()),
+ receive
+ after 3000 ->
+ exit(server)
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%% Create a supervisor tree with processes that crash repeatedly,
+%%% causing a lot of supervisor reports and crash reports
+otp_cascading() ->
+ {ok,Pid} = supervisor:start_link({local,otp_super}, ?MODULE, [otp_super]),
+ unlink(Pid),
+ Pid.
+
+otp_server_sup() ->
+ supervisor:start_link({local,otp_server_sup},?MODULE,[otp_server_sup]).
+
+otp_client_sup(N) ->
+ supervisor:start_link({local,otp_client_sup},?MODULE,[otp_client_sup,N]).
+
+otp_server() ->
+ gen_server:start_link({local,otp_server},?MODULE,[otp_server],[]).
+
+otp_client() ->
+ gen_server:start_link(?MODULE,[otp_client],[]).
+
+init([otp_super]) ->
+ {ok, {{one_for_one, 200, 10},
+ [{client_sup,
+ {?MODULE, otp_client_sup, [10000]},
+ permanent, 1000, supervisor, [?MODULE]},
+ {server_sup,
+ {?MODULE, otp_server_sup, []},
+ permanent, 1000, supervisor, [?MODULE]}
+ ]}};
+init([otp_server_sup]) ->
+ {ok, {{one_for_one, 2, 10},
+ [{server,
+ {?MODULE, otp_server, []},
+ permanent, 1000, worker, [?MODULE]}
+ ]}};
+init([otp_client_sup,N]) ->
+ spawn(fun() ->
+ [supervisor:start_child(otp_client_sup,[])
+ || _ <- lists:seq(1,N)]
+ end),
+ {ok, {{simple_one_for_one, N*10, 1},
+ [{client,
+ {?MODULE, otp_client, []},
+ permanent, 1000, worker, [?MODULE]}
+ ]}};
+init([otp_server]) ->
+ {ok, server, 10000};
+init([otp_client]) ->
+ {ok, client,1}.
+
+handle_info(timeout, client) ->
+ true = is_pid(whereis(otp_server)),
+ {noreply,client,1};
+handle_info(timeout, server) ->
+ exit(self(), some_error).
+
+%%%-----------------------------------------------------------------
+%%% Logger callbacks
+log(_LogEvent,_Config) ->
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Function to trace on for counting produced emulator messages
+producer() ->
+ ok.
diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl
index 81eb9ce5eb..be4bc427fb 100644
--- a/lib/kernel/test/logger_test_lib.erl
+++ b/lib/kernel/test/logger_test_lib.erl
@@ -28,11 +28,17 @@
post_end_per_testcase/5, post_end_per_suite/3]).
setup(Config,Vars) ->
+ Postfix = case proplists:get_value(postfix, Config) of
+ undefined -> "";
+ P -> ["_",P]
+ end,
FuncStr = lists:concat([proplists:get_value(suite, Config), "_",
- proplists:get_value(tc, Config)]),
+ proplists:get_value(tc, Config)|
+ Postfix]),
ConfigFileName = filename:join(proplists:get_value(priv_dir, Config), FuncStr),
file:write_file(ConfigFileName ++ ".config", io_lib:format("[{kernel, ~p}].",[Vars])),
- case test_server:start_node(proplists:get_value(tc, Config), slave,
+ Sname = lists:concat([proplists:get_value(tc,Config)|Postfix]),
+ case test_server:start_node(Sname, slave,
[{args, ["-pa ",filename:dirname(code:which(?MODULE)),
" -boot start_sasl -kernel start_timer true "
"-config ",ConfigFileName]}]) of
diff --git a/lib/kernel/test/prim_file_SUITE.erl b/lib/kernel/test/prim_file_SUITE.erl
index a02b5f87d1..2f465a15bc 100644
--- a/lib/kernel/test/prim_file_SUITE.erl
+++ b/lib/kernel/test/prim_file_SUITE.erl
@@ -1300,7 +1300,8 @@ e_delete(Config) when is_list(Config) ->
case os:type() of
{win32, _} ->
%% Remove a character device.
- {error, eacces} = ?PRIM_FILE:delete("nul");
+ expect({error, eacces}, {error, einval},
+ ?PRIM_FILE:delete("nul"));
_ ->
?PRIM_FILE:write_file_info(
Base, #file_info {mode=0}),
diff --git a/lib/kernel/test/sendfile_SUITE.erl b/lib/kernel/test/sendfile_SUITE.erl
index 0c0b1cbcb6..ad060aa05c 100644
--- a/lib/kernel/test/sendfile_SUITE.erl
+++ b/lib/kernel/test/sendfile_SUITE.erl
@@ -341,7 +341,21 @@ t_sendfile_closeduring(Config) ->
-1
end,
- ok = sendfile_send({127,0,0,1}, Send, 0).
+ ok = sendfile_send({127,0,0,1}, Send, 0, [{active,false}]),
+ [] = flush(),
+ ok = sendfile_send({127,0,0,1}, Send, 0, [{active,true}]),
+ [] = flush(),
+ ok.
+
+flush() ->
+ lists:reverse(flush([])).
+
+flush(Acc) ->
+ receive M ->
+ flush([M | Acc])
+ after 0 ->
+ Acc
+ end.
t_sendfile_crashduring(Config) ->
Filename = proplists:get_value(big_file, Config),
@@ -409,12 +423,16 @@ sendfile_send(Send) ->
sendfile_send(Host, Send) ->
sendfile_send(Host, Send, []).
sendfile_send(Host, Send, Orig) ->
+ sendfile_send(Host, Send, Orig, [{active,false}]).
+
+sendfile_send(Host, Send, Orig, SockOpts) ->
+
SFServer = spawn_link(?MODULE, sendfile_server, [self(), Orig]),
receive
{server, Port} ->
- {ok, Sock} = gen_tcp:connect(Host, Port,
- [binary,{packet,0},
- {active,false}]),
+ Opts = [binary,{packet,0}|SockOpts],
+ io:format("connect with opts = ~p\n", [Opts]),
+ {ok, Sock} = gen_tcp:connect(Host, Port, Opts),
Data = case proplists:get_value(arity,erlang:fun_info(Send)) of
1 ->
Send(Sock);
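The new SockOpts argument lets the same send path run both in passive ({active,false}) and active ({active,true}) mode, and the [] = flush() assertions above check that no unexpected messages are left in the test process's mailbox afterwards. For reference, a hedged sketch of how received data shows up under the two modes (generic gen_tcp usage, not code from this suite):

%% Passive mode: the owner pulls data explicitly.
recv_passive(Sock, Len) ->
    gen_tcp:recv(Sock, Len, 5000).

%% Active mode: data is delivered as messages to the owning process.
recv_active(Sock) ->
    receive
        {tcp, Sock, Data}  -> {ok, Data};
        {tcp_closed, Sock} -> {error, closed}
    after 5000 ->
        {error, timeout}
    end.
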
diff --git a/lib/kernel/test/seq_trace_SUITE.erl b/lib/kernel/test/seq_trace_SUITE.erl
index ceb4e9cc49..663f910751 100644
--- a/lib/kernel/test/seq_trace_SUITE.erl
+++ b/lib/kernel/test/seq_trace_SUITE.erl
@@ -19,13 +19,17 @@
%%
-module(seq_trace_SUITE).
+%% label_capability_mismatch needs to run a part of the test on an OTP 20 node.
+-compile(r20).
+
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2]).
-export([token_set_get/1, tracer_set_get/1, print/1,
send/1, distributed_send/1, recv/1, distributed_recv/1,
trace_exit/1, distributed_exit/1, call/1, port/1,
- match_set_seq_token/1, gc_seq_token/1, label_capability_mismatch/1]).
+ match_set_seq_token/1, gc_seq_token/1, label_capability_mismatch/1,
+ send_literal/1]).
%% internal exports
-export([simple_tracer/2, one_time_receiver/0, one_time_receiver/1,
@@ -44,7 +48,7 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [token_set_get, tracer_set_get, print, send,
+ [token_set_get, tracer_set_get, print, send, send_literal,
distributed_send, recv, distributed_recv, trace_exit,
distributed_exit, call, port, match_set_seq_token,
gc_seq_token, label_capability_mismatch].
@@ -158,23 +162,51 @@ do_print(TsType) ->
{0,{print,_,_,[],print3}, Ts1}] = stop_tracer(2),
check_ts(TsType, Ts0),
check_ts(TsType, Ts1).
-
+
send(Config) when is_list(Config) ->
lists:foreach(fun do_send/1, ?TIMESTAMP_MODES).
do_send(TsType) ->
+ do_send(TsType, send).
+
+do_send(TsType, Msg) ->
seq_trace:reset_trace(),
start_tracer(),
Receiver = spawn(?MODULE,one_time_receiver,[]),
Label = make_ref(),
seq_trace:set_token(label,Label),
set_token_flags([send, TsType]),
- Receiver ! send,
+ Receiver ! Msg,
Self = self(),
seq_trace:reset_trace(),
- [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
+ [{Label,{send,_,Self,Receiver,Msg}, Ts}] = stop_tracer(1),
check_ts(TsType, Ts).
+%% Test that we do not segfault when the message is a literal that is
+%% copied onto the heap during a GC.
+send_literal(Config) when is_list(Config) ->
+ lists:foreach(fun do_send_literal/1,
+ [atom, make_ref(), ets:new(hej,[]), 1 bsl 64,
+ "gurka", {tuple,test,with,#{}}, #{}]).
+
+do_send_literal(Msg) ->
+ N = 10000,
+ seq_trace:reset_trace(),
+ start_tracer(),
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
+ set_token_flags([send, 'receive', no_timestamp]),
+ Receiver = spawn_link(fun() -> receive ok -> ok end end),
+ [Receiver ! Msg || _ <- lists:seq(1, N)],
+ erlang:garbage_collect(Receiver),
+ [Receiver ! Msg || _ <- lists:seq(1, N)],
+ erlang:garbage_collect(Receiver),
+ Self = self(),
+ seq_trace:reset_trace(),
+ [{Label,{send,_,Self,Receiver,Msg}, Ts} | _] = stop_tracer(N),
+ check_ts(no_timestamp, Ts).
+
distributed_send(Config) when is_list(Config) ->
lists:foreach(fun do_distributed_send/1, ?TIMESTAMP_MODES).
@@ -329,7 +361,7 @@ do_incompatible_labels(Rel) ->
Mdir = filename:dirname(Dir),
true = rpc:call(Node,code,add_patha,[Mdir]),
seq_trace:reset_trace(),
- rpc:call(Node,?MODULE,start_tracer,[]),
+ true = is_pid(rpc:call(Node,?MODULE,start_tracer,[])),
Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
%% This node does not support arbitrary labels, so it must fail with a
@@ -356,7 +388,7 @@ do_compatible_labels(Rel) ->
Mdir = filename:dirname(Dir),
true = rpc:call(Node,code,add_patha,[Mdir]),
seq_trace:reset_trace(),
- rpc:call(Node,?MODULE,start_tracer,[]),
+ true = is_pid(rpc:call(Node,?MODULE,start_tracer,[])),
Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
%% This node does not support arbitrary labels, but small integers should
@@ -783,6 +815,24 @@ do_shrink(N) ->
erlang:garbage_collect(),
do_shrink(N-1).
+%% Test that messages from a port do not clear the token
+port_clean_token(Config) when is_list(Config) ->
+ seq_trace:reset_trace(),
+ Label = make_ref(),
+ seq_trace:set_token(label, Label),
+ {label,Label} = seq_trace:get_token(label),
+
+    %% Create a port and receive messages from it. We use os:cmd/1 as a
+    %% convenience: it does open_port, port_command and port_close, and
+    %% receives the replies. Relying on the internal implementation of
+    %% os:cmd/1 is not ideal, but it will have to do.
+ os:cmd("ls"),
+
+ %% Make sure that the seq_trace token is still there
+ {label,Label} = seq_trace:get_token(label),
+
+ ok.
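The comment above concedes that leaning on the internals of os:cmd/1 is not ideal. A hedged alternative sketch that drives the same kind of port traffic directly (illustrative only, not part of the suite; the shell command is arbitrary):

%% Spawn an external command as a port and receive its output and exit
%% status as port messages, without going through os:cmd/1.
port_roundtrip() ->
    Port = open_port({spawn, "echo hello"}, [binary, exit_status]),
    receive
        {Port, {data, _Out}} -> ok
    after 5000 -> ct:fail(no_port_data)
    end,
    receive
        {Port, {exit_status, _Code}} -> ok
    after 5000 -> ct:fail(no_exit_status)
    end,
    ok.
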
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%