Diffstat (limited to 'lib/kernel/test')
44 files changed, 9109 insertions, 1166 deletions
diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile index efe3a68531..4a86265a4a 100644 --- a/lib/kernel/test/Makefile +++ b/lib/kernel/test/Makefile @@ -1,7 +1,7 @@ # # %CopyrightBegin% # -# Copyright Ericsson AB 1997-2016. All Rights Reserved. +# Copyright Ericsson AB 1997-2018. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. @@ -70,6 +70,15 @@ MODULES= \ interactive_shell_SUITE \ init_SUITE \ kernel_config_SUITE \ + logger_SUITE \ + logger_disk_log_h_SUITE \ + logger_env_var_SUITE \ + logger_filters_SUITE \ + logger_formatter_SUITE \ + logger_legacy_SUITE \ + logger_simple_h_SUITE \ + logger_std_h_SUITE \ + logger_test_lib \ os_SUITE \ pg2_SUITE \ seq_trace_SUITE \ @@ -80,7 +89,8 @@ MODULES= \ loose_node \ sendfile_SUITE \ standard_error_SUITE \ - multi_load_SUITE + multi_load_SUITE \ + zzz_SUITE APP_FILES = \ appinc.app \ @@ -101,7 +111,7 @@ TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR)) INSTALL_PROGS= $(TARGET_FILES) EMAKEFILE=Emakefile -COVERFILE=kernel.cover +COVERFILE=kernel.cover logger.cover # ---------------------------------------------------- # Release directory specification @@ -148,7 +158,8 @@ release_tests_spec: make_emakefile $(INSTALL_DIR) "$(RELSYSDIR)" $(INSTALL_DATA) $(ERL_FILES) "$(RELSYSDIR)" $(INSTALL_DATA) $(APP_FILES) "$(RELSYSDIR)" - $(INSTALL_DATA) kernel.spec kernel_smoke.spec kernel_bench.spec \ + $(INSTALL_DATA) \ + kernel.spec kernel_smoke.spec kernel_bench.spec logger.spec \ $(EMAKEFILE) $(COVERFILE) "$(RELSYSDIR)" chmod -R u+w "$(RELSYSDIR)" @tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -) diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl index 866043cfb4..5c35b82207 100644 --- a/lib/kernel/test/application_SUITE.erl +++ b/lib/kernel/test/application_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -37,7 +37,8 @@ -export([config_change/1, persistent_env/1, distr_changed_tc1/1, distr_changed_tc2/1, ensure_started/1, ensure_all_started/1, - shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1]). + shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1, + config_relative_paths/1]). -define(TESTCASE, testcase_name). -define(testcase, proplists:get_value(?TESTCASE, Config)). @@ -55,7 +56,7 @@ all() -> script_start, nodedown_start, permit_false_start_local, permit_false_start_dist, get_key, get_env, ensure_all_started, {group, distr_changed}, config_change, shutdown_func, shutdown_timeout, - shutdown_deadlock, + shutdown_deadlock, config_relative_paths, persistent_env]. groups() -> @@ -1568,7 +1569,8 @@ loop5606(Pid) -> %% Tests get_env/* functions. 
get_env(Conf) when is_list(Conf) -> - {ok, _} = application:get_env(kernel, error_logger), + ok = application:set_env(kernel, new_var, new_val), + {ok, new_val} = application:get_env(kernel, new_var), undefined = application:get_env(undefined_app, a), undefined = application:get_env(kernel, error_logger_xyz), default = application:get_env(kernel, error_logger_xyz, default), @@ -1602,8 +1604,7 @@ get_key(Conf) when is_list(Conf) -> {ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} = rpc:call(Cp1, application, get_key, [appinc, start_phases]), {ok, Env} = rpc:call(Cp1, application, get_key, [appinc ,env]), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, []} = rpc:call(Cp1, application, get_key, [appinc, modules]), {ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} = rpc:call(Cp1, application, get_key, [appinc, mod]), @@ -1624,8 +1625,7 @@ get_key(Conf) when is_list(Conf) -> {mod, {application_starter, [ch_sup, {appinc, 41, 43}] }}, {start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} = rpc:call(Cp1, application, get_all_key, [appinc]), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, "Test of new app file, including appnew"} = gen_server:call({global, {ch,41}}, {get_pid_key, description}), @@ -1642,8 +1642,7 @@ get_key(Conf) when is_list(Conf) -> {ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} = gen_server:call({global, {ch,41}}, {get_pid_key, start_phases}), {ok, Env} = gen_server:call({global, {ch,41}}, {get_pid_key, env}), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, []} = gen_server:call({global, {ch,41}}, {get_pid_key, modules}), {ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} = @@ -1670,8 +1669,7 @@ get_key(Conf) when is_list(Conf) -> {mod, {application_starter, [ch_sup, {appinc, 41, 43}] }}, {start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} = gen_server:call({global, {ch,41}}, get_pid_all_key), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), stop_node_nice(Cp1), ok. 
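The reworked get_env test above no longer assumes a kernel error_logger parameter; it only exercises application:set_env and application:get_env, including the three-argument form with a default. A minimal sketch of the same calls outside the suite (my_key, missing_key and fallback are illustrative names):

    %% Sketch only: set an env value, read it back, and fall back to a
    %% default for a key that is not defined.
    ok = application:set_env(kernel, my_key, my_val),
    {ok, my_val} = application:get_env(kernel, my_key),
    undefined = application:get_env(kernel, missing_key),
    fallback  = application:get_env(kernel, missing_key, fallback).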
@@ -2078,6 +2076,42 @@ shutdown_deadlock(Config) when is_list(Config) -> %%----------------------------------------------------------------- +%% Relative paths in sys.config +%%----------------------------------------------------------------- +config_relative_paths(Config) -> + Dir = ?config(priv_dir,Config), + SubDir = filename:join(Dir,"subdir"), + Sys = filename:join(SubDir,"sys.config"), + ok = filelib:ensure_dir(Sys), + ok = file:write_file(Sys,"[\"../up.config\",\"current\"].\n"), + + Up = filename:join(Dir,"up.config"), + ok = file:write_file(Up,"[{app1,[{key1,value}]}].\n"), + + {ok,Cwd} = file:get_cwd(), + Current1 = filename:join(Cwd,"current.config"), + ok = file:write_file(Current1,"[{app1,[{key2,value1}]}].\n"), + + N1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + {ok,Node1} = start_node(N1,filename:rootname(Sys)), + ok = rpc:call(Node1, application, load, [app1()]), + {ok, value} = rpc:call(Node1, application, get_env,[app1,key1]), + {ok, value1} = rpc:call(Node1, application, get_env,[app1,key2]), + + Current2 = filename:join(SubDir,"current.config"), + ok = file:write_file(Current2,"[{app1,[{key2,value2}]}].\n"), + + N2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_2"])), + {ok, Node2} = start_node(N2,filename:rootname(Sys)), + ok = rpc:call(Node2, application, load, [app1()]), + {ok, value} = rpc:call(Node2, application, get_env,[app1,key1]), + {ok, value2} = rpc:call(Node2, application, get_env,[app1,key2]), + + stop_node_nice([Node1,Node2]), + + ok. + +%%----------------------------------------------------------------- %% Utility functions %%----------------------------------------------------------------- app0() -> diff --git a/lib/kernel/test/code_SUITE.erl b/lib/kernel/test/code_SUITE.erl index 569753155f..1314316c13 100644 --- a/lib/kernel/test/code_SUITE.erl +++ b/lib/kernel/test/code_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -931,37 +931,34 @@ purge_stacktrace(Config) when is_list(Config) -> code:purge(code_b_test), try code_b_test:call(fun(b) -> ok end, a) catch - error:function_clause -> + error:function_clause:Stacktrace -> code:load_file(code_b_test), - case erlang:get_stacktrace() of + case Stacktrace of [{?MODULE,_,[a],_}, {code_b_test,call,2,_}, {?MODULE,purge_stacktrace,1,_}|_] -> - false = code:purge(code_b_test), - [] = erlang:get_stacktrace() + false = code:purge(code_b_test) end end, try code_b_test:call(nofun, 2) catch - error:function_clause -> + error:function_clause:Stacktrace2 -> code:load_file(code_b_test), - case erlang:get_stacktrace() of + case Stacktrace2 of [{code_b_test,call,[nofun,2],_}, {?MODULE,purge_stacktrace,1,_}|_] -> - false = code:purge(code_b_test), - [] = erlang:get_stacktrace() + false = code:purge(code_b_test) end end, Args = [erlang,error,[badarg]], try code_b_test:call(erlang, error, [badarg,Args]) catch - error:badarg -> + error:badarg:Stacktrace3 -> code:load_file(code_b_test), - case erlang:get_stacktrace() of + case Stacktrace3 of [{code_b_test,call,Args,_}, {?MODULE,purge_stacktrace,1,_}|_] -> - false = code:purge(code_b_test), - [] = erlang:get_stacktrace() + false = code:purge(code_b_test) end end, ok. 
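The code_SUITE changes above follow the OTP 21 exception-handling update: the deprecated erlang:get_stacktrace/0 is dropped in favour of binding the stacktrace directly in the catch clause, so the test no longer checks that the stacktrace is cleared after purge. A minimal sketch of the new syntax (safe_call/1 is an illustrative wrapper, not part of the suite):

    %% OTP 21+: the stacktrace is bound in the catch pattern instead of
    %% being fetched afterwards with erlang:get_stacktrace/0.
    safe_call(Fun) when is_function(Fun, 0) ->
        try
            Fun()
        catch
            Class:Reason:Stacktrace ->
                {error, {Class, Reason, Stacktrace}}
        end.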
diff --git a/lib/kernel/test/disk_log_SUITE.erl b/lib/kernel/test/disk_log_SUITE.erl index 12e2521939..9704c3b28c 100644 --- a/lib/kernel/test/disk_log_SUITE.erl +++ b/lib/kernel/test/disk_log_SUITE.erl @@ -89,8 +89,6 @@ dist_terminate/1, dist_accessible/1, dist_deadlock/1, dist_open2/1, other_groups/1, - evil/1, - otp_6278/1, otp_10131/1]). -export([head_fun/1, hf/0, lserv/1, @@ -123,7 +121,7 @@ [halt_int, wrap_int, halt_ext, wrap_ext, read_mode, head, notif, new_idx_vsn, reopen, block, unblock, open, close, error, chunk, truncate, many_users, info, change_size, - change_attribute, distribution, evil, otp_6278, otp_10131]). + change_attribute, distribution, otp_6278, otp_10131]). %% These test cases should be skipped if the VxWorks card is %% configured without NFS cache. @@ -149,7 +147,7 @@ all() -> {group, open}, {group, close}, {group, error}, chunk, truncate, many_users, {group, info}, {group, change_size}, change_attribute, - {group, distribution}, evil, otp_6278, otp_10131]. + {group, distribution}, otp_6278, otp_10131]. groups() -> [{halt_int, [], [halt_int_inf, {group, halt_int_sz}]}, @@ -1752,7 +1750,7 @@ block_queue(Conf) when is_list(Conf) -> true = [{1,a},{2,b},{3,c},{4,d},{5,e},{6,f},{7,g},{8,h}] == Terms, del(File, 2), Q = qlen(), - true = (P0 == pps()), + check_pps(P0), ok. %% OTP-4880. Blocked processes did not get disk_log_stopped message. @@ -1784,7 +1782,7 @@ block_queue2(Conf) when is_list(Conf) -> {ok,<<>>} = file:read_file(File ++ ".1"), del(File, No), Q = qlen(), - true = (P0 == pps()), + check_pps(P0), ok. @@ -2121,7 +2119,7 @@ close_block(Conf) when is_list(Conf) -> 0 = sync_do(Pid2, users), sync_do(Pid2, terminate), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Users terminate (no link...). Pid3 = spawn_link(?MODULE, lserv, [n]), @@ -2139,7 +2137,7 @@ close_block(Conf) when is_list(Conf) -> disk_log:close(n), disk_log:close(n), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Blocking owner terminates. Pid5 = spawn_link(?MODULE, lserv, [n]), @@ -2156,7 +2154,7 @@ close_block(Conf) when is_list(Conf) -> 1 = users(n), ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Blocking user terminates. Pid6 = spawn_link(?MODULE, lserv, [n]), @@ -2176,7 +2174,7 @@ close_block(Conf) when is_list(Conf) -> 1 = users(n), ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Blocking owner terminates. Pid7 = spawn_link(?MODULE, lserv, [n]), @@ -2194,7 +2192,7 @@ close_block(Conf) when is_list(Conf) -> 1 = users(n), ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Two owners, the blocking one terminates. Pid8 = spawn_link(?MODULE, lserv, [n]), @@ -2209,7 +2207,7 @@ close_block(Conf) when is_list(Conf) -> 0 = sync_do(Pid9, users), sync_do(Pid9, terminate), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Blocking user closes. Pid10 = spawn_link(?MODULE, lserv, [n]), @@ -2227,7 +2225,7 @@ close_block(Conf) when is_list(Conf) -> ok = disk_log:close(n), sync_do(Pid10, terminate), {error, no_such_log} = disk_log:info(n), - true = (P0 == pps()), + check_pps(P0), %% Blocking user unblocks and closes. 
Pid11 = spawn_link(?MODULE, lserv, [n]), @@ -2246,7 +2244,7 @@ close_block(Conf) when is_list(Conf) -> ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), sync_do(Pid11, terminate), - true = (P0 == pps()), + check_pps(P0), %% Blocking owner closes. Pid12 = spawn_link(?MODULE, lserv, [n]), @@ -2265,7 +2263,7 @@ close_block(Conf) when is_list(Conf) -> ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), sync_do(Pid12, terminate), - true = (P0 == pps()), + check_pps(P0), %% Blocking owner unblocks and closes. Pid13 = spawn_link(?MODULE, lserv, [n]), @@ -2285,7 +2283,7 @@ close_block(Conf) when is_list(Conf) -> ok = disk_log:close(n), {error, no_such_log} = disk_log:info(n), sync_do(Pid13, terminate), - true = (P0 == pps()), + check_pps(P0), del(File, No), % cleanup ok. @@ -2489,7 +2487,7 @@ error_repair(Conf) when is_list(Conf) -> P0 = pps(), {error, {file_error, _, _}} = disk_log:open([{name, n}, {file, File}, {type, wrap}, {size,{40,4}}]), - true = (P0 == pps()), + check_pps(P0), del(File, No), ok = file:del_dir(Dir), @@ -2508,7 +2506,7 @@ error_repair(Conf) when is_list(Conf) -> disk_log:open([{name, n}, {file, File}, {type, wrap}, {format, internal}, {size, {40,No}}]), ok = disk_log:close(n), - true = (P1 == pps()), + check_pps(P1), del(File, No), receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok after 1000 -> ct:fail(failed) end, @@ -2526,7 +2524,7 @@ error_repair(Conf) when is_list(Conf) -> disk_log:open([{name, n}, {file, File}, {type, wrap}, {format, internal}, {size, {4000,No}}]), ok = disk_log:close(n), - true = (P2 == pps()), + check_pps(P2), del(File, No), receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok after 1000 -> ct:fail(failed) end, @@ -2635,7 +2633,7 @@ error_log(Conf) when is_list(Conf) -> {ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap}, {format, external},{size, {100, No}}]), {error, {file_error, _, _}} = disk_log:truncate(n), - true = (P0 == pps()), + check_pps(P0), del(File, No), %% OTP-4880. @@ -2643,7 +2641,7 @@ error_log(Conf) when is_list(Conf) -> {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt}, {format, external},{size, 100000}]), {error, {file_error, _, eisdir}} = disk_log:reopen(n, LDir), - true = (P0 == pps()), + check_pps(P0), file:delete(File), B = mk_bytes(60), @@ -3005,7 +3003,7 @@ error_index(Conf) when is_list(Conf) -> {error, {invalid_index_file, _}} = disk_log:open(Args), del(File, No), - true = (P0 == pps()), + check_pps(P0), true = (Q == qlen()), ok. @@ -4438,7 +4436,7 @@ dist_open2(Conf) when is_list(Conf) -> timer:sleep(500), file:delete(File), - true = (P0 == pps()), + check_pps(P0), %% This time the first process has a naughty head_func. This test %% does not add very much. Perhaps it should be removed. However, @@ -4484,7 +4482,7 @@ dist_open2(Conf) when is_list(Conf) -> timer:sleep(100), {error, no_such_log} = disk_log:close(Log), file:delete(File), - true = (P0 == pps()), + check_pps(P0), No = 2, Log2 = n2, @@ -4513,7 +4511,7 @@ dist_open2(Conf) when is_list(Conf) -> file:delete(File2), del(File, No), - true = (P0 == pps()), + check_pps(P0), R. @@ -4558,7 +4556,7 @@ dist_open2_1(Conf, Delay) -> {error, no_such_log} = disk_log:info(Log), file:delete(File), - true = (P0 == pps()), + check_pps(P0), ok. @@ -4615,7 +4613,7 @@ dist_open2_2(Conf, Delay) -> {[{Node1,{repaired,_,_,_}}],[]}} -> ok end, - true = (P0 == pps()), + check_pps(P0), stop_node(Node1), file:delete(File), ok. @@ -4676,119 +4674,6 @@ other_groups(Conf) when is_list(Conf) -> ok. 
--define(MAX, ?MAX_FWRITE_CACHE). % as in disk_log_1.erl -%% Evil cases such as closed file descriptor port. -evil(Conf) when is_list(Conf) -> - Dir = ?privdir(Conf), - File = filename:join(Dir, "n.LOG"), - Log = n, - - %% Not a very thorough test. - - ok = setup_evil_filled_cache_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:log(Log, apa), - ok = disk_log:close(Log), - - ok = setup_evil_filled_cache_halt(Log, Dir), - {error, {file_error,_,einval}} = disk_log:truncate(Log, apa), - ok = stop_evil(Log), - - %% White box test. - file:delete(File), - Ports0 = erlang:ports(), - {ok, Log} = disk_log:open([{name,Log},{file,File},{type,halt}, - {size,?MAX+50},{format,external}]), - [Fd] = erlang:ports() -- Ports0, - {B,_} = x_mk_bytes(30), - ok = disk_log:blog(Log, <<0:(?MAX-1)/unit:8>>), - exit(Fd, kill), - {error, {file_error,_,einval}} = disk_log:blog_terms(Log, [B,B]), - ok= disk_log:close(Log), - file:delete(File), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:close(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:log(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_halt(Log, Dir), - {error, {file_error,_,einval}} = disk_log:log(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:reopen(Log, apa), - {error, {file_error,_,einval}} = disk_log:reopen(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:reopen(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:inc_wrap_file(Log), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:chunk(Log, start), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:truncate(Log), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:chunk_step(Log, start, 1), - ok = stop_evil(Log), - - io:format("messages: ~p~n", [erlang:process_info(self(), messages)]), - del(File, 2), - file:delete(File), - ok. - -setup_evil_wrap(Log, Dir) -> - setup_evil(Log, [{type,wrap},{size,{100,2}}], Dir). - -setup_evil_halt(Log, Dir) -> - setup_evil(Log, [{type,halt},{size,10000}], Dir). - -setup_evil(Log, Args, Dir) -> - File = filename:join(Dir, lists:concat([Log, ".LOG"])), - file:delete(File), - del(File, 2), - ok = disk_log:start(), - Ports0 = erlang:ports(), - {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]), - [Fd] = erlang:ports() -- Ports0, - exit(Fd, kill), - ok = disk_log:log_terms(n, [<<0:10/unit:8>>]), - timer:sleep(2500), % TIMEOUT in disk_log_1.erl is 2000 - ok. - -stop_evil(Log) -> - {error, _} = disk_log:close(Log), - ok. - -setup_evil_filled_cache_wrap(Log, Dir) -> - setup_evil_filled_cache(Log, [{type,wrap},{size,{?MAX,2}}], Dir). - -setup_evil_filled_cache_halt(Log, Dir) -> - setup_evil_filled_cache(Log, [{type,halt},{size,infinity}], Dir). - -%% The cache is filled, and the file descriptor port gone. -setup_evil_filled_cache(Log, Args, Dir) -> - File = filename:join(Dir, lists:concat([Log, ".LOG"])), - file:delete(File), - del(File, 2), - ok = disk_log:start(), - Ports0 = erlang:ports(), - {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]), - [Fd] = erlang:ports() -- Ports0, - ok = disk_log:log_terms(n, [<<0:?MAX/unit:8>>]), - exit(Fd, kill), - ok. - %% OTP-6278. open/1 creates no status or crash report. 
otp_6278(Conf) when is_list(Conf) -> Dir = ?privdir(Conf), @@ -4906,10 +4791,59 @@ log(Name, N) -> format_error(E) -> lists:flatten(disk_log:format_error(E)). +check_pps({Ports0,Procs0} = P0) -> + case pps() of + P0 -> + ok; + _ -> + timer:sleep(500), + case pps() of + P0 -> + ok; + {Ports1,Procs1} = P1 -> + case {Ports1 -- Ports0, Procs1 -- Procs0} of + {[], []} -> ok; + {PortsDiff,ProcsDiff} -> + io:format("failure, got ~p~n, expected ~p\n", [P1, P0]), + show("Old port", Ports0 -- Ports1), + show("New port", PortsDiff), + show("Old proc", Procs0 -- Procs1), + show("New proc", ProcsDiff), + ct:fail(failed) + end + end + end. + +show(_S, []) -> + ok; +show(S, [{Pid, Name, InitCall}|Pids]) when is_pid(Pid) -> + io:format("~s: ~w (~w), ~w: ~p~n", + [S, Pid, proc_reg_name(Name), InitCall, + erlang:process_info(Pid)]), + show(S, Pids); +show(S, [{Port, _}|Ports]) when is_port(Port)-> + io:format("~s: ~w: ~p~n", [S, Port, erlang:port_info(Port)]), + show(S, Ports). + pps() -> timer:sleep(100), - {erlang:ports(), lists:filter(fun(P) -> erlang:is_process_alive(P) end, - processes())}. + {port_list(), process_list()}. + +port_list() -> + [{P,safe_second_element(erlang:port_info(P, name))} || + P <- erlang:ports()]. + +process_list() -> + [{P,process_info(P, registered_name), + safe_second_element(process_info(P, initial_call))} || + P <- processes(), erlang:is_process_alive(P)]. + +proc_reg_name({registered_name, Name}) -> Name; +proc_reg_name([]) -> no_reg_name. + +safe_second_element({_,Info}) -> Info; +safe_second_element(Other) -> Other. + qlen() -> {_, {_, N}} = lists:keysearch(message_queue_len, 1, process_info(self())), diff --git a/lib/kernel/test/erl_distribution_SUITE.erl b/lib/kernel/test/erl_distribution_SUITE.erl index e34b4d77d2..5a8bbd56c4 100644 --- a/lib/kernel/test/erl_distribution_SUITE.erl +++ b/lib/kernel/test/erl_distribution_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -25,6 +25,7 @@ init_per_group/2,end_per_group/2]). -export([tick/1, tick_change/1, + connect_node/1, nodenames/1, hostnames/1, illegal_nodenames/1, hidden_node/1, setopts/1, @@ -70,6 +71,7 @@ suite() -> all() -> [tick, tick_change, nodenames, hostnames, illegal_nodenames, + connect_node, hidden_node, setopts, table_waste, net_setuptime, inet_dist_options_options, {group, monitor_nodes}]. @@ -87,6 +89,7 @@ init_per_suite(Config) -> Config. end_per_suite(_Config) -> + [slave:stop(N) || N <- nodes()], ok. init_per_group(_GroupName, Config) -> @@ -106,6 +109,12 @@ init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) -> end_per_testcase(_Func, _Config) -> ok. +connect_node(Config) when is_list(Config) -> + Connected = nodes(connected), + true = net_kernel:connect_node(node()), + Connected = nodes(connected), + ok. + tick(Config) when is_list(Config) -> PaDir = filename:dirname(code:which(erl_distribution_SUITE)), @@ -244,7 +253,7 @@ illegal(Name) -> test_node(Name) -> test_node(Name, false). 
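The new pps/0 and check_pps/1 helpers above replace the strict true = (P0 == pps()) assertions in disk_log_SUITE: a test snapshots the live ports and processes first and afterwards verifies that nothing leaked, retrying after 500 ms to tolerate asynchronous cleanup and printing the differing ports and processes before failing. Roughly, the calling pattern looks like this (illustrative only; it relies on the suite's own pps/0 and check_pps/1):

    %% Illustrative sketch of how a test case uses the snapshot helpers.
    leak_check_example(File) ->
        P0 = pps(),                                %% snapshot {Ports, Procs}
        {ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt}]),
        ok = disk_log:close(n),
        check_pps(P0).                             %% fails if ports/procs leaked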
test_node(Name, Illigal) -> - ProgName = atom_to_list(lib:progname()), + ProgName = ct:get_progname(), Command = ProgName ++ " -noinput " ++ long_or_short() ++ Name ++ " -eval \"net_adm:ping('" ++ atom_to_list(node()) ++ "')\"" ++ case Illigal of @@ -463,9 +472,9 @@ run_remote_test([FuncStr, TestNodeStr | Args]) -> 1 end catch - C:E -> + C:E:S -> io:format("Node ~p got EXCEPTION ~p:~p\nat ~p\n", - [node(), C, E, erlang:get_stacktrace()]), + [node(), C, E, S]), 2 end, io:format("Node ~p doing halt(~p).\n",[node(), Status]), @@ -1144,17 +1153,16 @@ monitor_nodes_otp_6481_test(Config, TestType) when is_list(Config) -> TestMonNodeState = monitor_node_state(), %% io:format("~p~n", [TestMonNodeState]), TestMonNodeState = - MonNodeState + case TestType of + nodedown -> []; + nodeup -> [{self(), []}] + end + ++ lists:map(fun (_) -> {MN, []} end, Seq) ++ case TestType of nodedown -> [{self(), []}]; nodeup -> [] end - ++ lists:map(fun (_) -> {MN, []} end, Seq) - ++ case TestType of - nodedown -> []; - nodeup -> [{self(), []}] - end, - + ++ MonNodeState, {ok, Node} = start_node(Name, "", this), receive {nodeup, Node} -> ok end, diff --git a/lib/kernel/test/erl_distribution_wb_SUITE.erl b/lib/kernel/test/erl_distribution_wb_SUITE.erl index 03aaee56b7..8256444bdc 100644 --- a/lib/kernel/test/erl_distribution_wb_SUITE.erl +++ b/lib/kernel/test/erl_distribution_wb_SUITE.erl @@ -61,10 +61,13 @@ %% From R9 and forward extended references is compulsory %% From R10 and forward extended pids and ports are compulsory %% From R20 and forward UTF8 atoms are compulsory +%% From R21 and forward NEW_FUN_TAGS is compulsory (no more tuple fallback {fun, ...}) -define(COMPULSORY_DFLAGS, (?DFLAG_EXTENDED_REFERENCES bor ?DFLAG_EXTENDED_PIDS_PORTS bor - ?DFLAG_UTF8_ATOMS)). + ?DFLAG_UTF8_ATOMS bor + ?DFLAG_NEW_FUN_TAGS)). +-define(PASS_THROUGH, $p). -define(shutdown(X), exit(X)). -define(int16(X), [((X) bsr 8) band 16#ff, (X) band 16#ff]). @@ -674,15 +677,16 @@ build_rex_message(Cookie,OurName) -> %% Receive a distribution message recv_message(Socket) -> case gen_tcp:recv(Socket, 0) of + {ok,[]} -> + recv_message(Socket); %% a tick, ignore {ok,Data} -> B0 = list_to_binary(Data), - {_,B1} = erlang:split_binary(B0,1), - Header = binary_to_term(B1), - Siz = byte_size(term_to_binary(Header)), - {_,B2} = erlang:split_binary(B1,Siz), + <<?PASS_THROUGH, B1/binary>> = B0, + {Header,Siz} = binary_to_term(B1,[used]), + <<_:Siz/binary,B2/binary>> = B1, Message = case (catch binary_to_term(B2)) of {'EXIT', _} -> - could_not_digest_message; + {could_not_digest_message,B2}; Other -> Other end, diff --git a/lib/kernel/test/erl_prim_loader_SUITE.erl b/lib/kernel/test/erl_prim_loader_SUITE.erl index b6417210b9..16a127aa3e 100644 --- a/lib/kernel/test/erl_prim_loader_SUITE.erl +++ b/lib/kernel/test/erl_prim_loader_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -33,6 +33,7 @@ primary_archive/1, virtual_dir_in_archive/1, get_modules/1]). +-define(PRIM_FILE, prim_file). %%----------------------------------------------------------------- %% Test suite for erl_prim_loader. (Most code is run during system start/stop.) 
@@ -461,7 +462,7 @@ primary_archive(Config) when is_list(Config) -> %% Set primary archive ExpectedEbins = [Archive, DictDir ++ "/ebin", DummyDir ++ "/ebin"], io:format("ExpectedEbins: ~p\n", [ExpectedEbins]), - {ok, FileInfo} = prim_file:read_file_info(Archive), + {ok, FileInfo} = ?PRIM_FILE:read_file_info(Archive), {ok, Ebins} = rpc:call(Node, erl_prim_loader, set_primary_archive, [Archive, ArchiveBin, FileInfo, fun escript:parse_file/1]), diff --git a/lib/kernel/test/error_logger_SUITE.erl b/lib/kernel/test/error_logger_SUITE.erl index 2d26a7246c..eab72e58a7 100644 --- a/lib/kernel/test/error_logger_SUITE.erl +++ b/lib/kernel/test/error_logger_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -32,7 +32,8 @@ init_per_group/2,end_per_group/2, off_heap/1, error_report/1, info_report/1, error/1, info/1, - emulator/1, tty/1, logfile/1, add/1, delete/1]). + emulator/1, via_logger_process/1, other_node/1, + tty/1, logfile/1, add/1, delete/1, format_depth/1]). -export([generate_error/2]). @@ -46,16 +47,20 @@ suite() -> {timetrap,{minutes,1}}]. all() -> - [off_heap, error_report, info_report, error, info, emulator, tty, - logfile, add, delete]. + [off_heap, error_report, info_report, error, info, emulator, + via_logger_process, other_node, tty, logfile, add, delete, + format_depth]. groups() -> []. init_per_suite(Config) -> + logger:add_handler(error_logger,error_logger, + #{level=>info,filter_default=>log}), Config. end_per_suite(_Config) -> + logger:remove_handler(error_logger), ok. init_per_group(_GroupName, Config) -> @@ -226,6 +231,40 @@ generate_error(Error, Stack) -> erlang:raise(error, Error, Stack). %%----------------------------------------------------------------- + +via_logger_process(Config) -> + case os:type() of + {win32,_} -> + {skip,"Skip on windows - cant change file mode"}; + _ -> + error_logger:add_report_handler(?MODULE, self()), + Dir = filename:join(?config(priv_dir,Config),"dummydir"), + Msg = "File operation error: eacces. Target: " ++ + Dir ++ ". Function: list_dir. ", + ok = file:make_dir(Dir), + ok = file:change_mode(Dir,8#0222), + error = erl_prim_loader:list_dir(Dir), + ok = file:change_mode(Dir,8#0664), + _ = file:del_dir(Dir), + reported(error_report, std_error, Msg), + my_yes = error_logger:delete_report_handler(?MODULE), + ok + end. + +%%----------------------------------------------------------------- + +other_node(_Config) -> + error_logger:add_report_handler(?MODULE, self()), + {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]), + ok = rpc:call(Node,logger,add_handler,[error_logger,error_logger, + #{level=>info,filter_default=>log}]), + rpc:call(Node,error_logger,error_report,[hi_from_remote]), + reported(error_report,std_error,hi_from_remote), + test_server:stop_node(Node), + ok. + + +%%----------------------------------------------------------------- %% We don't enables or disables tty error logging here. We do not %% want to interact with the test run. %%----------------------------------------------------------------- @@ -271,6 +310,21 @@ delete(Config) when is_list(Config) -> ok. 
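With logger introduced in this release, error_logger_SUITE now explicitly installs error_logger as a logger handler in init_per_suite and removes it in end_per_suite; other_node/1 does the same on a slave node via rpc. The calls are exactly the ones in the diff; as a standalone sketch:

    %% Attach the legacy error_logger event manager as a logger handler,
    %% run code that reports through it, then detach it again.
    ok = logger:add_handler(error_logger, error_logger,
                            #{level => info, filter_default => log}),
    %% ... error_logger:error_report(...) etc. ...
    ok = logger:remove_handler(error_logger).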
%%----------------------------------------------------------------- + +format_depth(_Config) -> + ok = application:set_env(kernel,error_logger_format_depth,30), + 30 = error_logger:get_format_depth(), + ok = application:set_env(kernel,error_logger_format_depth,3), + 10 = error_logger:get_format_depth(), + ok = application:set_env(kernel,error_logger_format_depth,11), + 11 = error_logger:get_format_depth(), + ok = application:set_env(kernel,error_logger_format_depth,unlimited), + unlimited = error_logger:get_format_depth(), + ok = application:unset_env(kernel,error_logger_format_depth), + unlimited = error_logger:get_format_depth(), + ok. + +%%----------------------------------------------------------------- %% Check that the report has been received. %%----------------------------------------------------------------- reported(Tag, Type, Report) -> @@ -279,7 +333,7 @@ reported(Tag, Type, Report) -> test_server:messages_get(), ok after 1000 -> - ct:fail(no_report_received) + ct:fail({no_report_received,test_server:messages_get()}) end. %%----------------------------------------------------------------- diff --git a/lib/kernel/test/error_logger_warn_SUITE.erl b/lib/kernel/test/error_logger_warn_SUITE.erl index a8087e11f9..8f1eb2ba0a 100644 --- a/lib/kernel/test/error_logger_warn_SUITE.erl +++ b/lib/kernel/test/error_logger_warn_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2003-2016. All Rights Reserved. +%% Copyright Ericsson AB 2003-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -480,9 +480,12 @@ rb_utc() -> UtcLog=case application:get_env(sasl,utc_log) of {ok,true} -> true; - _AllOthers -> + {ok,false} -> application:set_env(sasl,utc_log,true), - false + false; + undefined -> + application:set_env(sasl,utc_log,true), + undefined end, application:start(sasl), rb:start([{report_dir, rd()}]), @@ -494,7 +497,12 @@ rb_utc() -> Sum=one_rb_findstr([],"UTC"), rb:stop(), application:stop(sasl), - application:set_env(sasl,utc_log,UtcLog), + case UtcLog of + undefined -> + application:unset_env(sasl,utc_log); + _ -> + application:set_env(sasl,utc_log,UtcLog) + end, stop_node(Node), ok. diff --git a/lib/kernel/test/file_SUITE.erl b/lib/kernel/test/file_SUITE.erl index 119e1f24bb..a51025cba6 100644 --- a/lib/kernel/test/file_SUITE.erl +++ b/lib/kernel/test/file_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2017. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -39,6 +39,8 @@ -define(FILE_FIN_PER_TESTCASE(Config), Config). -endif. +-define(PRIM_FILE, prim_file). + -module(?FILE_SUITE). -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, @@ -54,7 +56,8 @@ open1/1, old_modes/1, new_modes/1, path_open/1, open_errors/1]). -export([ file_info_basic_file/1, file_info_basic_directory/1, - file_info_bad/1, file_info_times/1, file_write_file_info/1]). + file_info_bad/1, file_info_times/1, file_write_file_info/1, + file_wfi_helpers/1]). -export([rename/1, access/1, truncate/1, datasync/1, sync/1, read_write/1, pread_write/1, append/1, exclusive/1]). -export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]). @@ -97,6 +100,12 @@ -export([unicode_mode/1]). +-export([volume_relative_paths/1,unc_paths/1]). 
+ +-export([tiny_writes/1, tiny_writes_delayed/1, + large_writes/1, large_writes_delayed/1, + tiny_reads/1, tiny_reads_ahead/1]). + %% Debug exports -export([create_file_slow/2, create_file/2, create_bin/2]). -export([verify_file/2, verify_bin/3]). @@ -107,6 +116,8 @@ -export([disc_free/1, memsize/0]). -include_lib("common_test/include/ct.hrl"). +-include_lib("common_test/include/ct_event.hrl"). + -include_lib("kernel/include/file.hrl"). -define(THROW_ERROR(RES), throw({fail, ?LINE, RES})). @@ -118,13 +129,13 @@ suite() -> all() -> [unicode, altname, read_write_file, {group, dirs}, - {group, files}, delete, rename, names, {group, errors}, - {group, compression}, {group, links}, copy, + {group, files}, delete, rename, names, volume_relative_paths, unc_paths, + {group, errors}, {group, compression}, {group, links}, copy, delayed_write, read_ahead, segment_read, segment_write, ipread, pid2name, interleaved_read_write, otp_5814, otp_10852, large_file, large_write, read_line_1, read_line_2, read_line_3, read_line_4, standard_io, old_io_protocol, - unicode_mode + unicode_mode, {group, bench} ]. groups() -> @@ -142,7 +153,8 @@ groups() -> {pos, [], [pos1, pos2, pos3]}, {file_info, [], [file_info_basic_file, file_info_basic_directory, - file_info_bad, file_info_times, file_write_file_info]}, + file_info_bad, file_info_times, file_write_file_info, + file_wfi_helpers]}, {consult, [], [consult1, path_consult]}, {eval, [], [eval1, path_eval]}, {script, [], [script1, path_script]}, @@ -154,11 +166,19 @@ groups() -> write_compressed, compress_errors, catenated_gzips, compress_async_crash]}, {links, [], - [make_link, read_link_info_for_non_link, symlinks]}]. + [make_link, read_link_info_for_non_link, symlinks]}, + {bench, [], + [tiny_writes, tiny_writes_delayed, + large_writes, large_writes_delayed, + tiny_reads, tiny_reads_ahead]}]. init_per_group(_GroupName, Config) -> Config. +end_per_group(bench, Config) -> + ScratchDir = proplists:get_value(priv_dir, Config), + file:delete(filename:join(ScratchDir, "benchmark_scratch_file")), + Config; end_per_group(_GroupName, Config) -> Config. @@ -381,11 +401,11 @@ read_write_0(Str, {Func, ReadFun}, Options) -> io:format("~p:~p: ~p ERROR: ~ts vs~n ~w~n - ~p~n", [?MODULE, Line, Func, Str, ReadBytes, Options]), exit({error, ?LINE}); - error:What -> + error:What:Stacktrace -> io:format("~p:??: ~p ERROR: ~p from~n ~w~n ~p~n", [?MODULE, Func, What, Str, Options]), - io:format("\t~p~n", [erlang:get_stacktrace()]), + io:format("\t~p~n", [Stacktrace]), exit({error, ?LINE}) end. @@ -473,7 +493,7 @@ um_check_unicode(_Utf8Bin, {ok, _ListOrBin}, _, _UTF8_) -> um_filename(Bin, Dir, Options) when is_binary(Bin) -> um_filename(binary_to_list(Bin), Dir, Options); um_filename(Str = [_|_], Dir, Options) -> - Name = hd(string:tokens(Str, ":")), + Name = hd(string:lexemes(Str, ":")), Enc = atom_to_list(proplists:get_value(encoding, Options, latin1)), File = case lists:member(binary, Options) of true -> @@ -638,6 +658,10 @@ cur_dir_0(Config) when is_list(Config) -> {ok,NewDirFiles} = ?FILE_MODULE:list_dir("."), true = lists:member(UncommonName,NewDirFiles), + %% Ensure that we get the same result with a trailing slash; the + %% APIs used on Windows will choke on them if passed directly. + {ok,NewDirFiles} = ?FILE_MODULE:list_dir("./"), + %% Delete the directory and return to the old current directory %% and check that the created file isn't there (too!) 
expect({error, einval}, {error, eacces}, @@ -690,10 +714,15 @@ win_cur_dir_1(_Config) -> %% Get the drive letter from the current directory, %% and try to get current directory for that drive. - [Drive,$:|_] = BaseDir, - {ok,BaseDir} = ?FILE_MODULE:get_cwd([Drive,$:]), + [CurDrive,$:|_] = BaseDir, + {ok,BaseDir} = ?FILE_MODULE:get_cwd([CurDrive,$:]), io:format("BaseDir = ~s\n", [BaseDir]), + %% We should error out on non-existent drives. Any reasonable system will + %% have at least one. + CurDirs = [?FILE_MODULE:get_cwd([Drive,$:]) || Drive <- lists:seq($A, $Z)], + lists:member({error,eaccess}, CurDirs), + %% Unfortunately, there is no way to move away from the %% current drive as we can't use the "subst" command from %% a SSH connection. We can't test any more. @@ -831,7 +860,7 @@ no_untranslatable_names() -> end. start_node(Name, Args) -> - [_,Host] = string:tokens(atom_to_list(node()), "@"), + [_,Host] = string:lexemes(atom_to_list(node()), "@"), ct:log("Trying to start ~w@~s~n", [Name,Host]), case test_server:start_node(Name, peer, [{args,Args}]) of {error,Reason} -> @@ -1019,6 +1048,23 @@ close(Config) when is_list(Config) -> Val = ?FILE_MODULE:close(Fd1), io:format("Second close gave: ~p",[Val]), + %% All operations on a closed raw file should EINVAL, even if they're not + %% supported on the current platform. + {ok,Fd2} = ?FILE_MODULE:open(Name, [read, write, raw]), + ok = ?FILE_MODULE:close(Fd2), + + {error, einval} = ?FILE_MODULE:advise(Fd2, 5, 5, normal), + {error, einval} = ?FILE_MODULE:allocate(Fd2, 5, 5), + {error, einval} = ?FILE_MODULE:close(Fd2), + {error, einval} = ?FILE_MODULE:datasync(Fd2), + {error, einval} = ?FILE_MODULE:position(Fd2, 5), + {error, einval} = ?FILE_MODULE:pread(Fd2, 5, 1), + {error, einval} = ?FILE_MODULE:pwrite(Fd2, 5, "einval please"), + {error, einval} = ?FILE_MODULE:read(Fd2, 1), + {error, einval} = ?FILE_MODULE:sync(Fd2), + {error, einval} = ?FILE_MODULE:truncate(Fd2), + {error, einval} = ?FILE_MODULE:write(Fd2, "einval please"), + [] = flush(), ok. @@ -1132,8 +1178,8 @@ pread_write_test(File, Data) -> end, I = Size + 17, ok = ?FILE_MODULE:pwrite(File, 0, Data), - Res = ?FILE_MODULE:pread(File, 0, I), - {ok, Data} = Res, + {ok, Data} = ?FILE_MODULE:pread(File, 0, I), + {ok, [Data]} = ?FILE_MODULE:pread(File, [{0, I}]), eof = ?FILE_MODULE:pread(File, I, 1), ok = ?FILE_MODULE:pwrite(File, [{0, Data}, {I, Data}]), {ok, [Data, eof, Data]} = @@ -1321,6 +1367,10 @@ file_info_basic_file(Config) when is_list(Config) -> io:put_chars(Fd1, "foo bar"), ok = ?FILE_MODULE:close(Fd1), + %% Don't crash the file server when passing incorrect arguments. + {error,badarg} = ?FILE_MODULE:read_file_info(Name, [{time, gurka}]), + {error,badarg} = ?FILE_MODULE:read_file_info([#{} | gaffel]), + %% Test that the file has the expected attributes. %% The times are tricky, so we will save them to a separate test case. {ok,FileInfo} = ?FILE_MODULE:read_file_info(Name), @@ -1564,6 +1614,39 @@ file_write_file_info(Config) when is_list(Config) -> [] = flush(), ok. 
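The pread_write_test change above additionally exercises the list form of pread, which performs several positioned reads in one call and returns the results in request order, with eof for ranges past the end of file. A brief sketch of that API (the file name is illustrative):

    %% file:pread/2 with a list of {Offset, Length} pairs.
    {ok, Fd} = file:open("pread_demo.bin", [read, write, raw, binary]),
    ok = file:pwrite(Fd, 0, <<"0123456789">>),
    {ok, [<<"0123">>, <<"4567">>, eof]} =
        file:pread(Fd, [{0,4}, {4,4}, {100,1}]),
    ok = file:close(Fd).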
+file_wfi_helpers(Config) when is_list(Config) -> + RootDir = get_good_directory(Config), + io:format("RootDir = ~p", [RootDir]), + + Name = filename:join(RootDir, + atom_to_list(?MODULE) ++ "_wfi_helpers"), + + ok = ?FILE_MODULE:write_file(Name, "hello again"), + NewTime = {{1997, 02, 15}, {13, 18, 20}}, + ok = ?FILE_MODULE:change_time(Name, NewTime, NewTime), + + {ok, #file_info{atime=NewActAtime, mtime=NewTime}} = + ?FILE_MODULE:read_file_info(Name), + + NewFilteredAtime = filter_atime(NewTime, Config), + NewFilteredAtime = filter_atime(NewActAtime, Config), + + %% Make the file unwritable + ok = ?FILE_MODULE:change_mode(Name, 8#400), + {error, eacces} = ?FILE_MODULE:write_file(Name, "hello again"), + + %% ... and writable again + ok = ?FILE_MODULE:change_mode(Name, 8#600), + ok = ?FILE_MODULE:write_file(Name, "hello again"), + + %% We have no idea which users will work, so all we can do is to check + %% that it returns enoent instead of crashing. + {error, enoent} = ?FILE_MODULE:change_group("bogus file name", 0), + {error, enoent} = ?FILE_MODULE:change_owner("bogus file name", 0), + + [] = flush(), + ok. + %% Returns a directory on a file system that has correct file times. get_good_directory(Config) -> @@ -2044,13 +2127,22 @@ names(Config) when is_list(Config) -> ok = ?FILE_MODULE:close(Fd2), {ok,Fd3} = ?FILE_MODULE:open(Name3,read), ok = ?FILE_MODULE:close(Fd3), + + %% Now try the same on raw files. + {ok,Fd4} = ?FILE_MODULE:open(Name2, [read, raw]), + ok = ?FILE_MODULE:close(Fd4), + {ok,Fd4f} = ?FILE_MODULE:open(lists:flatten(Name2), [read, raw]), + ok = ?FILE_MODULE:close(Fd4f), + {ok,Fd5} = ?FILE_MODULE:open(Name3, [read, raw]), + ok = ?FILE_MODULE:close(Fd5), + case length(Name1) > 255 of true -> io:format("Path too long for an atom:\n\n~p\n", [Name1]); false -> Name4 = list_to_atom(Name1), - {ok,Fd4} = ?FILE_MODULE:open(Name4,read), - ok = ?FILE_MODULE:close(Fd4) + {ok,Fd6} = ?FILE_MODULE:open(Name4,read), + ok = ?FILE_MODULE:close(Fd6) end, %% Try some path names @@ -2074,6 +2166,46 @@ names(Config) when is_list(Config) -> [] = flush(), ok. +volume_relative_paths(Config) when is_list(Config) -> + case os:type() of + {win32, _} -> + {ok, [Drive, $: | _]} = file:get_cwd(), + %% Relative to current device root. + {ok, RootInfo} = file:read_file_info([Drive, $:, $/]), + {ok, RootInfo} = file:read_file_info("/"), + %% Relative to current device directory. + {ok, DirContents} = file:list_dir([Drive, $:]), + {ok, DirContents} = file:list_dir("."), + [] = flush(), + ok; + _ -> + {skip, "This test is Windows-specific."} + end. + +unc_paths(Config) when is_list(Config) -> + case os:type() of + {win32, _} -> + %% We assume administrative shares are set up and reachable, and we + %% settle for testing presence as some of the returned data is + %% different. + {ok, _} = file:read_file_info("C:\\Windows\\explorer.exe"), + {ok, _} = file:read_file_info("\\\\localhost\\c$\\Windows\\explorer.exe"), + + {ok, Cwd} = file:get_cwd(), + + try + ok = file:set_cwd("\\\\localhost\\c$\\Windows\\"), + {ok, _} = file:read_file_info("explorer.exe") + after + file:set_cwd(Cwd) + end, + + [] = flush(), + ok; + _ -> + {skip, "This test is Windows-specific."} + end. + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -2102,13 +2234,14 @@ e_delete(Config) when is_list(Config) -> case os:type() of {win32, _} -> %% Remove a character device. 
- {error, eacces} = ?FILE_MODULE:delete("nul"); + expect({error, eacces}, {error, einval}, + ?FILE_MODULE:delete("nul")); _ -> ?FILE_MODULE:write_file_info( Base, #file_info {mode=0}), {error, eacces} = ?FILE_MODULE:delete(Afile), ?FILE_MODULE:write_file_info( - Base, #file_info {mode=8#600}) + Base, #file_info {mode=8#700}) end, [] = flush(), @@ -2239,7 +2372,7 @@ e_make_dir(Config) when is_list(Config) -> ?FILE_MODULE:write_file_info(Base, #file_info {mode=0}), {error, eacces} = ?FILE_MODULE:make_dir(filename:join(Base, "xxxx")), ?FILE_MODULE:write_file_info( - Base, #file_info {mode=8#600}) + Base, #file_info {mode=8#700}) end, ok. @@ -2285,7 +2418,7 @@ e_del_dir(Config) when is_list(Config) -> ok = ?FILE_MODULE:make_dir(ADirectory), ?FILE_MODULE:write_file_info( Base, #file_info {mode=0}), {error, eacces} = ?FILE_MODULE:del_dir(ADirectory), - ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#600}) + ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#700}) end, [] = flush(), ok. @@ -2641,8 +2774,8 @@ altname(Config) when is_list(Config) -> {skipped, "Altname not supported on this platform"}; {ok, "LONGAL~1"} -> {ok, "A_FILE~1"} = ?FILE_MODULE:altname(Name), - {ok, "C:/"} = ?FILE_MODULE:altname("C:/"), - {ok, "C:\\"} = ?FILE_MODULE:altname("C:\\"), + {ok, "c:/"} = ?FILE_MODULE:altname("C:/"), + {ok, "c:/"} = ?FILE_MODULE:altname("C:\\"), {error,enoent} = ?FILE_MODULE:altname(NonexName), {ok, "short"} = ?FILE_MODULE:altname(ShortName), ok @@ -2923,20 +3056,22 @@ delayed_write(Config) when is_list(Config) -> %% %% Test caching and normal close of non-raw file {ok, Fd1} = - ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 2000}]), + ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 400}]), ok = ?FILE_MODULE:write(Fd1, Data1), - timer:sleep(1000), % Just in case the file system is slow + %% Wait for a reasonable amount of time to check whether the write was + %% practically instantaneous or actually delayed. + timer:sleep(100), {ok, Fd2} = ?FILE_MODULE:open(File, [read]), eof = ?FILE_MODULE:read(Fd2, 1), ok = ?FILE_MODULE:write(Fd1, Data1), % Data flush on size - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), {ok, Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 2*Size+1), ok = ?FILE_MODULE:write(Fd1, Data1), - timer:sleep(3000), % Wait until data flush on timeout + timer:sleep(500), % Wait until data flush on timeout {ok, Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 3*Size+1), ok = ?FILE_MODULE:write(Fd1, Data1), ok = ?FILE_MODULE:close(Fd1), % Data flush on close - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), {ok, Data1Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 4*Size+1), ok = ?FILE_MODULE:close(Fd2), %% @@ -2970,7 +3105,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref1, _, _, _} = Down1a -> ct:fail(Down1a) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow {ok, Fd3} = ?FILE_MODULE:open(File, [read]), eof = ?FILE_MODULE:read(Fd3, 1), Child1 ! 
{Parent, continue, normal}, @@ -2980,7 +3115,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref1, _, _, _} = Down1b -> ct:fail(Down1b) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow {ok, Data1} = ?FILE_MODULE:pread(Fd3, bof, Size+1), ok = ?FILE_MODULE:close(Fd3), %% @@ -2993,7 +3128,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref2, _, _, _} = Down2a -> ct:fail(Down2a) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow {ok, Fd4} = ?FILE_MODULE:open(File, [read]), eof = ?FILE_MODULE:read(Fd4, 1), Child2 ! {Parent, continue, kill}, @@ -3003,7 +3138,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref2, _, _, _} = Down2b -> ct:fail(Down2b) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow eof = ?FILE_MODULE:pread(Fd4, bof, 1), ok = ?FILE_MODULE:close(Fd4), %% @@ -3095,6 +3230,16 @@ read_ahead(Config) when is_list(Config) -> Data1Data2Data3 = Data1++Data2++Data3, {ok, Data1Data2Data3} = ?FILE_MODULE:read(Fd5, 3*Size+1), ok = ?FILE_MODULE:close(Fd5), + + %% Ensure that a read that draws from both the buffer and the file won't + %% return anything wonky. + SplitData = << <<(I rem 256)>> || I <- lists:seq(1, 1024) >>, + file:write_file(File, SplitData), + {ok, Fd6} = ?FILE_MODULE:open(File, [raw, read, binary, {read_ahead, 256}]), + {ok, <<1>>} = file:read(Fd6, 1), + <<1, Shifted:512/binary, _Rest/binary>> = SplitData, + {ok, Shifted} = file:read(Fd6, 512), + %% [] = flush(), ok. @@ -3699,6 +3844,83 @@ do_large_write(Name) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% Benchmarks +%% +%% Note that we only measure the time it takes to run the isolated file +%% operations and that the actual test runtime can differ significantly, +%% especially on the write side as the files need to be truncated before +%% writing. + +large_writes(Config) when is_list(Config) -> + Modes = [raw, binary], + OpCount = 4096, + Data = <<0:(64 bsl 10)/unit:8>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +large_writes_delayed(Config) when is_list(Config) -> + %% Each write is exactly as large as the delay buffer, causing the writes + %% to pass through each time, giving us a decent idea of how much overhead + %% delayed_write adds. + Modes = [raw, binary, {delayed_write, 64 bsl 10, 2000}], + OpCount = 4096, + Data = <<0:(64 bsl 10)/unit:8>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +tiny_writes(Config) when is_list(Config) -> + Modes = [raw, binary], + OpCount = 512 bsl 10, + Data = <<0>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +tiny_writes_delayed(Config) when is_list(Config) -> + Modes = [raw, binary, {delayed_write, 512 bsl 10, 2000}], + OpCount = 512 bsl 10, + Data = <<0>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +%% The read benchmarks assume that "benchmark_scratch_file" has been filled by +%% the write benchmarks. + +tiny_reads(Config) when is_list(Config) -> + Modes = [raw, binary], + OpCount = 512 bsl 10, + run_read_benchmark(Config, Modes, OpCount, 1). + +tiny_reads_ahead(Config) when is_list(Config) -> + Modes = [raw, binary, {read_ahead, 512 bsl 10}], + OpCount = 512 bsl 10, + run_read_benchmark(Config, Modes, OpCount, 1). + +run_write_benchmark(Config, Modes, OpCount, Data) -> + run_benchmark(Config, [write | Modes], OpCount, fun file:write/2, Data). 
+ +run_read_benchmark(Config, Modes, OpCount, OpSize) -> + run_benchmark(Config, [read | Modes], OpCount, fun file:read/2, OpSize). + +run_benchmark(Config, Modes, OpCount, Fun, Arg) -> + ScratchDir = proplists:get_value(priv_dir, Config), + Path = filename:join(ScratchDir, "benchmark_scratch_file"), + {ok, Fd} = file:open(Path, Modes), + submit_throughput_results(Fun, [Fd, Arg], OpCount). + +submit_throughput_results(Fun, Args, Times) -> + MSecs = measure_repeated_file_op(Fun, Args, Times, millisecond), + IOPS = trunc(Times * (1000 / MSecs)), + ct_event:notify(#event{ name = benchmark_data, data = [{value,IOPS}] }), + {comment, io_lib:format("~p IOPS, ~p ms", [IOPS, trunc(MSecs)])}. + +measure_repeated_file_op(Fun, Args, Times, Unit) -> + Start = os:perf_counter(Unit), + repeated_apply(Fun, Args, Times), + os:perf_counter(Unit) - Start. + +repeated_apply(_F, _Args, Times) when Times =< 0 -> + ok; +repeated_apply(F, Args, Times) -> + erlang:apply(F, Args), + repeated_apply(F, Args, Times - 1). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% response_analysis(Module, Function, Arguments) -> @@ -3934,7 +4156,7 @@ read_line_create_files(TestData) -> read_line_remove_files(TestData) -> [ file:delete(File) || {_Function,File,_,_} <- TestData ]. -%% read_line with prim_file. +%% read_line with ?PRIM_FILE. read_line_1(Config) when is_list(Config) -> PrivDir = proplists:get_value(priv_dir, Config), All = read_line_testdata(PrivDir), @@ -4103,9 +4325,9 @@ read_line_create7(Filename) -> file:close(F). read_line_all(Filename) -> - {ok,F} = prim_file:open(Filename,[read,binary]), + {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]), X=read_rl_lines(F), - prim_file:close(F), + ?PRIM_FILE:close(F), Bin = list_to_binary([B || {ok,B} <- X]), Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]), "\r\n","\n",[global,{return,binary}]), @@ -4138,7 +4360,7 @@ read_line_all4(Filename) -> {length(X),Bin}. read_rl_lines(F) -> - case prim_file:read_line(F) of + case ?PRIM_FILE:read_line(F) of eof -> []; {error,X} -> @@ -4158,9 +4380,9 @@ read_rl_lines2(F) -> end. read_line_all_alternating(Filename) -> - {ok,F} = prim_file:open(Filename,[read,binary]), + {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]), X=read_rl_lines(F,true), - prim_file:close(F), + ?PRIM_FILE:close(F), Bin = list_to_binary([B || {ok,B} <- X]), Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]), "\r\n","\n",[global,{return,binary}]), @@ -4194,8 +4416,8 @@ read_line_all_alternating4(Filename) -> read_rl_lines(F,Alternate) -> case begin case Alternate of - true -> prim_file:read(F,1); - false -> prim_file:read_line(F) + true -> ?PRIM_FILE:read(F,1); + false -> ?PRIM_FILE:read_line(F) end end of eof -> diff --git a/lib/kernel/test/file_name_SUITE.erl b/lib/kernel/test/file_name_SUITE.erl index 899102c908..3afc647081 100644 --- a/lib/kernel/test/file_name_SUITE.erl +++ b/lib/kernel/test/file_name_SUITE.erl @@ -77,6 +77,7 @@ init_per_testcase/2, end_per_testcase/2]). -export([normal/1,icky/1,very_icky/1,normalize/1,home_dir/1]). +-define(PRIM_FILE, prim_file). init_per_testcase(_Func, Config) -> Config. 
@@ -131,7 +132,7 @@ home_dir(Config) when is_list(Config) -> os:putenv("HOME",NewHome), {"HOME",Save}; _ -> - rm_rf(prim_file,NewHome), + rm_rf(?PRIM_FILE,NewHome), throw(unsupported_os) end, try @@ -145,7 +146,7 @@ home_dir(Config) when is_list(Config) -> _ -> os:putenv(SaveOldName,SaveOldValue) end, - rm_rf(prim_file,NewHome) + rm_rf(?PRIM_FILE,NewHome) end catch throw:need_unicode_mode -> @@ -190,7 +191,7 @@ normal(Config) when is_list(Config) -> try Priv = proplists:get_value(priv_dir, Config), file:set_cwd(Priv), - ok = check_normal(prim_file), + ok = check_normal(?PRIM_FILE), ok = check_normal(file), %% If all is good, delete dir again (avoid hanging dir on windows) rm_rf(file,"normal_dir"), @@ -210,7 +211,7 @@ icky(Config) when is_list(Config) -> try Priv = proplists:get_value(priv_dir, Config), file:set_cwd(Priv), - ok = check_icky(prim_file), + ok = check_icky(?PRIM_FILE), ok = check_icky(file), %% If all is good, delete dir again (avoid hanging dir on windows) rm_rf(file,"icky_dir"), @@ -229,7 +230,7 @@ very_icky(Config) when is_list(Config) -> try Priv = proplists:get_value(priv_dir, Config), file:set_cwd(Priv), - case check_very_icky(prim_file) of + case check_very_icky(?PRIM_FILE) of need_unicode_mode -> {skipped,"VM needs to be started in Unicode filename mode"}; ok -> @@ -292,17 +293,14 @@ check_normal(Mod) -> ok end, [ begin - {ok, FD} = Mod:open(Name,[read]), - {ok, Content} = Mod:read(FD,1024), - ok = file:close(FD) - end || {regular,Name,Content} <- NormalDir ], - [ begin {ok, FD} = Mod:open(Name,[read,binary]), BC = list_to_binary(Content), {ok, BC} = Mod:read(FD,1024), ok = file:close(FD) end || {regular,Name,Content} <- NormalDir ], + {error, badarg} = Mod:rename("fil1\0tmp_fil2","tmp_fil1"), Mod:rename("fil1","tmp_fil1"), + {error, badarg} = Mod:read_file("tmp_fil1\0.txt"), {ok, <<"fil1">>} = Mod:read_file("tmp_fil1"), {error,enoent} = Mod:read_file("fil1"), Mod:rename("tmp_fil1","fil1"), @@ -410,11 +408,6 @@ check_icky(Mod) -> ok end, [ begin - {ok, FD} = Mod:open(Name,[read]), - {ok, Content} = Mod:read(FD,1024), - ok = file:close(FD) - end || {regular,Name,Content} <- IckyDir ], - [ begin {ok, FD} = Mod:open(Name,[read,binary]), BC = list_to_binary([Content]), {ok, BC} = Mod:read(FD,1024), @@ -519,11 +512,6 @@ check_very_icky(Mod) -> ok end, [ begin - {ok, FD} = Mod:open(Name,[read]), - {ok, Content} = Mod:read(FD,1024), - ok = file:close(FD) - end || {regular,Name,Content} <- VeryIckyDir ], - [ begin {ok, FD} = Mod:open(Name,[read,binary]), BC = list_to_binary([Content]), {ok, BC} = Mod:read(FD,1024), diff --git a/lib/kernel/test/gen_sctp_SUITE.erl b/lib/kernel/test/gen_sctp_SUITE.erl index 620ab235a0..a0ae792ba9 100644 --- a/lib/kernel/test/gen_sctp_SUITE.erl +++ b/lib/kernel/test/gen_sctp_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2007-2016. All Rights Reserved. +%% Copyright Ericsson AB 2007-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -1038,8 +1038,7 @@ do_from_other_process(Fun) -> Result -> Parent ! {Ref,Result} catch - Class:Reason -> - Stacktrace = erlang:get_stacktrace(), + Class:Reason:Stacktrace -> Parent ! 
{Ref,Class,Reason,Stacktrace} end end), @@ -1617,8 +1616,7 @@ s_start(Socket, Timeout, Parent) -> try s_loop(Socket, Timeout, Parent, Handler, gb_trees:empty()) catch - Class:Reason -> - Stacktrace = erlang:get_stacktrace(), + Class:Reason:Stacktrace -> io:format(?MODULE_STRING":socket exception ~w:~w at~n" "~p.~n", [Class,Reason,Stacktrace]), erlang:raise(Class, Reason, Stacktrace) diff --git a/lib/kernel/test/gen_tcp_api_SUITE.erl b/lib/kernel/test/gen_tcp_api_SUITE.erl index 12d22519ce..1be016444f 100644 --- a/lib/kernel/test/gen_tcp_api_SUITE.erl +++ b/lib/kernel/test/gen_tcp_api_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2017. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -605,9 +605,9 @@ ok({ok,V}) -> V; ok(NotOk) -> try throw(not_ok) catch - Thrown -> + throw:Thrown:Stacktrace -> erlang:raise( - error, {Thrown, NotOk}, tl(erlang:get_stacktrace())) + error, {Thrown, NotOk}, tl(Stacktrace)) end. get_localaddr() -> diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl index 331864b5de..04c0c48e3a 100644 --- a/lib/kernel/test/gen_tcp_misc_SUITE.erl +++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -41,6 +41,7 @@ busy_send/1, busy_disconnect_passive/1, busy_disconnect_active/1, fill_sendq/1, partial_recv_and_close/1, partial_recv_and_close_2/1,partial_recv_and_close_3/1,so_priority/1, + recvtos/1, recvttl/1, recvtosttl/1, recvtclass/1, %% Accept tests primitive_accept/1,multi_accept_close_listen/1,accept_timeout/1, accept_timeouts_in_order/1,accept_timeouts_in_order2/1, @@ -51,7 +52,8 @@ several_accepts_in_one_go/1, accept_system_limit/1, active_once_closed/1, send_timeout/1, send_timeout_active/1, otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1, - wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1]). + wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1, + otp_12242/1]). %% Internal exports. -export([sender/3, not_owner/1, passive_sockets_server/2, priority_server/1, @@ -83,7 +85,8 @@ all() -> busy_disconnect_passive, busy_disconnect_active, fill_sendq, partial_recv_and_close, partial_recv_and_close_2, partial_recv_and_close_3, - so_priority, primitive_accept, + so_priority, recvtos, recvttl, recvtosttl, + recvtclass, primitive_accept, multi_accept_close_listen, accept_timeout, accept_timeouts_in_order, accept_timeouts_in_order2, accept_timeouts_in_order3, accept_timeouts_in_order4, @@ -93,7 +96,8 @@ all() -> killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit, active_once_closed, send_timeout, send_timeout_active, otp_7731, wrapping_oct, - zombie_sockets, otp_7816, otp_8102, otp_9389]. + zombie_sockets, otp_7816, otp_8102, otp_9389, + otp_12242]. groups() -> []. @@ -1572,52 +1576,56 @@ fill_sendq(Config) when is_list(Config) -> Master = self(), Server = spawn_link(fun () -> - {ok,L} = gen_tcp:listen - (0, [{active,false},binary, - {reuseaddr,true},{packet,0}]), + {ok,L} = gen_tcp:listen(0, [{active,false},binary, + {reuseaddr,true},{packet,0}]), {ok,Port} = inet:port(L), Master ! 
{self(),client, fill_sendq_client(Port, Master)}, fill_sendq_srv(L, Master) end), io:format("~p Server~n", [Server]), - receive {Server,client,Client} -> - io:format("~p Client~n", [Client]), - receive {Server,reader,Reader} -> - io:format("~p Reader~n", [Reader]), - fill_sendq_loop(Server, Client, Reader) + receive + {Server,client,Client} -> + io:format("~p Client~n", [Client]), + receive + {Server,reader,Reader} -> + io:format("~p Reader~n", [Reader]), + fill_sendq_loop(Server, Client, Reader) end end. fill_sendq_loop(Server, Client, Reader) -> %% Master %% - receive {Server,send} -> + receive + {Server,send} -> fill_sendq_loop(Server, Client, Reader) after 2000 -> %% Send queue full, sender blocked -> close client. io:format("Send timeout, closing Client...~n", []), Client ! {self(),close}, - receive {Server,[{error,closed}]} -> - io:format("Got server closed.~n"), - receive {Reader,[{error,closed}]} -> - io:format - ("Got reader closed.~n"), - ok - after 3000 -> - ct:fail({timeout,{closed,reader}}) - end; - {Reader,[{error,closed}]} -> - io:format("Got reader closed.~n"), - receive {Server,[{error,closed}]} -> - io:format("Got server closed~n"), - ok - after 3000 -> - ct:fail({timeout,{closed,server}}) - end - after 3000 -> - ct:fail({timeout,{closed,[server,reader]}}) - end + receive + {Server,[{error,closed}]} -> + io:format("Got server closed.~n"), + receive + {Reader,[{error,closed}]} -> + io:format("Got reader closed.~n"), + ok + after 3000 -> + ct:fail({timeout,{closed,reader}}) + end; + {Reader,[{error,closed}]} -> + io:format("Got reader closed.~n"), + receive + {Server,[{error,closed}]} -> + io:format("Got server closed~n"), + ok + after 3000 -> + ct:fail({timeout,{closed,server}}) + end + after 3000 -> + ct:fail({timeout,{closed,[server,reader]}}) + end end. fill_sendq_srv(L, Master) -> @@ -1910,6 +1918,232 @@ so_priority(Config) when is_list(Config) -> end end. + + +%% IP_RECVTOS and IP_RECVTCLASS for IP_PKTOPTIONS +%% does not seem to be implemented in Linux until kernel 3.1 +%% +%% It seems pktoptions does not return valid values +%% for IPv4 connect sockets. On the accept socket +%% we get valid values, but on the connect socket we get +%% the default values for TOS and TTL. +%% +%% Therefore the argument CheckConnect that enables +%% checking the returned values for the connect socket. +%% It is only used for recvtclass that is an IPv6 option +%% and there we get valid values from both socket ends. + +recvtos(_Config) -> + test_pktoptions( + inet, [{recvtos,tos,96}], + fun recvtos_ok/2, + false). + +recvtosttl(_Config) -> + test_pktoptions( + inet, [{recvtos,tos,96},{recvttl,ttl,33}], + fun (OSType, OSVer) -> + recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer) + end, + false). + +recvttl(_Config) -> + test_pktoptions( + inet, [{recvttl,ttl,33}], + fun recvttl_ok/2, + false). + +recvtclass(_Config) -> + {ok,IFs} = inet:getifaddrs(), + case + [Name || + {Name,Opts} <- IFs, + lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)] + of + [_] -> + test_pktoptions( + inet6, [{recvtclass,tclass,224}], + fun recvtclass_ok/2, + true); + [] -> + {skip,{ipv6_not_supported,IFs}} + end. + +%% These version numbers are the highest noted in daily tests +%% where the test fails for a plausible reason, so +%% skip on that platform. +%% +%% On newer versions it might be fixed, but we'll see about that +%% when machines with newer versions gets installed... +%% If the test still fails for a plausible reason these +%% version numbers simply should be increased. 
+%% Or maybe we should change to only test on known good +%% platforms - change {unix,_} to false? + +%% pktoptions is not supported for IPv4 +recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0}); +recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0}); +%% Using the option returns einval, so it is not implemented. +recvtos_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0}); +recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0}); +%% Does not return any value - not implemented for pktoptions +recvtos_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0}); +%% +recvtos_ok({unix,_}, _) -> true; +recvtos_ok(_, _) -> false. + +%% pktoptions is not supported for IPv4 +recvttl_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0}); +recvttl_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0}); +%% Using the option returns einval, so it is not implemented. +recvttl_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0}); +recvttl_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0}); +%% +recvttl_ok({unix,linux}, _) -> true; +recvttl_ok({unix,_}, _) -> true; +recvttl_ok(_, _) -> false. + +%% pktoptions is not supported for IPv6 +recvtclass_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0}); +recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0}); +recvtclass_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0}); +%% Using the option returns einval, so it is not implemented. +recvtclass_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {11,2,0}); +%% Does not return any value - not implemented for pktoptions +recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0}); +%% +recvtclass_ok({unix,_}, _) -> true; +recvtclass_ok(_, _) -> false. + +semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) -> + if + X1 > X2 -> false; + X1 < X2 -> true; + Y1 > Y2 -> false; + Y1 < Y2 -> true; + Z1 > Z2 -> false; + Z1 < Z2 -> true; + true -> false + end; +semver_lt(_, {_,_,_}) -> false. + +test_pktoptions(Family, Spec, OSFilter, CheckConnect) -> + OSType = os:type(), + OSVer = os:version(), + case OSFilter(OSType, OSVer) of + true -> + io:format("Os: ~p, ~p~n", [OSType,OSVer]), + test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer); + false -> + {skip,{not_supported_for_os_version,{OSType,OSVer}}} + end. 
+%% +test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer) -> + Timeout = 5000, + RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec], + TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec], + FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec], + Opts = [Opt || {_,Opt,_} <- Spec], + OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec], + Address = + case Family of + inet -> + {127,0,0,1}; + inet6 -> + {0,0,0,0,0,0,0,1} + end, + %% + %% Set RecvOpts on listen socket + {ok,L} = + gen_tcp:listen( + 0, + [Family,binary,{active,false},{send_timeout,Timeout} + |TrueRecvOpts]), + {ok,P} = inet:port(L), + {ok,TrueRecvOpts} = inet:getopts(L, RecvOpts), + {ok,OptsValsDefault} = inet:getopts(L, Opts), + %% + %% Set RecvOpts and Option values on connect socket + {ok,S2} = + gen_tcp:connect( + Address, P, + [Family,binary,{active,false},{send_timeout,Timeout} + |TrueRecvOpts ++ OptsVals], + Timeout), + {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts), + {ok,OptsVals} = inet:getopts(S2, Opts), + %% + %% Accept socket inherits the options from listen socket + {ok,S1} = gen_tcp:accept(L, Timeout), + {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts), + {ok,OptsValsDefault} = inet:getopts(S1, Opts), +%%% %% +%%% %% Handshake +%%% ok = gen_tcp:send(S1, <<"hello">>), +%%% {ok,<<"hello">>} = gen_tcp:recv(S2, 5, Timeout), +%%% ok = gen_tcp:send(S2, <<"hi">>), +%%% {ok,<<"hi">>} = gen_tcp:recv(S1, 2, Timeout), + %% + %% Verify returned remote options + {ok,[{pktoptions,OptsVals1}]} = inet:getopts(S1, [pktoptions]), + {ok,[{pktoptions,OptsVals2}]} = inet:getopts(S2, [pktoptions]), + (Result1 = sets_eq(OptsVals1, OptsVals)) + orelse io:format( + "Accept differs: ~p neq ~p~n", [OptsVals1,OptsVals]), + (Result2 = sets_eq(OptsVals2, OptsValsDefault)) + orelse io:format( + "Connect differs: ~p neq ~p~n", + [OptsVals2,OptsValsDefault]), + %% + ok = gen_tcp:close(S2), + ok = gen_tcp:close(S1), + %% + %% + %% Clear RecvOpts on listen socket and set Option values + ok = inet:setopts(L, FalseRecvOpts ++ OptsVals), + {ok,FalseRecvOpts} = inet:getopts(L, RecvOpts), + {ok,OptsVals} = inet:getopts(L, Opts), + %% + %% Set RecvOpts on connecting socket + %% + {ok,S4} = + gen_tcp:connect( + Address, P, + [Family,binary,{active,false},{send_timeout,Timeout} + |TrueRecvOpts], + Timeout), + {ok,TrueRecvOpts} = inet:getopts(S4, RecvOpts), + {ok,OptsValsDefault} = inet:getopts(S4, Opts), + %% + %% Accept socket inherits the options from listen socket + {ok,S3} = gen_tcp:accept(L, Timeout), + {ok,FalseRecvOpts} = inet:getopts(S3, RecvOpts), + {ok,OptsVals} = inet:getopts(S3, Opts), + %% + %% Verify returned remote options + {ok,[{pktoptions,[]}]} = inet:getopts(S3, [pktoptions]), + {ok,[{pktoptions,OptsVals4}]} = inet:getopts(S4, [pktoptions]), + (Result3 = sets_eq(OptsVals4, OptsVals)) + orelse io:format( + "Accept2 differs: ~p neq ~p~n", [OptsVals4,OptsVals]), + %% + ok = gen_tcp:close(S4), + ok = gen_tcp:close(S3), + ok = gen_tcp:close(L), + (Result1 and ((not CheckConnect) or (Result2 and Result3))) + orelse + exit({failed, + [{OptsVals1,OptsVals4,OptsVals}, + {OptsVals2,OptsValsDefault}], + {OSType,OSVer}}), +%% exit({{OSType,OSVer},success}), % In search for the truth + ok. + +sets_eq(L1, L2) -> + lists:sort(L1) == lists:sort(L2). 
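%% An illustrative sketch, not part of the patch, of how the semver_lt/2
%% ordering above is meant to be read: the recvtos_ok/recvttl_ok/recvtclass_ok
%% clauses skip a platform unless the running OS version is at least the noted
%% {Major,Minor,Patch}. os_at_least/1 is a hypothetical helper built on the
%% same comparison.
os_at_least({_,_,_} = MinVer) ->
    %% "not lower than MinVer" - the same check the filter clauses spell out.
    not semver_lt(os:version(), MinVer).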
+ + + %% Accept test utilities (suites are below) millis() -> @@ -2201,7 +2435,7 @@ wait_until_accepting(Proc,0) -> exit({timeout_waiting_for_accepting,Proc}); wait_until_accepting(Proc,N) -> case process_info(Proc,current_function) of - {current_function,{prim_inet,accept0,2}} -> + {current_function,{prim_inet,accept0,3}} -> case process_info(Proc,status) of {status,waiting} -> ok; @@ -3052,3 +3286,143 @@ otp_13939(Config) when is_list(Config) -> exit(Pid, normal), ct:fail("Server process blocked on send.") end. + +otp_12242(Config) when is_list(Config) -> + case os:type() of + {win32,_} -> + %% Even if we set sndbuf and recbuf to small sizes + %% Windows either happily accepts to send GBytes of data + %% in no time, so the second send below that is supposed + %% to time out just succedes, or the first send that + %% is supposed to fill the inet_drv I/O queue and + %% start waiting for when more data can be sent + %% instead sends all data but suffers a send + %% failure that closes the socket + {skipped,backpressure_broken_on_win32}; + _ -> + %% Find the IPv4 address of an up and running interface + %% that is not loopback nor pointtopoint + {ok,IFList} = inet:getifaddrs(), + ct:pal("IFList ~p~n", [IFList]), + case + lists:flatten( + [lists:filtermap( + fun ({addr,Addr}) when tuple_size(Addr) =:= 4 -> + {true,Addr}; + (_) -> + false + end, Opts) + || {_,Opts} <- IFList, + case lists:keyfind(flags, 1, Opts) of + {_,Flags} -> + lists:member(up, Flags) + andalso + lists:member(running, Flags) + andalso + not lists:member(loopback, Flags) + andalso + not lists:member(pointtopoint, Flags); + false -> + false + end]) + of + [Addr|_] -> + otp_12242(Addr); + Other -> + {skipped,{no_external_address,Other}} + end + end; +%% +otp_12242(Addr) when tuple_size(Addr) =:= 4 -> + ct:timetrap(30000), + ct:pal("Using address ~p~n", [Addr]), + Bufsize = 16 * 1024, + Datasize = 128 * 1024 * 1024, % At least 1 s on GBit interface + Blob = binary:copy(<<$x>>, Datasize), + LOpts = + [{backlog,4},{reuseaddr,true},{ip,Addr}, + binary,{active,false}, + {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}], + COpts = + [binary,{active,false},{ip,Addr}, + {linger,{true,1}}, % 1 s + {send_timeout,500}, + {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}], + Dir = filename:dirname(code:which(?MODULE)), + {ok,ListenerNode} = + test_server:start_node( + ?UNIQ_NODE_NAME, slave, [{args,"-pa " ++ Dir}]), + Tester = self(), + Listener = + spawn( + ListenerNode, + fun () -> + {ok,L} = gen_tcp:listen(0, LOpts), + {ok,LPort} = inet:port(L), + Tester ! {self(),port,LPort}, + {ok,A} = gen_tcp:accept(L), + ok = gen_tcp:close(L), + receive + {Tester,stop} -> + ok = gen_tcp:close(A) + end + end), + ListenerMref = monitor(process, Listener), + LPort = receive {Listener,port,P} -> P end, + {ok,C} = gen_tcp:connect(Addr, LPort, COpts, infinity), + {ok,ReadCOpts} = inet:getopts(C, [recbuf,sndbuf,buffer]), + ct:pal("ReadCOpts ~p~n", [ReadCOpts]), + %% + %% Fill the buffers + ct:pal("Sending ~p bytes~n", [Datasize]), + ok = gen_tcp:send(C, Blob), + ct:pal("Sent ~p bytes~n", [Datasize]), + %% Spawn the Closer, + %% try to ensure that the close call is in progress + %% before the owner proceeds with sending + Owner = self(), + {_Closer,CloserMref} = + spawn_opt( + fun () -> + Owner ! 
{tref, erlang:start_timer(50, Owner, closing)}, + ct:pal("Calling gen_tcp:close(C)~n"), + try gen_tcp:close(C) of + Result -> + ct:pal("gen_tcp:close(C) -> ~p~n", [Result]), + ok = Result + catch + Class:Reason:Stacktrace -> + ct:pal( + "gen_tcp:close(C) >< ~p:~p~n ~p~n", + [Class,Reason,Stacktrace]), + erlang:raise(Class, Reason, Stacktrace) + end + end, [link,monitor]), + receive + {tref,Tref} -> + receive {timeout,Tref,_} -> ok end, + ct:pal("Sending ~p bytes again~n", [Datasize]), + %% Now should the close be in progress... + %% All buffers are full, remote end is not reading, + %% and the send timeout is 1 s so this will timeout: + {error,timeout} = gen_tcp:send(C, Blob), + ct:pal("Sending ~p bytes again timed out~n", [Datasize]), + ok = inet:setopts(C, [{send_timeout,10000}]), + %% There is a hidden timeout here. Port close is sampled + %% every 5 s by prim_inet:send_recv_reply. + %% Linger is 3 s so the Closer will finish this send: + ct:pal("Sending ~p bytes with 10 s timeout~n", [Datasize]), + {error,closed} = gen_tcp:send(C, Blob), + ct:pal("Sending ~p bytes with 10 s timeout was closed~n", + [Datasize]), + normal = wait(CloserMref), + ct:pal("The Closer has exited~n"), + Listener ! {Tester,stop}, + receive {'DOWN',ListenerMref,_,_,_} -> ok end, + ct:pal("The Listener has exited~n"), + test_server:stop_node(ListenerNode), + ok + end. + +wait(Mref) -> + receive {'DOWN',Mref,_,_,Reason} -> Reason end. diff --git a/lib/kernel/test/gen_udp_SUITE.erl b/lib/kernel/test/gen_udp_SUITE.erl index aa616d43d6..af9985de45 100644 --- a/lib/kernel/test/gen_udp_SUITE.erl +++ b/lib/kernel/test/gen_udp_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2017. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -34,8 +34,9 @@ -export([init_per_testcase/2, end_per_testcase/2]). -export([send_to_closed/1, active_n/1, - buffer_size/1, binary_passive_recv/1, bad_address/1, + buffer_size/1, binary_passive_recv/1, max_buffer_size/1, bad_address/1, read_packets/1, open_fd/1, connect/1, implicit_inet6/1, + recvtos/1, recvtosttl/1, recvttl/1, recvtclass/1, local_basic/1, local_unbound/1, local_fdopen/1, local_fdopen_unbound/1, local_abstract/1]). @@ -44,9 +45,10 @@ suite() -> {timetrap,{minutes,1}}]. all() -> - [send_to_closed, buffer_size, binary_passive_recv, + [send_to_closed, buffer_size, binary_passive_recv, max_buffer_size, bad_address, read_packets, open_fd, connect, implicit_inet6, active_n, + recvtos, recvtosttl, recvttl, recvtclass, {group, local}]. groups() -> @@ -237,6 +239,14 @@ buffer_size_server_recv(Socket, IP, Port, Cnt) -> end. +%%------------------------------------------------------------- +%% OTP-15206: Keep buffer small for udp +%%------------------------------------------------------------- +max_buffer_size(Config) when is_list(Config) -> + {ok, Socket} = gen_udp:open(0, [binary]), + ok = inet:setopts(Socket,[{recbuf, 1 bsl 20}]), + {ok, [{buffer, 65536}]} = inet:getopts(Socket,[buffer]), + gen_udp:close(Socket). %%------------------------------------------------------------- %% OTP-3823 gen_udp:recv does not return address in binary mode @@ -288,58 +298,56 @@ bad_address(Config) when is_list(Config) -> %% %% Starts a slave node that on command sends a bunch of messages %% to our UDP port. The receiving process just receives and -%% ignores the incoming messages, but counts them. 
-%% A tracing process traces the receiving process for -%% 'receive' and scheduling events. From the trace, -%% message contents is verified; and, how many messages -%% are received per in/out scheduling, which should be -%% the same as the read_packets parameter. -%% -%% What happens on the SMP emulator remains to be seen... -%% +%% ignores the incoming messages. +%% A tracing process traces the receiving port for +%% 'send' and scheduling events. From the trace, +%% how many messages are received per in/out scheduling, +%% which should never be more than the read_packet parameter. %% OTP-6249 UDP option for number of packet reads. read_packets(Config) when is_list(Config) -> - case erlang:system_info(smp_support) of - false -> - read_packets_1(); - true -> - %% We would need some new sort of tracing to test this - %% option reliably in an SMP emulator. - {skip,"SMP emulator"} - end. - -read_packets_1() -> N1 = 5, - N2 = 7, + N2 = 1, + Msgs = 30000, {ok,R} = gen_udp:open(0, [{read_packets,N1}]), {ok,RP} = inet:port(R), {ok,Node} = start_node(gen_udp_SUITE_read_packets), Die = make_ref(), - Loop = erlang:spawn_link(fun () -> infinite_loop(Die) end), %% - Msgs1 = [erlang:integer_to_list(M) || M <- lists:seq(1, N1*3)], - [V1|_] = read_packets_test(R, RP, Msgs1, Node), + {V1, Trace1} = read_packets_test(R, RP, Msgs, Node), {ok,[{read_packets,N1}]} = inet:getopts(R, [read_packets]), %% ok = inet:setopts(R, [{read_packets,N2}]), - Msgs2 = [erlang:integer_to_list(M) || M <- lists:seq(1, N2*3)], - [V2|_] = read_packets_test(R, RP, Msgs2, Node), + {V2, Trace2} = read_packets_test(R, RP, Msgs, Node), {ok,[{read_packets,N2}]} = inet:getopts(R, [read_packets]), %% stop_node(Node), - Mref = erlang:monitor(process, Loop), - Loop ! Die, - receive - {'DOWN',Mref,_,_, normal} -> - case {V1,V2} of - {N1,N2} -> - ok; - _ when V1 =/= N1, V2 =/= N2 -> - ok - end + ct:log("N1=~p, V1=~p vs N2=~p, V2=~p",[N1,V1,N2,V2]), + + dump_terms(Config, "trace1.terms", Trace2), + dump_terms(Config, "trace2.terms", Trace2), + + %% Because of the inherit racy-ness of the feature it is + %% hard to test that it behaves correctly. + %% Right now (OTP 21) a port task takes 5% of the + %% allotted port task reductions to execute, so + %% the max number of executions a port is allowed to + %% do before being re-scheduled is N * 20 + + if + V1 > (N1 * 20) -> + ct:fail("Got ~p msgs, max was ~p", [V1, N1]); + V2 > (N2 * 20) -> + ct:fail("Got ~p msgs, max was ~p", [V2, N2]); + true -> + ok end. +dump_terms(Config, Name, Terms) -> + FName = filename:join(proplists:get_value(priv_dir, Config),Name), + file:write_file(FName, term_to_binary(Terms)), + ct:log("Logged terms to ~s",[FName]). + infinite_loop(Die) -> receive Die -> @@ -350,7 +358,6 @@ infinite_loop(Die) -> end. read_packets_test(R, RP, Msgs, Node) -> - Len = length(Msgs), Receiver = self(), Tracer = spawn_link( @@ -375,24 +382,24 @@ read_packets_test(R, RP, Msgs, Node) -> [link,{priority,high}]), receive {Sender,{port,SP}} -> - erlang:trace(self(), true, - [running,'receive',{tracer,Tracer}]), + erlang:trace(R, true, + [running_ports,'send',{tracer,Tracer}]), erlang:yield(), Sender ! {Receiver,go}, - read_packets_recv(Len), - erlang:trace(self(), false, [all]), + read_packets_recv(Msgs), + erlang:trace(R, false, [all]), Tracer ! {Receiver,get_trace}, receive {Tracer,{trace,Trace}} -> - read_packets_verify(R, SP, Msgs, Trace) + {read_packets_verify(R, SP, Trace), Trace} end end. 
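%% A hypothetical helper sketching the port-tracing pattern the rewritten
%% read_packets test relies on: the receiving UDP port itself is traced for
%% scheduling ('running_ports') and 'send' events, and the collected trace is
%% later folded to count how many datagrams are delivered per in/out
%% scheduling cycle. trace_udp_port/2 is illustrative, not from the suite.
trace_udp_port(Port, TracerPid) ->
    %% After this call TracerPid receives {trace,Port,in|out,_} scheduling
    %% events and {trace,Port,send,{udp,Port,IP,P,Msg},Receiver} send events.
    erlang:trace(Port, true, [running_ports, 'send', {tracer, TracerPid}]).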
-read_packets_send(S, RP, [Msg|Msgs]) -> - ok = gen_udp:send(S, localhost, RP, Msg), - read_packets_send(S, RP, Msgs); -read_packets_send(_S, _RP, []) -> - ok. +read_packets_send(_S, _RP, 0) -> + ok; +read_packets_send(S, RP, Msgs) -> + ok = gen_udp:send(S, localhost, RP, "UDP FLOOOOOOD"), + read_packets_send(S, RP, Msgs - 1). read_packets_recv(0) -> ok; @@ -404,23 +411,24 @@ read_packets_recv(N) -> timeout end. -read_packets_verify(R, SP, Msg, Trace) -> - lists:reverse( - lists:sort(read_packets_verify(R, SP, Msg, Trace, 0))). - -read_packets_verify(R, SP, Msgs, [{trace,Self,OutIn,_}|Trace], M) - when Self =:= self(), OutIn =:= out; - Self =:= self(), OutIn =:= in -> - push(M, read_packets_verify(R, SP, Msgs, Trace, 0)); -read_packets_verify(R, SP, [Msg|Msgs], - [{trace,Self,'receive',{udp,R,{127,0,0,1},SP,Msg}} - |Trace], M) +read_packets_verify(R, SP, Trace) -> + [Max | _] = Pkts = lists:reverse(lists:sort(read_packets_verify(R, SP, Trace, 0))), + ct:pal("~p",[lists:sublist(Pkts,10)]), + Max. + +read_packets_verify(R, SP, [{trace,R,OutIn,_}|Trace], M) + when OutIn =:= out; OutIn =:= in -> + push(M, read_packets_verify(R, SP, Trace, 0)); +read_packets_verify(R, SP, [{trace, R,'receive',timeout}|Trace], M) -> + push(M, read_packets_verify(R, SP, Trace, 0)); +read_packets_verify(R, SP, + [{trace,R,'send',{udp,R,{127,0,0,1},SP,_Msg}, Self} | Trace], M) when Self =:= self() -> - read_packets_verify(R, SP, Msgs, Trace, M+1); -read_packets_verify(_R, _SP, [], [], M) -> + read_packets_verify(R, SP, Trace, M+1); +read_packets_verify(_R, _SP, [], M) -> push(M, []); -read_packets_verify(_R, _SP, Msgs, Trace, M) -> - ct:fail({read_packets_verify,mismatch,Msgs,Trace,M}). +read_packets_verify(_R, _SP, Trace, M) -> + ct:fail({read_packets_verify,mismatch,Trace,M}). push(0, Vs) -> Vs; @@ -566,6 +574,168 @@ active_n(Config) when is_list(Config) -> ok. + +recvtos(_Config) -> + test_recv_opts( + inet, [{recvtos,tos,96}], + fun recvtos_ok/2). + +recvtosttl(_Config) -> + test_recv_opts( + inet, [{recvtos,tos,96},{recvttl,ttl,33}], + fun (OSType, OSVer) -> + recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer) + end). + +recvttl(_Config) -> + test_recv_opts( + inet, [{recvttl,ttl,33}], + fun recvttl_ok/2). + +recvtclass(_Config) -> + {ok,IFs} = inet:getifaddrs(), + case + [Name || + {Name,Opts} <- IFs, + lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)] + of + [_] -> + test_recv_opts( + inet6, [{recvtclass,tclass,224}], + fun recvtclass_ok/2); + [] -> + {skip,ipv6_not_supported,IFs} + end. + +%% These version numbers are just above the highest noted in daily tests +%% where the test fails for a plausible reason, that is the lowest +%% where we can expect that the test mighe succeed, so +%% skip on platforms lower than this. +%% +%% On newer versions it might be fixed, but we'll see about that +%% when machines with newer versions gets installed... +%% If the test still fails for a plausible reason these +%% version numbers simply should be increased. +%% Or maybe we should change to only test on known good platforms? + +%% Using the option returns einval, so it is not implemented. +recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0}); +%% Using the option returns einval, so it is not implemented. +recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,4,0}); +%% Using the option returns einval, so it is not implemented. +recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0}); +%% +recvtos_ok({unix,_}, _) -> true; +recvtos_ok(_, _) -> false. 
+ +recvttl_ok({unix,_}, _) -> true; +recvttl_ok(_, _) -> false. + +%% Using the option returns einval, so it is not implemented. +recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {9,9,0}); +recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,6,11}); +%% +recvtclass_ok({unix,_}, _) -> true; +recvtclass_ok(_, _) -> false. + +semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) -> + if + X1 > X2 -> false; + X1 < X2 -> true; + Y1 > Y2 -> false; + Y1 < Y2 -> true; + Z1 > Z2 -> false; + Z1 < Z2 -> true; + true -> false + end; +semver_lt(_, {_,_,_}) -> false. + +test_recv_opts(Family, Spec, OSFilter) -> + OSType = os:type(), + OSVer = os:version(), + case OSFilter(OSType, OSVer) of + true -> + io:format("Os: ~p, ~p~n", [OSType,OSVer]), + test_recv_opts(Family, Spec, OSType, OSVer); + false -> + {skip,{not_supported_for_os_version,{OSType,OSVer}}} + end. +%% +test_recv_opts(Family, Spec, _OSType, _OSVer) -> + Timeout = 5000, + RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec], + TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec], + FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec], + Opts = [Opt || {_,Opt,_} <- Spec], + OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec], + TrueRecvOpts_OptsVals = TrueRecvOpts ++ OptsVals, + Addr = + case Family of + inet -> + {127,0,0,1}; + inet6 -> + {0,0,0,0,0,0,0,1} + end, + %% + {ok,S1} = + gen_udp:open(0, [Family,binary,{active,false}|TrueRecvOpts]), + {ok,P1} = inet:port(S1), + {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts), + ok = inet:setopts(S1, FalseRecvOpts), + {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts), + ok = inet:setopts(S1, TrueRecvOpts_OptsVals), + {ok,TrueRecvOpts_OptsVals} = inet:getopts(S1, RecvOpts ++ Opts), + %% + {ok,S2} = + gen_udp:open(0, [Family,binary,{active,true}|FalseRecvOpts]), + {ok,P2} = inet:port(S2), + {ok,FalseRecvOpts_OptsVals2} = inet:getopts(S2, RecvOpts ++ Opts), + OptsVals2 = FalseRecvOpts_OptsVals2 -- FalseRecvOpts, + %% + ok = gen_udp:send(S2, Addr, P1, <<"abcde">>), + ok = gen_udp:send(S1, Addr, P2, <<"fghij">>), + {ok,{_,P2,OptsVals3,<<"abcde">>}} = gen_udp:recv(S1, 0, Timeout), + verify_sets_eq(OptsVals3, OptsVals2), + receive + {udp,S2,_,P1,<<"fghij">>} -> + ok; + Other1 -> + exit({unexpected,Other1}) + after Timeout -> + exit(timeout) + end, + %% + ok = inet:setopts(S1, FalseRecvOpts), + {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts), + ok = inet:setopts(S2, TrueRecvOpts), + {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts), + %% + ok = gen_udp:send(S2, Addr, P1, <<"klmno">>), + ok = gen_udp:send(S1, Addr, P2, <<"pqrst">>), + {ok,{_,P2,<<"klmno">>}} = gen_udp:recv(S1, 0, Timeout), + receive + {udp,S2,_,P1,OptsVals4,<<"pqrst">>} -> + verify_sets_eq(OptsVals4, OptsVals); + Other2 -> + exit({unexpected,Other2}) + after Timeout -> + exit(timeout) + end, + ok = gen_udp:close(S1), + ok = gen_udp:close(S2), +%% exit({{OSType,OSVer},success}), % In search for the truth + ok. + +verify_sets_eq(L1, L2) -> + L = lists:sort(L1), + case lists:sort(L2) of + L -> + ok; + _ -> + exit({sets_neq,L1,L2}) + end. + + local_basic(_Config) -> SFile = local_filename(server), SAddr = {local,bin_filename(SFile)}, @@ -757,9 +927,9 @@ ok({ok,V}) -> V; ok(NotOk) -> try throw(not_ok) catch - Thrown -> + throw:Thrown:Stacktrace -> erlang:raise( - error, {Thrown, NotOk}, tl(erlang:get_stacktrace())) + error, {Thrown, NotOk}, tl(Stacktrace)) end. 
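%% A minimal sketch of the stacktrace migration applied throughout these
%% suites: the deprecated erlang:get_stacktrace/0 is replaced by binding the
%% stacktrace in the catch clause head (Class:Reason:Stacktrace, OTP 21
%% syntax). log_and_reraise/1 is a hypothetical helper, not from any suite.
log_and_reraise(Fun) ->
    try Fun()
    catch Class:Reason:Stacktrace ->
            io:format("caught ~w:~p~n  ~p~n", [Class, Reason, Stacktrace]),
            erlang:raise(Class, Reason, Stacktrace)
    end.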
diff --git a/lib/kernel/test/global_SUITE.erl b/lib/kernel/test/global_SUITE.erl index 0e7b7adc47..8eab36e308 100644 --- a/lib/kernel/test/global_SUITE.erl +++ b/lib/kernel/test/global_SUITE.erl @@ -1383,7 +1383,7 @@ ring(Config) when is_list(Config) -> rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]), %% sleep to make the partitioned net ready - ct:sleep(Time - msec()), + sleep(Time - msec()), pong = net_adm:ping(Cp0), pong = net_adm:ping(Cp1), @@ -1466,7 +1466,7 @@ simple_ring(Config) when is_list(Config) -> rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]), %% sleep to make the partitioned net ready - ct:sleep(Time - msec()), + sleep(Time - msec()), pong = net_adm:ping(Cp0), pong = net_adm:ping(Cp1), @@ -1542,7 +1542,7 @@ line(Config) when is_list(Config) -> rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]), %% Sleep to make the partitioned net ready - ct:sleep(Time - msec()), + sleep(Time - msec()), pong = net_adm:ping(Cp0), pong = net_adm:ping(Cp1), @@ -1626,7 +1626,7 @@ simple_line(Config) when is_list(Config) -> rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]), %% sleep to make the partitioned net ready - ct:sleep(Time - msec()), + sleep(Time - msec()), pong = net_adm:ping(Cp0), pong = net_adm:ping(Cp1), @@ -3555,7 +3555,7 @@ single_node(Time, Node, Config) -> lists:foreach(fun(N) -> _ = erlang:disconnect_node(N) end, nodes()), ?UNTIL(get_known(node()) =:= [node()]), spawn(?MODULE, init_2, []), - ct:sleep(Time - msec()), + sleep(Time - msec()), net_adm:ping(Node). init_2() -> @@ -4009,13 +4009,6 @@ collect_nodes(N, Max) -> [Node | collect_nodes(N+1, Max)] end. -only_element(_E, []) -> - true; -only_element(E, [E|R]) -> - only_element(E, R); -only_element(_E, _) -> - false. - exit_p(Pid) -> Ref = erlang:monitor(process, Pid), Pid ! die, @@ -4038,6 +4031,11 @@ wait_for_exit_fast(Pid) -> ok end. +sleep(Time) when Time > 0 -> + ct:sleep(Time); +sleep(_Time) -> + ok. + check_everywhere(Nodes, Name, Config) -> ?UNTIL(begin case rpc:multicall(Nodes, global, whereis_name, [Name]) of @@ -4162,10 +4160,10 @@ rpc_cast(Node, Module, Function, Args, File) -> %% The emulator now ensures that the node has been removed from %% nodes(). -rpc_disconnect_node(Node, DisconnectedNode, _Config) -> - True = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]), - False = lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, [])), - {true, false} = {True, False}. +rpc_disconnect_node(Node, DisconnectedNode, Config) -> + true = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]), + ?UNTIL + (not lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, []))). %%% %%% Utility diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl index 22db24de5f..f5ca6d0e1d 100644 --- a/lib/kernel/test/heart_SUITE.erl +++ b/lib/kernel/test/heart_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -168,7 +168,7 @@ reboot(Config) when is_list(Config) -> {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), rpc:call(Node, init, reboot, []), receive @@ -203,7 +203,7 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) -> [{"ERL_CRASH_DUMP_SECONDS", "0"}]), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), Mod = exhaust_atoms, @@ -254,7 +254,7 @@ node_start_soon_after_crash_test(Config) when is_list(Config) -> [{"ERL_CRASH_DUMP_SECONDS", "10"}]), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), Mod = exhaust_atoms, @@ -309,7 +309,7 @@ set_cmd(Config) when is_list(Config) -> clear_cmd(Config) when is_list(Config) -> {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), rpc:call(Node, init, reboot, []), receive diff --git a/lib/kernel/test/inet_SUITE.erl b/lib/kernel/test/inet_SUITE.erl index ba0d075ef2..f436eafad3 100644 --- a/lib/kernel/test/inet_SUITE.erl +++ b/lib/kernel/test/inet_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -40,7 +40,8 @@ lookup_bad_search_option/1, getif/1, getif_ifr_name_overflow/1,getservbyname_overflow/1, getifaddrs/1, - parse_strict_address/1, simple_netns/1, simple_netns_open/1, + parse_strict_address/1, ipv4_mapped_ipv6_address/1, + simple_netns/1, simple_netns_open/1, simple_bind_to_device/1, simple_bind_to_device_open/1]). -export([get_hosts/1, get_ipv6_hosts/1, parse_hosts/1, parse_address/1, @@ -667,6 +668,26 @@ parse_strict_address(Config) when is_list(Config) -> {ok, {3089,3106,23603,50240,0,0,119,136}} = inet:parse_strict_address("c11:0c22:5c33:c440::077:0088"). +ipv4_mapped_ipv6_address(Config) when is_list(Config) -> + {D1,D2,D3,D4} = IPv4Address = + {rand:uniform(256) - 1, + rand:uniform(256) - 1, + rand:uniform(256) - 1, + rand:uniform(256) - 1}, + E7 = (D1 bsl 8) bor D2, + E8 = (D3 bsl 8) bor D4, + io:format("IPv4Address: ~p.~n", [IPv4Address]), + {0,0,0,0,0,65535,E7,E8} = inet:ipv4_mapped_ipv6_address(IPv4Address), + IPv6Address = + {rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, E7, E8}, + IPv4Address = inet:ipv4_mapped_ipv6_address(IPv6Address), + ok. 
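%% A small worked example, with an address chosen here only for illustration,
%% of the mapping exercised above: {A,B,C,D} maps to ::ffff:A.B.C.D, that is
%% {0,0,0,0,0,16#FFFF,(A bsl 8) bor B,(C bsl 8) bor D}. For {192,168,1,10}
%% this gives {0,0,0,0,0,65535,49320,266}, since (192 bsl 8) bor 168 = 49320
%% and (1 bsl 8) bor 10 = 266.
ipv4_mapped_example() ->
    {0,0,0,0,0,65535,49320,266} =
        inet:ipv4_mapped_ipv6_address({192,168,1,10}).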
+ t_gethostnative(Config) when is_list(Config) -> %% this will result in 26 bytes sent which causes problem in Windows %% if the port-program has not assured stdin to be read in BINARY mode @@ -1039,28 +1060,26 @@ getservbyname_overflow(Config) when is_list(Config) -> getifaddrs(Config) when is_list (Config) -> {ok,IfAddrs} = inet:getifaddrs(), io:format("IfAddrs = ~p.~n", [IfAddrs]), - case - {os:type(), - [If || - {If,Opts} <- IfAddrs, - lists:keymember(hwaddr, 1, Opts)]} of - {{unix,sunos},[]} -> ok; - {OT,[]} -> - ct:fail({should_have_hwaddr,OT}); - _ -> ok + case [If || {If,Opts} <- IfAddrs, lists:keymember(hwaddr, 1, Opts)] of + [] -> + case os:type() of + {unix,sunos} -> ok; + OT -> + ct:fail({should_have_hwaddr,OT}) + end; + [_|_] -> ok end, - Addrs = - [element(1, A) || A <- ifaddrs(IfAddrs)], + Addrs = ifaddrs(IfAddrs), io:format("Addrs = ~p.~n", [Addrs]), [check_addr(Addr) || Addr <- Addrs], ok. -check_addr({addr,Addr}) +check_addr(Addr) when tuple_size(Addr) =:= 8, element(1, Addr) band 16#FFC0 =:= 16#FE80 -> io:format("Addr: ~p link local; SKIPPED!~n", [Addr]), ok; -check_addr({addr,Addr}) -> +check_addr(Addr) -> io:format("Addr: ~p.~n", [Addr]), Ping = "ping", Pong = "pong", @@ -1076,78 +1095,86 @@ check_addr({addr,Addr}) -> ok = gen_tcp:close(S2), ok = gen_tcp:close(L). --record(ifopts, {name,flags,addrs=[],hwaddr}). - -ifaddrs([]) -> []; -ifaddrs([{If,Opts}|IOs]) -> - #ifopts{flags=F} = Ifopts = check_ifopts(Opts, #ifopts{name=If}), - case F of - {flags,Flags} -> - case lists:member(running, Flags) of - true -> Ifopts#ifopts.addrs; - false -> [] - end ++ ifaddrs(IOs); - undefined -> - ifaddrs(IOs) +ifaddrs(IfOpts) -> + IfMap = collect_ifopts(IfOpts), + ChkFun = + fun Self({{_,Flags} = Key, Opts}, ok) -> + Broadcast = lists:member(broadcast, Flags), + P2P = lists:member(pointtopoint, Flags), + case Opts of + [{addr,_},{netmask,_},{broadaddr,_}|Os] + when Broadcast -> + Self({Key, Os}, ok); + [{addr,_},{netmask,_},{dstaddr,_}|Os] + when P2P -> + Self({Key, Os}, ok); + [{addr,_},{netmask,_}|Os] -> + Self({Key, Os}, ok); + [{hwaddr,_}|Os] -> + Self({Key, Os}, ok); + [] -> + ok + end + end, + fold_ifopts(ChkFun, ok, IfMap), + AddrsFun = + fun ({{_,Flags}, Opts}, Acc) -> + case + lists:member(running, Flags) + andalso (not lists:member(pointtopoint, Flags)) + of + true -> + lists:reverse( + [Addr || {addr,Addr} <- Opts], + Acc); + false -> + Acc + end + end, + fold_ifopts(AddrsFun, [], IfMap). + +collect_ifopts(IfOpts) -> + collect_ifopts(IfOpts, #{}). +%% +collect_ifopts(IfOpts, IfMap) -> + case IfOpts of + [{If,[{flags,Flags}|Opts]}|IfOs] -> + Key = {If,Flags}, + case maps:is_key(Key, IfMap) of + true -> + ct:fail({unexpected_ifopts,IfOpts,IfMap}); + false -> + collect_ifopts(IfOs, IfMap, Opts, Key, []) + end; + [] -> + IfMap; + _ -> + ct:fail({unexpected_ifopts,IfOpts,IfMap}) + end. +%% +collect_ifopts(IfOpts, IfMap, Opts, Key, R) -> + case Opts of + [{flags,_}|_] -> + {If,_} = Key, + collect_ifopts( + [{If,Opts}|IfOpts], maps:put(Key, lists:reverse(R), IfMap)); + [OptVal|Os] -> + collect_ifopts(IfOpts, IfMap, Os, Key, [OptVal|R]); + [] -> + collect_ifopts(IfOpts, maps:put(Key, lists:reverse(R), IfMap)) end. -check_ifopts([], #ifopts{flags=F,addrs=Raddrs}=Ifopts) -> - Addrs = lists:reverse(Raddrs), - R = Ifopts#ifopts{addrs=Addrs}, - io:format("~p.~n", [R]), - %% See how we did... 
- {flags,Flags} = F, - case lists:member(broadcast, Flags) of - true -> - [case A of - {{addr,_},{netmask,_},{broadaddr,_}} -> - A; - {{addr,T},{netmask,_}} when tuple_size(T) =:= 8 -> - A - end || A <- Addrs]; - false -> - case lists:member(pointtopoint, Flags) of - true -> - [case A of - {{addr,_},{netmask,_},{dstaddr,_}} -> - A - end || A <- Addrs]; - false -> - [case A of - {{addr,_},{netmask,_}} -> - A - end || A <- Addrs] - end - end, - R; -check_ifopts([{flags,_}=F|Opts], #ifopts{flags=undefined}=Ifopts) -> - check_ifopts(Opts, Ifopts#ifopts{flags=F}); -check_ifopts([{flags,_}=F|Opts], #ifopts{flags=Flags}=Ifopts) -> - case F of - Flags -> - check_ifopts(Opts, Ifopts); - _ -> - ct:fail({multiple_flags,F,Ifopts}) - end; -check_ifopts( - [{addr,_}=A,{netmask,_}=N,{dstaddr,_}=D|Opts], - #ifopts{addrs=Addrs}=Ifopts) -> - check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,D}|Addrs]}); -check_ifopts( - [{addr,_}=A,{netmask,_}=N,{broadaddr,_}=B|Opts], - #ifopts{addrs=Addrs}=Ifopts) -> - check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,B}|Addrs]}); -check_ifopts( - [{addr,_}=A,{netmask,_}=N|Opts], - #ifopts{addrs=Addrs}=Ifopts) -> - check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N}|Addrs]}); -check_ifopts([{addr,_}=A|Opts], #ifopts{addrs=Addrs}=Ifopts) -> - check_ifopts(Opts, Ifopts#ifopts{addrs=[{A}|Addrs]}); -check_ifopts([{hwaddr,Hwaddr}=H|Opts], #ifopts{hwaddr=undefined}=Ifopts) - when is_list(Hwaddr) -> - check_ifopts(Opts, Ifopts#ifopts{hwaddr=H}); -check_ifopts([{hwaddr,_}=H|_], #ifopts{}=Ifopts) -> - ct:fail({multiple_hwaddrs,H,Ifopts}). +fold_ifopts(Fun, Acc, IfMap) -> + fold_ifopts(Fun, Acc, IfMap, maps:keys(IfMap)). +%% +fold_ifopts(Fun, Acc, IfMap, Keys) -> + case Keys of + [Key|Ks] -> + Opts = maps:get(Key, IfMap), + fold_ifopts(Fun, Fun({Key,Opts}, Acc), IfMap, Ks); + [] -> + Acc + end. %% Works just like lists:member/2, except that any {127,_,_,_} tuple %% matches any other {127,_,_,_}. We do this to handle Linux systems diff --git a/lib/kernel/test/inet_res_SUITE.erl b/lib/kernel/test/inet_res_SUITE.erl index 6691ad9c06..df6e48abae 100644 --- a/lib/kernel/test/inet_res_SUITE.erl +++ b/lib/kernel/test/inet_res_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2009-2016. All Rights Reserved. +%% Copyright Ericsson AB 2009-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -217,10 +217,10 @@ proxy_start(TC, {NS,P}) -> spawn_link( fun () -> try proxy_start(TC, NS, P, Parent, Tag) - catch C:X -> + catch C:X:Stacktrace -> io:format( "~w: ~w:~p ~p~n", - [self(),C,X,erlang:get_stacktrace()]) + [self(),C,X,Stacktrace]) end end), receive {started,Tag,Port} -> diff --git a/lib/kernel/test/inet_sockopt_SUITE.erl b/lib/kernel/test/inet_sockopt_SUITE.erl index ada9c2689c..27ff74e309 100644 --- a/lib/kernel/test/inet_sockopt_SUITE.erl +++ b/lib/kernel/test/inet_sockopt_SUITE.erl @@ -110,9 +110,14 @@ simple(Config) when is_list(Config) -> {S1,S2} = create_socketpair(Opt, Opt), {ok,Opt} = inet:getopts(S1,OptTags), {ok,Opt} = inet:getopts(S2,OptTags), - COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- Opt], + NoPushOpt = case os:type() of + {unix, Osname} when Osname =:= linux; Osname =:= freebsd -> {nopush, true}; + {_,_} -> {nopush, false} + end, + COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- [NoPushOpt|Opt]], + COptTags = [X || {X,_} <- COpt], inet:setopts(S1,COpt), - {ok,COpt} = inet:getopts(S1,OptTags), + {ok,COpt} = inet:getopts(S1,COptTags), {ok,Opt} = inet:getopts(S2,OptTags), gen_tcp:close(S1), gen_tcp:close(S2), diff --git a/lib/kernel/test/init_SUITE.erl b/lib/kernel/test/init_SUITE.erl index 2b59eb2bfe..6a006cdc01 100644 --- a/lib/kernel/test/init_SUITE.erl +++ b/lib/kernel/test/init_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -299,7 +299,7 @@ many_restarts() -> many_restarts(Config) when is_list(Config) -> {ok, Node} = loose_node:start(init_test, "", ?DEFAULT_TIMEOUT_SEC), - loop_restart(50,Node,rpc:call(Node,erlang,whereis,[error_logger])), + loop_restart(50,Node,rpc:call(Node,erlang,whereis,[logger])), loose_node:stop(Node), ok. @@ -316,13 +316,13 @@ loop_restart(N,Node,EHPid) -> ct:fail(not_stopping) end, ok = wait_for(30, Node, EHPid), - loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[error_logger])). + loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[logger])). wait_for(0,Node,_) -> loose_node:stop(Node), error; wait_for(N,Node,EHPid) -> - case rpc:call(Node, erlang, whereis, [error_logger]) of + case rpc:call(Node, erlang, whereis, [logger]) of Pid when is_pid(Pid), Pid =/= EHPid -> %% erlang:display(ok), ok; @@ -365,7 +365,9 @@ restart(Config) when is_list(Config) -> %% Ok, the node is up, now the real test test begins. erlang:monitor_node(Node, true), SysProcs0 = rpc:call(Node, ?MODULE, find_system_processes, []), - [InitPid, PurgerPid, LitCollectorPid, DirtyCodePid] = SysProcs0, + io:format("SysProcs0=~p~n", [SysProcs0]), + [InitPid, PurgerPid, LitCollectorPid, + DirtySigNPid, DirtySigHPid, DirtySigMPid] = SysProcs0, InitPid = rpc:call(Node, erlang, whereis, [init]), PurgerPid = rpc:call(Node, erlang, whereis, [erts_code_purger]), Procs = rpc:call(Node, erlang, processes, []), @@ -381,7 +383,9 @@ restart(Config) when is_list(Config) -> ok = wait_restart(30, Node), SysProcs1 = rpc:call(Node, ?MODULE, find_system_processes, []), - [InitPid1, PurgerPid1, LitCollectorPid1, DirtyCodePid1] = SysProcs1, + io:format("SysProcs1=~p~n", [SysProcs1]), + [InitPid1, PurgerPid1, LitCollectorPid1, + DirtySigNPid1, DirtySigHPid1, DirtySigMPid1] = SysProcs1, %% Still the same init process! 
InitPid1 = rpc:call(Node, erlang, whereis, [init]), @@ -394,20 +398,18 @@ restart(Config) when is_list(Config) -> PurgerP = pid_to_list(PurgerPid1), %% and same literal area collector process! - case LitCollectorPid of - undefined -> undefined = LitCollectorPid1; - _ -> - LitCollectorP = pid_to_list(LitCollectorPid), - LitCollectorP = pid_to_list(LitCollectorPid1) - end, - - %% and same dirty process code checker process! - case DirtyCodePid of - undefined -> undefined = DirtyCodePid1; - _ -> - DirtyCodeP = pid_to_list(DirtyCodePid), - DirtyCodeP = pid_to_list(DirtyCodePid1) - end, + LitCollectorP = pid_to_list(LitCollectorPid), + LitCollectorP = pid_to_list(LitCollectorPid1), + + %% and same normal dirty signal handler process! + DirtySigNP = pid_to_list(DirtySigNPid), + DirtySigNP = pid_to_list(DirtySigNPid1), + %% and same high dirty signal handler process! + DirtySigHP = pid_to_list(DirtySigHPid), + DirtySigHP = pid_to_list(DirtySigHPid1), + %% and same max dirty signal handler process! + DirtySigMP = pid_to_list(DirtySigMPid), + DirtySigMP = pid_to_list(DirtySigMPid1), NewProcs0 = rpc:call(Node, erlang, processes, []), NewProcs = NewProcs0 -- SysProcs1, @@ -433,7 +435,9 @@ restart(Config) when is_list(Config) -> -record(sys_procs, {init, code_purger, literal_collector, - dirty_proc_checker}). + dirty_sig_handler_normal, + dirty_sig_handler_high, + dirty_sig_handler_max}). find_system_processes() -> find_system_procs(processes(), #sys_procs{}). @@ -442,21 +446,32 @@ find_system_procs([], SysProcs) -> [SysProcs#sys_procs.init, SysProcs#sys_procs.code_purger, SysProcs#sys_procs.literal_collector, - SysProcs#sys_procs.dirty_proc_checker]; + SysProcs#sys_procs.dirty_sig_handler_normal, + SysProcs#sys_procs.dirty_sig_handler_high, + SysProcs#sys_procs.dirty_sig_handler_max]; find_system_procs([P|Ps], SysProcs) -> - case process_info(P, initial_call) of - {initial_call,{otp_ring0,start,2}} -> + case process_info(P, [initial_call, priority]) of + [{initial_call,{otp_ring0,start,2}},_] -> undefined = SysProcs#sys_procs.init, find_system_procs(Ps, SysProcs#sys_procs{init = P}); - {initial_call,{erts_code_purger,start,0}} -> + [{initial_call,{erts_code_purger,start,0}},_] -> undefined = SysProcs#sys_procs.code_purger, find_system_procs(Ps, SysProcs#sys_procs{code_purger = P}); - {initial_call,{erts_literal_area_collector,start,0}} -> + [{initial_call,{erts_literal_area_collector,start,0}},_] -> undefined = SysProcs#sys_procs.literal_collector, find_system_procs(Ps, SysProcs#sys_procs{literal_collector = P}); - {initial_call,{erts_dirty_process_code_checker,start,0}} -> - undefined = SysProcs#sys_procs.dirty_proc_checker, - find_system_procs(Ps, SysProcs#sys_procs{dirty_proc_checker = P}); + [{initial_call,{erts_dirty_process_signal_handler,start,0}}, + {priority,normal}] -> + undefined = SysProcs#sys_procs.dirty_sig_handler_normal, + find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_normal = P}); + [{initial_call,{erts_dirty_process_signal_handler,start,0}}, + {priority,high}] -> + undefined = SysProcs#sys_procs.dirty_sig_handler_high, + find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_high = P}); + [{initial_call,{erts_dirty_process_signal_handler,start,0}}, + {priority,max}] -> + undefined = SysProcs#sys_procs.dirty_sig_handler_max, + find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_max = P}); _ -> find_system_procs(Ps, SysProcs) end. 
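%% An illustrative sketch of the identification rule the updated
%% find_system_procs/2 uses: the three dirty signal handler processes share
%% the same initial call and are told apart only by their priority.
%% is_dirty_sig_handler/1 is a hypothetical helper, not part of the suite.
is_dirty_sig_handler(Pid) ->
    case process_info(Pid, [initial_call, priority]) of
        [{initial_call, {erts_dirty_process_signal_handler, start, 0}},
         {priority, Prio}] ->
            {true, Prio};
        _ ->
            false
    end.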
diff --git a/lib/kernel/test/kernel_SUITE.erl b/lib/kernel/test/kernel_SUITE.erl index da56359294..3e5ed855b5 100644 --- a/lib/kernel/test/kernel_SUITE.erl +++ b/lib/kernel/test/kernel_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -30,14 +30,14 @@ -export([init_per_testcase/2, end_per_testcase/2]). %% Test cases must be exported. --export([app_test/1, appup_test/1]). +-export([app_test/1, appup_test/1, refc/1]). suite() -> [{ct_hooks,[ts_install_cth]}, {timetrap,{minutes,2}}]. all() -> - [app_test, appup_test]. + [app_test, appup_test, refc]. groups() -> []. @@ -163,3 +163,68 @@ check_appup([Vsn|Vsns],Instrs,Expected) -> end; check_appup([],_,_) -> ok. + +%%% Check that refc module handles the counters as expected +refc(_Config) -> + Enable = fun(Enable) -> erlang:system_flag(scheduler_wall_time, Enable) end, + IsOn = fun() -> undefined /= erlang:statistics(scheduler_wall_time) end, + Tester = self(), + Loop = fun Loop() -> + receive + die -> normal; + {apply, Bool} -> + Res = Enable(Bool), + Tester ! {self(), Res}, + Loop() + end + end, + + %% Counter should be 0 + false = Enable(false), + + false = Enable(true), + true = Enable(true), + true = Enable(false), + true = Enable(false), + + %% Counter should be 0 + false = IsOn(), + + P1 = spawn_link(Loop), + P1 ! {apply, true}, + receive {P1, R1} -> false = R1 end, + + %% P1 has turned it on counter should be one + true = IsOn(), + true = Enable(true), + true = Enable(false), + true = IsOn(), + + P1 ! {apply, false}, + receive {P1, R2} -> true = R2 end, + false = IsOn(), + + P1 ! {apply, true}, + receive {P1, R3} -> false = R3 end, + true = IsOn(), + true = Enable(false), + + + P1 ! die, + timer:sleep(100), + false = IsOn(), + false = Enable(false), + + P2 = spawn_link(Loop), + P2 ! {apply, true}, + receive {P2, R4} -> false = R4 end, + true = IsOn(), + P2 ! {apply, true}, + receive {P2, R5} -> true = R5 end, + true = IsOn(), + + P2 ! die, + timer:sleep(100), + false = IsOn(), + + ok. diff --git a/lib/kernel/test/kernel_bench.spec b/lib/kernel/test/kernel_bench.spec index 8de60dae31..4de133f21b 100644 --- a/lib/kernel/test/kernel_bench.spec +++ b/lib/kernel/test/kernel_bench.spec @@ -1 +1,2 @@ {groups,"../kernel_test",zlib_SUITE,[bench]}. +{groups,"../kernel_test",file_SUITE,[bench]}. diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl index 9a4578917d..9207025a2c 100644 --- a/lib/kernel/test/kernel_config_SUITE.erl +++ b/lib/kernel/test/kernel_config_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. 
@@ -76,7 +76,7 @@ sync(Conf) when is_list(Conf) -> %% Reset wall_clock {T1,_} = erlang:statistics(wall_clock), io:format("~p~n", [{t1, T1}]), - Command = lists:concat([lib:progname(), + Command = lists:append([ct:get_progname(), " -detached -sname cp1 ", "-config ", Config, " -env ERL_CRASH_DUMP erl_crash_dump.cp1"]), diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover new file mode 100644 index 0000000000..960bc0abff --- /dev/null +++ b/lib/kernel/test/logger.cover @@ -0,0 +1,14 @@ +%% -*- erlang -*- +{incl_mods,[error_logger, + logger, + logger_backend, + logger_config, + logger_disk_log_h, + logger_h_common, + logger_filters, + logger_formatter, + logger_server, + logger_simple_h, + logger_std_h, + logger_sup]}. + diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec new file mode 100644 index 0000000000..1ab90b3e93 --- /dev/null +++ b/lib/kernel/test/logger.spec @@ -0,0 +1,11 @@ +%% -*-erlang-*- +{suites,"../kernel_test", [error_logger_SUITE, + error_logger_warn_SUITE, + logger_SUITE, + logger_disk_log_h_SUITE, + logger_env_var_SUITE, + logger_filters_SUITE, + logger_formatter_SUITE, + logger_legacy_SUITE, + logger_simple_h_SUITE, + logger_std_h_SUITE]}. diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl new file mode 100644 index 0000000000..d831d0d108 --- /dev/null +++ b/lib/kernel/test/logger_SUITE.erl @@ -0,0 +1,1330 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). +-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). + +-define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, + file=>?FILE, line=>?LINE-N}). + +-define(TRY(X), my_try(fun() -> X end)). + + +suite() -> + [{timetrap,{seconds,30}}, + {ct_hooks,[logger_test_lib]}]. + +init_per_suite(Config) -> + case logger:get_handler_config(?STANDARD_HANDLER) of + {ok,StdH} -> + ok = logger:remove_handler(?STANDARD_HANDLER), + [{default_handler,StdH}|Config]; + _ -> + Config + end. + +end_per_suite(Config) -> + case ?config(default_handler,Config) of + #{module:=HMod} = HConfig -> + ok = logger:add_handler(?STANDARD_HANDLER,HMod,HConfig); + _ -> + ok + end. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + PC = logger:get_primary_config(), + [{logger_config,PC}|Config]. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. 
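%% The cleanup convention used by the cases below, shown as an illustrative
%% sketch: a test case Foo/1 may also define Foo(cleanup, Config), which
%% end_per_testcase/2 invokes via apply/3, treating a missing clause
%% (error:undef) as "nothing to clean up". my_case/1,2 and the handler id
%% tmp_h are hypothetical names for this sketch only.
my_case(_Config) ->
    ok = logger:add_handler(tmp_h, logger_std_h, #{}),
    ok.
my_case(cleanup, _Config) ->
    _ = logger:remove_handler(tmp_h),
    ok.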
+ +all() -> + [start_stop, + add_remove_handler, + multiple_handlers, + add_remove_filter, + change_config, + set_formatter, + log_no_levels, + log_all_levels_api, + macros, + set_level, + set_module_level, + set_application_level, + cache_module_level, + format_report, + filter_failed, + handler_failed, + config_sanity_check, + log_failed, + emulator, + via_logger_process, + other_node, + compare_levels, + process_metadata, + app_config, + kernel_config]. + +start_stop(_Config) -> + S = whereis(logger), + true = is_pid(S), + ok. + +add_remove_handler(_Config) -> + register(callback_receiver,self()), + Hs0 = logger:get_handler_config(), + {error,{not_found,h1}} = logger:get_handler_config(h1), + ok = logger:add_handler(h1,?MODULE,#{}), + [add] = test_server:messages_get(), + Hs = logger:get_handler_config(), + Hs0 = lists:filter(fun(#{id:=h1}) -> false; (_) -> true end, Hs), + {ok,#{module:=?MODULE,level:=all,filters:=[],filter_default:=log}} = %defaults + logger:get_handler_config(h1), + ok = logger:set_handler_config(h1,filter_default,stop), + [changing_config] = test_server:messages_get(), + ?LOG_NOTICE("hello",[]), + ok = check_no_log(), + ok = logger:set_handler_config(h1,filter_default,log), + [changing_config] = test_server:messages_get(), + {ok,#{filter_default:=log}} = logger:get_handler_config(h1), + ?LOG_NOTICE("hello",[]), + ok = check_logged(notice,"hello",[],?MY_LOC(1)), + ok = logger:remove_handler(h1), + [remove] = test_server:messages_get(), + Hs0 = logger:get_handler_config(), + {error,{not_found,h1}} = logger:get_handler_config(h1), + {error,{not_found,h1}} = logger:remove_handler(h1), + logger:notice("hello",[]), + ok = check_no_log(), + ok. + +add_remove_handler(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +multiple_handlers(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + ok = logger:add_handler(h2,?MODULE,#{level=>error,filter_default=>log}), + ?LOG_ERROR("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + ?LOG_NOTICE("hello",[]), + ok = check_logged(notice,"hello",[],?MY_LOC(1)), + ok = check_no_log(), + ok. + +multiple_handlers(cleanup,_Config) -> + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. 
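%% A hedged sketch of the minimum a handler module such as the ?MODULE used
%% above must provide: a log/2 callback that receives the log event and the
%% handler configuration. The suite's own callback is not shown in this hunk;
%% forwarding events to a test process, as below, is one simple way to make
%% them assertable. The forward_to key is an assumption of this sketch only.
log(LogEvent, #{forward_to := Pid}) ->
    Pid ! {log, LogEvent},
    ok.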
+ +add_remove_filter(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + LF = {fun(Log,_) -> Log#{level=>error} end, []}, + ok = logger:add_primary_filter(lf,LF), + {error,{already_exist,lf}} = logger:add_primary_filter(lf,LF), + {error,{already_exist,lf}} = logger:add_primary_filter(lf,{fun(Log,_) -> + Log + end, []}), + ?LOG_NOTICE("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_no_log(), + + ok = logger:add_handler(h2,?MODULE,#{level=>notice,filter_default=>log}), + HF = {fun(#{level:=error}=Log,_) -> + Log#{level=>mylevel}; + (_,_) -> + ignore + end, + []}, + ok = logger:add_handler_filter(h1,hf,HF), + {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,HF), + {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,{fun(Log,_) -> + Log + end, []}), + ?LOG_NOTICE("hello",[]), + ok = check_logged(mylevel,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + + ok = logger:remove_primary_filter(lf), + {error,{not_found,lf}} = logger:remove_primary_filter(lf), + + ?LOG_NOTICE("hello",[]), + ok = check_logged(notice,"hello",[],?MY_LOC(1)), + ok = check_logged(notice,"hello",[],?MY_LOC(2)), + + ?LOG_ERROR("hello",[]), + ok = check_logged(mylevel,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + + ok = logger:remove_handler_filter(h1,hf), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + ?LOG_NOTICE("hello",[]), + ok = check_logged(notice,"hello",[],?MY_LOC(1)), + ok = check_logged(notice,"hello",[],?MY_LOC(2)), + + ?LOG_ERROR("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + ok. + +add_remove_filter(cleanup,_Config) -> + logger:remove_primary_filter(lf), + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. 
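%% An illustrative sketch of the filter shape added and removed above: a
%% filter is {Fun, Extra} where Fun(LogEvent, Extra) returns a possibly
%% modified event, 'stop' to drop the event, or 'ignore' to leave the
%% decision to the remaining filters and filter_default. example_filter/0 is
%% hypothetical; it would be installed with, for example,
%% logger:add_primary_filter(my_filter, example_filter()).
example_filter() ->
    {fun(#{level := error} = Event, _Extra) -> Event#{level => critical};
        (_Event, _Extra) -> ignore
     end,
     []}.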
+ +change_config(_Config) -> + %% Overwrite handler config - check that defaults are added + {error,{not_found,h1}} = logger:set_handler_config(h1,#{}), + ok = logger:add_handler(h1,?MODULE,#{level=>notice,custom=>custom}), + {ok,#{module:=?MODULE,level:=notice,filter_default:=log,custom:=custom}} = + logger:get_handler_config(h1), + register(callback_receiver,self()), + ok = logger:set_handler_config(h1,#{filter_default=>stop}), + [changing_config] = test_server:messages_get(), + {ok,#{module:=?MODULE,level:=all,filter_default:=stop}=C2} = + logger:get_handler_config(h1), + false = maps:is_key(custom,C2), + {error,fail} = logger:set_handler_config(h1,#{conf_call=>fun() -> {error,fail} end}), + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config( + h1,#{conf_call=>fun() -> logger:set_handler_config(?MODULE,#{}) end}), + ok = + logger:set_handler_config( + h1,#{conf_call=>fun() -> logger:set_module_level(?MODULE,debug) end}), + {ok,C2} = logger:get_handler_config(h1), + + %% Change handler config: Single key + {error,fail} = logger:set_handler_config(h1,conf_call,fun() -> {error,fail} end), + ok = logger:set_handler_config(h1,custom,custom), + [changing_config] = test_server:messages_get(), + {ok,#{custom:=custom}=C3} = logger:get_handler_config(h1), + C2 = maps:remove(custom,C3), + + %% Change handler config: Map + ok = logger:update_handler_config(h1,#{custom=>new_custom}), + [changing_config] = test_server:messages_get(), + {ok,C4} = logger:get_handler_config(h1), + C4 = C3#{custom:=new_custom}, + + %% Change handler config: Id and module can not be changed + {error,{illegal_config_change,Old,New}} = + logger:set_handler_config(h1,id,newid), + %% Check that only the faulty field is included in return + [{id,h1}] = maps:to_list(Old), + [{id,newid}] = maps:to_list(New), + %% Check that both fields are included when both are changed + {error,{illegal_config_change, + #{id:=h1,module:=?MODULE}, + #{id:=newid,module:=newmodule}}} = + logger:set_handler_config(h1,#{id=>newid,module=>newmodule}), + + %% Change primary config: Single key + PConfig0 = logger:get_primary_config(), + ok = logger:set_primary_config(level,warning), + PConfig1 = logger:get_primary_config(), + PConfig1 = PConfig0#{level:=warning}, + + %% Change primary config: Map + ok = logger:update_primary_config(#{level=>error}), + PConfig2 = logger:get_primary_config(), + PConfig2 = PConfig1#{level:=error}, + + %% Overwrite primary config - check that defaults are added + ok = logger:set_primary_config(#{filter_default=>stop}), + #{level:=notice,filters:=[],filter_default:=stop}=PC1 = + logger:get_primary_config(), + 3 = maps:size(PC1), + %% Check that internal 'handlers' field has not been changed + MS = [{{{?HANDLER_KEY,'$1'},'_','_'},[],['$1']}], + HIds1 = lists:sort(ets:select(?LOGGER_TABLE,MS)), % dirty, internal data + HIds2 = lists:sort(logger:get_handler_ids()), + HIds1 = HIds2, + + %% Cleanup + ok = logger:set_primary_config(PConfig0), + [] = test_server:messages_get(), + + ok. + +change_config(cleanup,Config) -> + logger:remove_handler(h1), + PC = ?config(logger_config,Config), + logger:set_primary_config(PC), + ok. 
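%% A minimal sketch, not from the suite, of the distinction the change_config
%% case checks: logger:set_handler_config/2 replaces the whole configuration
%% map (defaults are re-added and custom keys are dropped), while
%% logger:update_handler_config/2 merges into the existing map.
%% set_vs_update/1 is a hypothetical helper.
set_vs_update(HandlerId) ->
    ok = logger:update_handler_config(HandlerId, #{custom => kept}),      % merge
    ok = logger:set_handler_config(HandlerId, #{filter_default => stop}). % replace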
+ +set_formatter(_Config) -> + {error,{not_found,h1}}=logger:set_handler_config(h1,formatter,{?MODULE,[]}), + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + ok = logger:set_handler_config(h1,formatter,{?MODULE,[]}), + logger:notice("hello",[]), + receive + {_Log,#{formatter:={?MODULE,[]}}} -> + ok + after 500 -> + ct:fail({timeout,no_log,process_info(self(),messages)}) + end, + ok. + +set_formatter(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +log_no_levels(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}), + logger:notice(M1=?map_rep), + ok = check_logged(notice,M1,#{}), + + Levels = [emergency,alert,critical,error,warning,notice,info,debug], + ok = logger:set_primary_config(level,none), + [logger:Level(#{Level=>rep}) || Level <- Levels], + ok = check_no_log(), + + ok = logger:set_primary_config(level,all), + M2 = ?map_rep, + ?LOG_NOTICE(M2), + ok = check_logged(notice,M2,#{}), + + ok = logger:set_module_level(?MODULE,none), + ?LOG_EMERGENCY(?map_rep), + ?LOG_ALERT(?map_rep), + ?LOG_CRITICAL(?map_rep), + ?LOG_ERROR(?map_rep), + ?LOG_WARNING(?map_rep), + ?LOG_NOTICE(?map_rep), + ?LOG_INFO(?map_rep), + ?LOG_DEBUG(?map_rep), + ok = check_no_log(), + + ok = logger:unset_module_level(?MODULE), + logger:notice(M3=?map_rep), + ok = check_logged(notice,M3,#{}), + + ok = logger:set_handler_config(h1,level,none), + [logger:Level(#{Level=>rep}) || Level <- Levels], + ok = check_no_log(), + + ok. +log_no_levels(cleanup,_Config) -> + logger:remove_handler(h1), + logger:set_primary_config(level,notice), + logger:unset_module_level(?MODULE), + ok. + +log_all_levels_api(_Config) -> + ok = logger:set_primary_config(level,all), + ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}), + test_api(emergency), + test_api(alert), + test_api(critical), + test_api(error), + test_api(warning), + test_api(notice), + test_api(info), + test_api(debug), + test_log_function(emergency), + ok. + +log_all_levels_api(cleanup,_Config) -> + logger:remove_handler(h1), + logger:set_primary_config(level,notice), + ok. + +macros(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}), + test_macros(emergency), + test_log_macro(alert), + ok. + +macros(cleanup,_Config) -> + logger:remove_handler(h1), + logger:unset_module_level(?MODULE), + ok. + +set_level(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}), + logger:debug(?map_rep), + ok = check_no_log(), + logger:notice(M1=?map_rep), + ok = check_logged(notice,M1,#{}), + ok = logger:set_primary_config(level,debug), + logger:debug(M2=?map_rep), + ok = check_logged(debug,M2,#{}), + ok. + +set_level(cleanup,_Config) -> + logger:remove_handler(h1), + logger:set_primary_config(level,notice), + ok. 
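log_no_levels/1 and set_level/1 above exercise the places a level check is applied: the primary level (or a per-module override) and then each handler's own level. A sketch of how these are typically set with the API used in this suite; my_mod is a placeholder module name and h1 is assumed to exist:

    ok = logger:set_primary_config(level, notice).          %% global threshold
    ok = logger:set_module_level(my_mod, debug).            %% per-module override
    ok = logger:set_handler_config(h1, level, error).       %% per-handler threshold
    ok = logger:unset_module_level(my_mod).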
+ +set_module_level(_Config) -> + [] = logger:get_module_level([?MODULE,other]), + [] = logger:get_module_level(?MODULE), + [] = logger:get_module_level(), + + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + {error,{invalid_level,bad}} = logger:set_module_level(?MODULE,bad), + {error,{not_a_list_of_modules,{bad}}} = + logger:set_module_level({bad},warning), + {error,{not_a_list_of_modules,[{bad}]}} = + logger:set_module_level([{bad}],warning), + ok = logger:set_module_level(?MODULE,warning), + [{?MODULE,warning}] = logger:get_module_level([?MODULE,other]), + [{?MODULE,warning}] = logger:get_module_level(?MODULE), + [{?MODULE,warning}] = logger:get_module_level(), + logger:notice(?map_rep,?MY_LOC(0)), + ok = check_no_log(), + logger:warning(M1=?map_rep,?MY_LOC(0)), + ok = check_logged(warning,M1,?MY_LOC(1)), + ok = logger:set_module_level(?MODULE,notice), + [{?MODULE,notice}] = logger:get_module_level([?MODULE,other]), + [{?MODULE,notice}] = logger:get_module_level(?MODULE), + [{?MODULE,notice}] = logger:get_module_level(), + logger:notice(M2=?map_rep,?MY_LOC(0)), + ok = check_logged(notice,M2,?MY_LOC(1)), + + {error,{not_a_list_of_modules,{bad}}} = logger:unset_module_level({bad}), + {error,{not_a_list_of_modules,[{bad}]}} = logger:unset_module_level([{bad}]), + ok = logger:unset_module_level(?MODULE), + [] = logger:get_module_level([?MODULE,other]), + [] = logger:get_module_level(?MODULE), + [] = logger:get_module_level(), + + ok = logger:set_module_level([m1,m2,m3],notice), + [{m1,notice},{m2,notice},{m3,notice}] = logger:get_module_level(), + ok = logger:unset_module_level(m2), + [{m1,notice},{m3,notice}] = logger:get_module_level(), + ok = logger:unset_module_level(), + [] = logger:get_module_level(), + + ok. + +set_module_level(cleanup,_Config) -> + logger:remove_handler(h1), + logger:unset_module_level(?MODULE), + ok. + +set_application_level(_Config) -> + + {error,{not_loaded,mnesia}} = logger:set_application_level(mnesia, warning), + {error,{not_loaded,mnesia}} = logger:unset_application_level(mnesia), + + case application:load(mnesia) of + ok -> + {ok, Modules} = application:get_key(mnesia, modules), + [] = logger:get_module_level(Modules), + + {error,{invalid_level,warn}} = + logger:set_application_level(mnesia, warn), + + ok = logger:set_application_level(mnesia, debug), + DebugModules = lists:sort([{M,debug} || M <- Modules]), + DebugModules = lists:sort(logger:get_module_level(Modules)), + + ok = logger:set_application_level(mnesia, warning), + + WarnModules = lists:sort([{M,warning} || M <- Modules]), + WarnModules = lists:sort(logger:get_module_level(Modules)), + + ok = logger:unset_application_level(mnesia), + [] = logger:get_module_level(Modules); + {error,{"no such file or directory","mnesia.app"}} -> + {skip, "Cannot load mnesia, does not exist"} + end. + +set_application_level(cleanup,_Config) -> + _ = logger:unset_application_level(mnesia), + _ = application:unload(mnesia), + ok. + +cache_module_level(_Config) -> + ok = logger:unset_module_level(?MODULE), + [] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config? + ?LOG_NOTICE(?map_rep), + %% Caching is done asynchronously, so wait a bit for the update + timer:sleep(100), + [_] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config? + ok = logger:unset_module_level(?MODULE), + [] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config? + ok. + +cache_module_level(cleanup,_Config) -> + logger:unset_module_level(?MODULE), + ok. 
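set_application_level/2, tested above, is a convenience that applies a module level to every module listed in the application's app file. A sketch mirroring the test, under the assumption that some application my_app (invented name) is loaded:

    ok = logger:set_application_level(my_app, debug).
    {ok,Mods} = application:get_key(my_app, modules).
    %% every module of the application now has an explicit level
    Expected = lists:sort([{M,debug} || M <- Mods]).
    Expected = lists:sort(logger:get_module_level(Mods)).
    ok = logger:unset_application_level(my_app).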
+ +format_report(_Config) -> + {"~ts",["string"]} = logger:format_report("string"), + {"~tp",[term]} = logger:format_report(term), + {"~tp",[[]]} = logger:format_report([]), + {" ~tp: ~tp",[key,value]} = logger:format_report([{key,value}]), + KeyVals = [{key1,value1},{key2,"value2"},{key3,[]}], + KeyValRes = + {" ~tp: ~tp\n ~tp: ~ts\n ~tp: ~tp", + [key1,value1,key2,"value2",key3,[]]} = + logger:format_report(KeyVals), + KeyValRes = logger:format_report(maps:from_list(KeyVals)), + KeyValRes = logger:format_otp_report(#{label=>{?MODULE,test},report=>KeyVals}), + {" ~tp: ~tp\n ~tp: ~tp", + [label,{?MODULE,test},report,KeyVals]} = + logger:format_report(#{label=>{?MODULE,test},report=>KeyVals}), + + {" ~tp: ~tp\n ~tp",[key1,value1,term]} = + logger:format_report([{key1,value1},term]), + + {" ~tp: ~tp\n ~tp",[key1,value1,[]]} = + logger:format_report([{key1,value1},[]]), + + {"~tp",[[]]} = logger:format_report([[],[],[]]), + + ok. + +filter_failed(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + + %% Logger filters + {error,{invalid_filter,_}} = + logger:add_primary_filter(lf,{fun(_) -> ok end,args}), + ok = logger:add_primary_filter(lf, + {fun(_,_) -> + erlang:error({badmatch,b}) + end, + args}), + #{filters:=[_]} = logger:get_primary_config(), + ok = logger:notice(M1=?map_rep), + ok = check_logged(notice,M1,#{}), + {error,{not_found,lf}} = logger:remove_primary_filter(lf), + + ok = logger:add_primary_filter(lf,{fun(_,_) -> faulty_return end,args}), + #{filters:=[_]} = logger:get_primary_config(), + ok = logger:notice(M2=?map_rep), + ok = check_logged(notice,M2,#{}), + {error,{not_found,lf}} = logger:remove_primary_filter(lf), + + %% Handler filters + {error,{not_found,h0}} = + logger:add_handler_filter(h0,hf,{fun(_,_) -> ignore end,args}), + {error,{not_found,h0}} = logger:remove_handler_filter(h0,hf), + {error,{invalid_filter,_}} = + logger:add_handler_filter(h1,hf,{fun(_) -> ok end,args}), + ok = logger:add_handler_filter(h1,hf, + {fun(_,_) -> + erlang:error({badmatch,b}) + end, + args}), + {ok,#{filters:=[_]}} = logger:get_handler_config(h1), + ok = logger:notice(M3=?map_rep), + ok = check_logged(notice,M3,#{}), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + + ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> faulty_return end,args}), + {ok,#{filters:=[_]}} = logger:get_handler_config(h1), + ok = logger:notice(M4=?map_rep), + ok = check_logged(notice,M4,#{}), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + + ok. + +filter_failed(cleanup,_Config) -> + logger:remove_handler(h1), + ok. 
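format_report/1 above is the default conversion of a report (map or key-value list) into a {Format,Args} pair. A caller can override the conversion per event through the report_cb metadata key, as sketched here (the field names id and reason are invented):

    ReportCb = fun(#{id := Id, reason := Reason}) ->
                       {"job ~p failed: ~tp", [Id, Reason]}
               end.
    logger:error(#{id => 42, reason => timeout}, #{report_cb => ReportCb}).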
+ +handler_failed(_Config) -> + register(callback_receiver,self()), + {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}), + {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}), + {error,{invalid_config,bad}} = logger:add_handler(h1,?MODULE,bad), + {error,{invalid_filters,false}} = + logger:add_handler(h1,?MODULE,#{filters=>false}), + {error,{invalid_filter_default,true}} = + logger:add_handler(h1,?MODULE,#{filter_default=>true}), + {error,{invalid_formatter,[]}} = + logger:add_handler(h1,?MODULE,#{formatter=>[]}), + {error,{invalid_handler,_}} = logger:add_handler(h1,nomodule,#{filter_default=>log}), + logger:notice(?map_rep), + check_no_log(), + H1 = logger:get_handler_config(), + false = lists:search(fun(#{id:=h1}) -> true; (_) -> false end,H1), + {error,{not_found,h1}} = logger:remove_handler(h1), + + ok = logger:add_handler(h2,?MODULE, + #{filter_default => log, + log_call => fun() -> + erlang:error({badmatch,b}) + end}), + {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}), + [add] = test_server:messages_get(), + + logger:notice(?map_rep), + [remove] = test_server:messages_get(), + H2 = logger:get_handler_config(), + false = lists:search(fun(#{id:=h2}) -> true; (_) -> false end,H2), + {error,{not_found,h2}} = logger:remove_handler(h2), + + CallAddHandler = fun() -> logger:add_handler(h2,?MODULE,#{}) end, + CrashHandler = fun() -> erlang:error({badmatch,b}) end, + KillHandler = fun() -> exit(self(), die) end, + + {error,{handler_not_added,{attempting_syncronous_call_to_self,_}}} = + logger:add_handler(h1,?MODULE,#{add_call=>CallAddHandler}), + {error,{handler_not_added,{callback_crashed,_}}} = + logger:add_handler(h1,?MODULE,#{add_call=>CrashHandler}), + {error,{handler_not_added,{logger_process_exited,_,die}}} = + logger:add_handler(h1,?MODULE,#{add_call=>KillHandler}), + + check_no_log(), + ok = logger:add_handler(h1,?MODULE,#{}), + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config(h1,#{conf_call=>CallAddHandler}), + {error,{callback_crashed,_}} = + logger:set_handler_config(h1,#{conf_call=>CrashHandler}), + {error,{logger_process_exited,_,die}} = + logger:set_handler_config(h1,#{conf_call=>KillHandler}), + + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config(h1,conf_call,CallAddHandler), + {error,{callback_crashed,_}} = + logger:set_handler_config(h1,conf_call,CrashHandler), + {error,{logger_process_exited,_,die}} = + logger:set_handler_config(h1,conf_call,KillHandler), + + ok = logger:remove_handler(h1), + [add,remove] = test_server:messages_get(), + + check_no_log(), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>CallAddHandler}), + ok = logger:remove_handler(h1), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>CrashHandler}), + ok = logger:remove_handler(h1), + ok = logger:add_handler(h1,?MODULE,#{rem_call=>KillHandler}), + ok = logger:remove_handler(h1), + [add,add,add] = test_server:messages_get(), + + ok. + +handler_failed(cleanup,_Config) -> + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. 
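handler_failed/1 exercises the handler callback contract that this suite itself implements further down: log/2 is mandatory, while adding_handler/1, removing_handler/1 and the changing_config callback are optional and run in the logger server process. A minimal standalone handler might look like this sketch; the module name and output target are illustrative only:

    -module(my_console_h).
    -export([log/2, adding_handler/1, removing_handler/1]).

    %% Optional: validate or complete the config when the handler is added.
    adding_handler(Config) -> {ok, Config}.

    %% Optional: clean up when the handler is removed.
    removing_handler(_Config) -> ok.

    %% Mandatory: called in the context of the process doing the logging.
    %% The formatter tuple is filled in by logger if not given explicitly.
    log(LogEvent, #{formatter := {FModule, FConfig}}) ->
        io:put_chars(FModule:format(LogEvent, FConfig)).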
+ +config_sanity_check(_Config) -> + %% Primary config + {error,{invalid_config,bad}} = logger:set_primary_config(bad), + {error,{invalid_filter_default,bad}} = + logger:set_primary_config(filter_default,bad), + {error,{invalid_level,bad}} = logger:set_primary_config(level,bad), + {error,{invalid_filters,bad}} = logger:set_primary_config(filters,bad), + {error,{invalid_filter,bad}} = logger:set_primary_config(filters,[bad]), + {error,{invalid_filter,{_,_}}} = + logger:set_primary_config(filters,[{id,bad}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_primary_config(filters,[{id,{bad,args}}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_primary_config(filters,[{id,{fun() -> ok end,args}}]), + {error,{invalid_primary_config,{bad,bad}}} = + logger:set_primary_config(bad,bad), + + %% Handler config + {error,{not_found,h1}} = logger:set_handler_config(h1,a,b), + ok = logger:add_handler(h1,?MODULE,#{}), + {error,{invalid_filter_default,bad}} = + logger:set_handler_config(h1,filter_default,bad), + {error,{invalid_level,bad}} = logger:set_handler_config(h1,level,bad), + {error,{invalid_filters,bad}} = logger:set_handler_config(h1,filters,bad), + {error,{invalid_filter,bad}} = logger:set_handler_config(h1,filters,[bad]), + {error,{invalid_filter,{_,_}}} = + logger:set_handler_config(h1,filters,[{id,bad}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_handler_config(h1,filters,[{id,{bad,args}}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_handler_config(h1,filters,[{id,{fun() -> ok end,args}}]), + {error,{invalid_formatter,bad}} = + logger:set_handler_config(h1,formatter,bad), + {error,{invalid_module,{bad}}} = + logger:set_handler_config(h1,formatter,{{bad},cfg}), + {error,{invalid_formatter_config,logger_formatter,bad}} = + logger:set_handler_config(h1,formatter,{logger_formatter,bad}), + {error,{invalid_formatter_config,logger_formatter,{bad,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}), + {error,{invalid_formatter_template,logger_formatter,bad}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>bad}}), + {error,{invalid_formatter_template,logger_formatter,[1]}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>[1]}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>[]}}), + {error,{invalid_formatter_config,logger_formatter,{single_line,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{single_line=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{single_line=>true}}), + {error,{invalid_formatter_config,logger_formatter,{legacy_header,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{legacy_header=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{legacy_header=>true}}), + {error,{invalid_formatter_config,logger_formatter,{report_cb,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{report_cb=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{report_cb=>fun(R) -> + {"~p",[R]} + end}}), + {error,{invalid_formatter_config,logger_formatter,{chars_limit,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{chars_limit=>4}}), + {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} = + 
logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{depth=>4}}), + {error,{invalid_formatter_config,logger_formatter,{max_size,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>unlimited}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{max_size=>4}}), + ok = logger:set_handler_config(h1,formatter,{module,config}), + {error,{callback_crashed,{error,{badmatch,3},[{?MODULE,check_config,1,_}]}}} = + logger:set_handler_config(h1,formatter,{?MODULE,crash}), + ok = logger:set_handler_config(h1,custom,custom), + + %% Old utc parameter is no longer allowed (replaced by time_offset) + {error,{invalid_formatter_config,logger_formatter,{utc,true}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{utc=>true}}), + {error,{invalid_formatter_config,logger_formatter,{time_offset,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>0}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>""}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"Z"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"z"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"-0:0"}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"+10:13"}}), + + {error,{invalid_formatter_config,logger_formatter,{time_offset,"+0"}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_offset=>"+0"}}), + + {error,{invalid_formatter_config,logger_formatter,{time_designator,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>bad}}), + {error,{invalid_formatter_config,logger_formatter,{time_designator,"s"}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>"s"}}), + {error,{invalid_formatter_config,logger_formatter,{time_designator,0}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>0}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{time_designator=>$\s}}), + ok. + +config_sanity_check(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +log_failed(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}), + {error,function_clause} = ?TRY(logger:log(bad,?map_rep)), + {error,function_clause} = ?TRY(logger:log(notice,?map_rep,bad)), + {error,function_clause} = ?TRY(logger:log(notice,fun() -> ?map_rep end,bad)), + {error,function_clause} = ?TRY(logger:log(notice,fun() -> ?map_rep end,bad,#{})), + {error,function_clause} = ?TRY(logger:log(notice,bad,bad,bad)), + {error,function_clause} = ?TRY(logger:log(notice,bad,bad,#{})), + check_no_log(), + ok = logger:log(notice,M1=?str,#{}), + check_logged(notice,M1,#{}), + ok = logger:log(notice,M2=?map_rep,#{}), + check_logged(notice,M2,#{}), + ok = logger:log(notice,M3=?keyval_rep,#{}), + check_logged(notice,M3,#{}), + + %% Should we check report input more thoroughly? 
+
+    ok = logger:log(notice,M4=?keyval_rep++[other,stuff,in,list],#{}),
+    check_logged(notice,M4,#{}),
+
+    %% This might break a handler since it is assumed to be a format
+    %% string and args, so it depends on how the handler protects itself
+    %% against something like io_lib:format("ok","ok")
+    ok = logger:log(notice,"ok","ok",#{}),
+    check_logged(notice,"ok","ok",#{}),
+
+    ok.
+
+log_failed(cleanup,_Config) ->
+    logger:remove_handler(h1),
+    ok.
+
+emulator(_Config) ->
+    ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+                                         tc_proc=>self()}),
+    Msg = "Error in process ~p on node ~p with exit value:~n~p~n",
+    Error = {badmatch,4},
+    Stack = [{module, function, 2, []}],
+    Pid = spawn(?MODULE, generate_error, [Error, Stack]),
+    check_logged(error, Msg, [Pid, node(), {Error, Stack}],
+                 #{gl=>group_leader(),
+                   error_logger=>#{tag=>error,emulator=>true}}),
+    ok.
+
+emulator(cleanup, _Config) ->
+    logger:remove_handler(h1),
+    ok.
+
+generate_error(Error, Stack) ->
+    erlang:raise(error, Error, Stack).
+
+via_logger_process(Config) ->
+    ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+                                         tc_proc=>self()}),
+
+    %% Explicitly send a message to the logger process
+    %% This is used by code_server, erl_prim_loader, init, prim_file, ...
+    Msg = ?str,
+    logger ! {log,error,Msg,[],#{}},
+    check_logged(error, Msg, [], #{}),
+
+    case os:type() of
+        {win32,_} ->
+            %% Skip this part on windows - can't change file mode
+            ok;
+        _ ->
+            %% This should trigger the same thing from erl_prim_loader
+            Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+            ok = file:make_dir(Dir),
+            ok = file:change_mode(Dir,8#0222),
+            error = erl_prim_loader:list_dir(Dir),
+            check_logged(error,
+                         #{report=>"File operation error: eacces. Target: " ++
+                               Dir ++". Function: list_dir. "},
+                         #{pid=>self(),
+                           gl=>group_leader(),
+                           error_logger=>#{tag=>error_report,
+                                           type=>std_error}}),
+            ok
+    end.
+
+via_logger_process(cleanup, Config) ->
+    Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+    _ = file:change_mode(Dir,8#0664),
+    _ = file:del_dir(Dir),
+    logger:remove_handler(h1),
+    ok.
+
+other_node(_Config) ->
+    ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+                                         tc_proc=>self()}),
+    {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+    rpc:call(Node,logger,error,[Msg=?str,#{}]),
+    check_logged(error,Msg,#{}),
+    ok.
+
+other_node(cleanup,_Config) ->
+    Nodes = nodes(),
+    [test_server:stop_node(Node) || Node <- Nodes],
+    logger:remove_handler(h1),
+    ok.
+
+compare_levels(_Config) ->
+    Levels = [emergency,alert,critical,error,warning,notice,info,debug],
+    ok = compare(Levels),
+    {error,badarg} = ?TRY(logger:compare_levels(bad,bad)),
+    {error,badarg} = ?TRY(logger:compare_levels({bad},notice)),
+    {error,badarg} = ?TRY(logger:compare_levels(notice,"bad")),
+    ok.
+
+compare([L|Rest]) ->
+    eq = logger:compare_levels(L,L),
+    [gt = logger:compare_levels(L,L1) || L1 <- Rest],
+    [lt = logger:compare_levels(L1,L) || L1 <- Rest],
+    compare(Rest);
+compare([]) ->
+    ok.
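On the comment in log_failed/1 about logger:log(notice,"ok","ok",#{}): a handler that treats the message as {Format,Args} has to survive arguments that do not match the format string. One way to guard against that, shown only as an illustration and not as how any particular OTP handler is written:

    safe_format(Format, Args) ->
        try io_lib:format(Format, Args)
        catch _:_ ->
                %% fall back to dumping both terms verbatim
                io_lib:format("FORMAT ERROR: ~tp - ~tp", [Format, Args])
        end.

    %% safe_format("ok", "ok") returns the fallback text instead of crashing.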
+
+process_metadata(_Config) ->
+    undefined = logger:get_process_metadata(),
+    {error,badarg} = ?TRY(logger:set_process_metadata(bad)),
+    ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+    Time = erlang:system_time(microsecond),
+    ProcMeta = #{time=>Time,line=>0,custom=>proc},
+    ok = logger:set_process_metadata(ProcMeta),
+    S1 = ?str,
+    ?LOG_NOTICE(S1,#{custom=>macro}),
+    check_logged(notice,S1,#{time=>Time,line=>0,custom=>macro}),
+
+    Time2 = erlang:system_time(microsecond),
+    S2 = ?str,
+    ?LOG_NOTICE(S2,#{time=>Time2,line=>1,custom=>macro}),
+    check_logged(notice,S2,#{time=>Time2,line=>1,custom=>macro}),
+
+    logger:notice(S3=?str,#{custom=>func}),
+    check_logged(notice,S3,#{time=>Time,line=>0,custom=>func}),
+
+    ProcMeta = logger:get_process_metadata(),
+    ok = logger:update_process_metadata(#{custom=>changed,custom2=>added}),
+    Expected = ProcMeta#{custom:=changed,custom2=>added},
+    Expected = logger:get_process_metadata(),
+    ok = logger:unset_process_metadata(),
+    undefined = logger:get_process_metadata(),
+
+    ok = logger:update_process_metadata(#{custom=>added_again}),
+    {error,badarg} = ?TRY(logger:update_process_metadata(bad)),
+    #{custom:=added_again} = logger:get_process_metadata(),
+
+    ok.
+
+process_metadata(cleanup,_Config) ->
+    logger:remove_handler(h1),
+    ok.
+
+app_config(Config) ->
+    %% Start a node with default configuration
+    {ok,_,Node} = logger_test_lib:setup(Config,[]),
+
+    App1Name = app1,
+    App1 = {application, App1Name,
+            [{description, "Test of app with logger config"},
+             {applications, [kernel]}]},
+    ok = rpc:call(Node,application,load,[App1]),
+    ok = rpc:call(Node,application,set_env,
+                  [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+
+    %% Try to add its own default handler
+    {error,{bad_config,{handler,{app1,{already_exist,default}}}}} =
+        rpc:call(Node,logger,add_handlers,[App1Name]),
+
+    %% Add a different handler
+    ok = rpc:call(Node,application,set_env,[App1Name,logger,
+                                            [{handler,myh,logger_std_h,#{}}]]),
+    ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+    {ok,#{filters:=DF}} = rpc:call(Node,logger,get_handler_config,[default]),
+    {ok,#{filters:=[]}} = rpc:call(Node,logger,get_handler_config,[myh]),
+
+    true = test_server:stop_node(Node),
+
+    %% Start a node with no default handler, then add its own default handler
+    {ok,#{handlers:=[#{id:=simple}]},Node} =
+        logger_test_lib:setup(Config,[{logger,[{handler,default,undefined}]}]),
+
+    ok = rpc:call(Node,application,load,[App1]),
+    ok = rpc:call(Node,application,set_env,
+                  [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+    ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+    #{handlers:=[#{id:=default,filters:=DF}]} =
+        rpc:call(Node,logger,get_config,[]),
+
+    true = test_server:stop_node(Node),
+
+    %% Start a silent node, then add its own default handler
+    {ok,#{handlers:=[]},Node} =
+        logger_test_lib:setup(Config,[{error_logger,silent}]),
+
+    {error,{bad_config,{handler,[{some,bad,config}]}}} =
+        rpc:call(Node,logger,add_handlers,[[{some,bad,config}]]),
+    ok = rpc:call(Node,logger,add_handlers,
+                  [[{handler,default,logger_std_h,#{}}]]),
+
+    #{handlers:=[#{id:=default,filters:=DF}]} =
+        rpc:call(Node,logger,get_config,[]),
+
+    ok.
+
+%% This test case is mainly to see code coverage. Note that
+%% logger_env_var_SUITE tests a lot of the same, and checks the
+%% functionality more thoroughly, but since it all happens at node
+%% start, it is not possible to see code coverage in that test.
+kernel_config(Config) -> + %% Start a node with simple handler only, then simulate kernel + %% start by calling internally exported + %% internal_init_logger(). This is to test all variants of kernel + %% config, including bad config, and see the code coverage. + {ok,#{handlers:=[#{id:=simple,filters:=DF}]}=LC,Node} = + logger_test_lib:setup(Config,[{error_logger,false}]), + + %% Same once more, to get coverage + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + LC = rpc:call(Node,logger,get_config,[]), + + %% This shall mean the same as above, but using 'logger' parameter + %% instead of 'error_logger' + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{handler,default,undefined}]]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + LC = rpc:call(Node,logger,get_config,[]), + + %% Silent + ok = rpc:call(Node,application,unset_env,[kernel,logger]), + ok = rpc:call(Node,application,set_env,[kernel,error_logger,silent]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% Default + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + ok = rpc:call(Node,application,unset_env,[kernel,logger]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% error_logger=tty (same as default) + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + ok = rpc:call(Node,application,set_env,[kernel,error_logger,tty]), + ok = rpc:call(Node,application,unset_env,[kernel,logger]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% error_logger={file,File} + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + F = filename:join(?config(priv_dir,Config), + atom_to_list(?FUNCTION_NAME)++".log"), + ok = rpc:call(Node,application,set_env,[kernel,error_logger,{file,F}]), + ok = rpc:call(Node,application,unset_env,[kernel,logger]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% Same, but using 'logger' parameter instead of 'error_logger' + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + ok = rpc:call(Node,application,set_env,[kernel,logger, + [{handler,default,logger_std_h, + #{config=>#{type=>{file,F}}}}]]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F}}}], + module_levels:=[]} = 
rpc:call(Node,logger,get_config,[]), + + %% Same, but with type={file,File,Modes} + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + M = [raw,write,delayed_write], + ok = rpc:call(Node,application,set_env,[kernel,logger, + [{handler,default,logger_std_h, + #{config=>#{type=>{file,F,M}}}}]]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{type:={file,F,M}}}], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% Same, but with disk_log handler + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + M = [raw,write,delayed_write], + ok = rpc:call(Node,application,set_env,[kernel,logger, + [{handler,default,logger_disk_log_h, + #{config=>#{file=>F}}}]]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=log,filters:=[]}, + handlers:=[#{id:=default,filters:=DF,config:=#{file:=F}}], + module_levels:=[]} = rpc:call(Node,logger,get_config,[]), + + %% Set primary filters and module level. No default handler. + ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{handler,default,undefined}, + {filters,stop,[{f1,{fun(_,_) -> log end,ok}}]}, + {module_level,debug,[?MODULE]}]]), + ok = rpc:call(Node,logger,internal_init_logger,[]), + ok = rpc:call(Node,logger,add_handlers,[kernel]), + #{primary:=#{filter_default:=stop,filters:=[_]}, + handlers:=[], + module_levels:=[{?MODULE,debug}]} = rpc:call(Node,logger,get_config,[]), + + %% Bad config + ok = rpc:call(Node,application,unset_env,[kernel,logger]), + + ok = rpc:call(Node,application,set_env,[kernel,error_logger,bad]), + {error,{bad_config,{kernel,{error_logger,bad}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,unset_env,[kernel,error_logger]), + ok = rpc:call(Node,application,set_env,[kernel,logger_level,bad]), + {error,{bad_config,{kernel,{logger_level,bad}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,unset_env,[kernel,logger_level]), + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{filters,stop,[bad]}]]), + {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{filters,stop,[bad]}]]), + {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{filters,stop,[{f1,bad}]}]]), + {error,{bad_config,{kernel,{invalid_filter,{f1,bad}}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,set_env, + [kernel,logger,MF=[{filters,stop,[]},{filters,log,[]}]]), + {error,{bad_config,{kernel,{multiple_filters,MF}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok = rpc:call(Node,application,set_env, + [kernel,logger,[{module_level,bad,[?MODULE]}]]), + {error,{bad_config,{kernel,{invalid_level,bad}}}} = + rpc:call(Node,logger,internal_init_logger,[]), + + ok. 
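kernel_config/1 drives, via application:set_env/3 and the internally exported internal_init_logger/0, the same configuration a booted node reads from the kernel application environment. The corresponding sys.config shape, as a sketch only; the file name, level and module name are examples:

    [{kernel,
      [{logger_level, notice},
       {logger,
        [{handler, default, logger_std_h,
          #{config => #{type => {file, "log/erlang.log"}}}},
         {module_level, debug, [my_module]}]}]}].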
+ +%%%----------------------------------------------------------------- +%%% Internal +check_logged(Level,Format,Args,Meta) -> + do_check_logged(Level,{Format,Args},Meta). + +check_logged(Level,Msg,Meta) when ?IS_REPORT(Msg) -> + do_check_logged(Level,{report,Msg},Meta); +check_logged(Level,Msg,Meta) when ?IS_STRING(Msg) -> + do_check_logged(Level,{string,Msg},Meta). + +do_check_logged(Level,Msg0,Meta0) -> + receive + {#{level:=Level,msg:=Msg,meta:=Meta},_} -> + check_msg(Msg0,Msg), + check_maps(Meta0,Meta,meta) + after 500 -> + ct:fail({timeout,no_log,process_info(self(),messages)}) + end. + +check_no_log() -> + receive + X -> ct:fail({got_unexpected_log,X}) + after 500 -> + ok + end. + +check_msg(Msg,Msg) -> + ok; +check_msg({report,Expected},{report,Got}) when is_map(Expected), is_map(Got) -> + check_maps(Expected,Got,msg); +check_msg(Expected,Got) -> + ct:fail({unexpected,msg,Expected,Got}). + +check_maps(Expected,Got,What) -> + case maps:merge(Got,Expected) of + Got -> + ok; + _ -> + ct:fail({unexpected,What,Expected,Got}) + end. + +%% Handler +adding_handler(#{add_call:=Fun}) -> + Fun(); +adding_handler(Config) -> + maybe_send(add), + {ok,Config}. + +removing_handler(#{rem_call:=Fun}) -> + Fun(); +removing_handler(_Config) -> + maybe_send(remove), + ok. +changing_config(_Old,#{conf_call:=Fun}) -> + Fun(); +changing_config(_Old,Config) -> + maybe_send(changing_config), + {ok,Config}. + +maybe_send(Msg) -> + case whereis(callback_receiver) of + undefined -> ok; + Pid -> Pid ! Msg + end. + +log(_Log,#{log_call:=Fun}) -> + Fun(); +log(Log,Config) -> + TcProc = maps:get(tc_proc,Config,self()), + TcProc ! {Log,Config}, + ok. + +test_api(Level) -> + logger:Level(#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},#{}), + logger:Level(#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},#{my=>meta}), + logger:Level("~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],#{}), + logger:Level("~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + logger:Level(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,x, + #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}), + logger:Level(fun(x) -> #{Level=>fun_to_r,meta=>true} end,x, + #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}), + logger:Level(fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,#{}), + logger:Level(F1=fun(x) -> {fun_to_bad} end,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + logger:Level(F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. 
+ +test_log_function(Level) -> + logger:log(Level,#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},#{}), + logger:log(Level,#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},#{my=>meta}), + logger:log(Level,"~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],#{}), + logger:log(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + logger:log(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end, + x, #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}), + logger:log(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end, + x, #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}), + logger:log(Level,fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,#{}), + logger:log(Level,F1=fun(x) -> {fun_to_bad} end,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + logger:log(Level,F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. + +test_macros(emergency=Level) -> + ?LOG_EMERGENCY(#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)), + ?LOG_EMERGENCY(#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}), + ?LOG_EMERGENCY("~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)), + ?LOG_EMERGENCY("~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end, + x, #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta], + (?MY_LOC(3))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true}, + (?MY_LOC(2))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)), + F1=fun(x) -> {fun_to_bad} end, + ?LOG_EMERGENCY(F1,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + F2=fun(x) -> erlang:error(fun_that_crashes) end, + ?LOG_EMERGENCY(F2,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. 
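The ?LOG_* and ?LOG macros tested above come from kernel/include/logger.hrl and add location metadata (mfa, file, line) to each event, which is what the ?MY_LOC assertions verify. Typical use in application code, sketched with an invented function:

    -include_lib("kernel/include/logger.hrl").

    handle_result(ok) ->
        ?LOG_INFO("all good");
    handle_result({error, Reason}) ->
        %% the macro injects mfa/file/line into the metadata
        ?LOG_ERROR("operation failed: ~tp", [Reason]).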
+ +test_log_macro(Level) -> + ?LOG(Level,#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)), + ?LOG(Level,#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}), + ?LOG(Level,"~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)), + ?LOG(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}), + ?LOG(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end, + x, #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta], + (?MY_LOC(3))#{my=>meta}), + ?LOG(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true}, + (?MY_LOC(2))#{my=>meta}), + ?LOG(Level,fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)), + F1=fun(x) -> {fun_to_bad} end, + ?LOG(Level,F1,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + F2=fun(x) -> erlang:error(fun_that_crashes) end, + ?LOG(Level,F2,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R -> {C,R} end. + +check_config(crash) -> + erlang:error({badmatch,3}); +check_config(_) -> + ok. diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl new file mode 100644 index 0000000000..87b8250781 --- /dev/null +++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl @@ -0,0 +1,1693 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_disk_log_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). +-include_lib("kernel/src/logger_h_common.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("kernel/include/file.hrl"). + +-define(check_no_log, [] = test_server:messages_get()). + +-define(check(Expected), + receive {log,Expected} -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). + +-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(bin(Msg), list_to_binary(Msg++"\n")). +-define(log_no(File,N), lists:concat([File,".",N])). +-define(domain,#{domain=>[?MODULE]}). + +suite() -> + [{timetrap,{seconds,30}}, + {ct_hooks,[logger_test_lib]}]. + +init_per_suite(Config) -> + timer:start(), % to avoid progress report + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. 
+ +init_per_testcase(TestHooksCase, Config) when + TestHooksCase == write_failure; + TestHooksCase == sync_failure -> + case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of + true -> + {skip,"Define the TEST_HOOKS macro to run this test"}; + false -> + ct:print("********** ~w **********", [TestHooksCase]), + Config + end; +init_per_testcase(TestCase, Config) -> + ct:print("********** ~w **********", [TestCase]), + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop_handler, + create_log, + open_existing_log, + disk_log_opts, + default_formatter, + logging, + filter_config, + errors, + formatter_fail, + config_fail, + bad_input, + info_and_reset, + reconfig, + sync, + disk_log_full, + disk_log_wrap, + disk_log_events, + write_failure, + sync_failure, + op_switch_to_sync, + op_switch_to_drop, + op_switch_to_flush, + limit_burst_disabled, + limit_burst_enabled_one, + limit_burst_enabled_period, + kill_disabled, + qlen_kill_new, + %% qlen_kill_std, + mem_kill_new, + %% mem_kill_std, + restart_after, + handler_requests_under_load + ]. + +start_stop_handler(_Config) -> + ok = logger:add_handler(?MODULE, logger_disk_log_h, #{}), + {error,{already_exist,?MODULE}} = + logger:add_handler(?MODULE, logger_disk_log_h, #{}), + true = is_pid(whereis(h_proc_name())), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(h_proc_name()). +start_stop_handler(cleanup, _Config) -> + logger:remove_handler(?MODULE). + +create_log(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_A"])), + LogFile1 = filename:join(PrivDir, Name1), + ok = start_and_add(Name1, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:notice("hello", ?domain), + logger_disk_log_h:filesync(Name1), + ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]), + try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000), + + %% test second handler + Name2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_B"])), + DLName = lists:concat([?FUNCTION_NAME,"_B_log"]), + LogFile2 = filename:join(PrivDir, DLName), + ok = start_and_add(Name2, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile2}), + logger:notice("dummy", ?domain), + logger_disk_log_h:filesync(Name2), + ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]), + try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000), + + remove_and_stop(Name1), + remove_and_stop(Name2), + try_read_file(?log_no(LogFile1,1), {ok,<<"hello\ndummy\n">>}, 1), + try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000), + ok. 
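create_log/1 goes through the suite's start_and_add/3 helper; outside a test, the same wrap log would be configured directly in the handler's config map when adding logger_disk_log_h. A sketch with placeholder path and sizes:

    ok = logger:add_handler(my_disk_log, logger_disk_log_h,
                            #{config => #{file => "log/my_app.log",
                                          type => wrap,
                                          max_no_files => 5,
                                          max_no_bytes => 10240}}).
    ok = logger_disk_log_h:filesync(my_disk_log).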
+ +open_existing_log(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + HName = ?FUNCTION_NAME, + DLName = lists:concat([?FUNCTION_NAME,"_log"]), + LogFile1 = filename:join(PrivDir, DLName), + ok = start_and_add(HName, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:notice("one", ?domain), + logger_disk_log_h:filesync(HName), + ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000), + logger:notice("two", ?domain), + ok = remove_and_stop(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\n">>}, 5000), + + logger:notice("two and a half", ?domain), + + ok = start_and_add(HName, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:notice("three", ?domain), + logger_disk_log_h:filesync(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000), + remove_and_stop(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000). + +disk_log_opts(Config) -> + Get = fun(Key, PL) -> proplists:get_value(Key, PL) end, + PrivDir = ?config(priv_dir,Config), + WName = list_to_atom(lists:concat([?FUNCTION_NAME,"_W"])), + WFile = lists:concat([?FUNCTION_NAME,"_W_log"]), + Size = length("12345"), + ConfigW = #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter => {?MODULE,no_nl}}, + WFileFull = filename:join(PrivDir, WFile), + DLOptsW = #{file => WFileFull, + type => wrap, + max_no_bytes => Size, + max_no_files => 2}, + ok = start_and_add(WName, ConfigW, DLOptsW), + WInfo1 = disk_log:info(WName), + ct:log("Fullname = ~s", [WFileFull]), + {WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1), + Get(size,WInfo1),Get(current_file,WInfo1)}, + logger:notice("123", ?domain), + logger_disk_log_h:filesync(WName), + timer:sleep(500), + 1 = Get(current_file, disk_log:info(WName)), + + logger:notice("45", ?domain), + logger_disk_log_h:filesync(WName), + timer:sleep(500), + 1 = Get(current_file, disk_log:info(WName)), + + logger:notice("6", ?domain), + logger_disk_log_h:filesync(WName), + timer:sleep(500), + 2 = Get(current_file, disk_log:info(WName)), + + logger:notice("7890", ?domain), + logger_disk_log_h:filesync(WName), + timer:sleep(500), + 2 = Get(current_file, disk_log:info(WName)), + + HName1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H1"])), + HFile1 = lists:concat([?FUNCTION_NAME,"_H1_log"]), + ConfigH = #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter => {?MODULE,no_nl}}, + HFile1Full = filename:join(PrivDir, HFile1), + DLOptsH1 = #{file => HFile1Full, + type => halt}, + ok = start_and_add(HName1, ConfigH, DLOptsH1), + HInfo1 = disk_log:info(HName1), + ct:log("Fullname = ~s", [HFile1Full]), + {HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1), + Get(size,HInfo1)}, + logger:notice("12345", ?domain), + logger_disk_log_h:filesync(HName1), + timer:sleep(500), + 1 = Get(no_written_items, disk_log:info(HName1)), + + HName2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H2"])), + HFile2 = lists:concat([?FUNCTION_NAME,"_H2_log"]), + HFile2Full = filename:join(PrivDir, HFile2), + DLOptsH2 = DLOptsH1#{file => HFile2Full, + max_no_bytes => 1000}, + ok = start_and_add(HName2, ConfigH, DLOptsH2), + HInfo3 = disk_log:info(HName2), + ct:log("Fullname = ~s", [HFile2Full]), + {HFile2Full,halt,1000} = {Get(file,HInfo3),Get(type,HInfo3), 
+ Get(size,HInfo3)}, + + remove_and_stop(WName), + remove_and_stop(HName1), + remove_and_stop(HName2), + ok. + +default_formatter(Config) -> + PrivDir = ?config(priv_dir,Config), + LogFile = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)), + HandlerConfig = #{config => #{file=>LogFile}, + filter_default=>log}, + ct:pal("Log: ~p", [LogFile]), + ok = logger:add_handler(?MODULE, logger_disk_log_h, HandlerConfig), + ok = logger:set_handler_config(?MODULE,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}), + LogName = lists:concat([LogFile, ".1"]), + logger:notice("dummy"), + wait_until_written(LogName), + {ok,Bin} = file:read_file(LogName), + match = re:run(Bin, "=NOTICE REPORT====.*\ndummy", [{capture,none}]), + ok. +default_formatter(cleanup, _Config) -> + logger:remove_handler(?MODULE). + +logging(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + LogFile = filename:join(PrivDir, Name), + ok = start_and_add(Name, #{filter_default=>log, + formatter=>{?MODULE,self()}}, + #{file => LogFile}), + MsgFormatter = fun(Term) -> {io_lib:format("Term:~p",[Term]),[]} end, + logger:notice([{x,y}], #{report_cb => MsgFormatter}), + logger:notice([{x,y}], #{}), + ct:pal("Checking contents of ~p", [?log_no(LogFile,1)]), + try_read_file(?log_no(LogFile,1), {ok,<<"Term:[{x,y}]\n x: y\n">>}, 5000). + +logging(cleanup, _Config) -> + Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + remove_and_stop(Name). + +filter_config(_Config) -> + ok = logger:add_handler(?MODULE,logger_disk_log_h,#{}), + {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE), + HConfig = maps:without([handler_pid,mode_tab],HConfig), + + FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()}, + #{config:=HConfig} = + logger_disk_log_h:filter_config(Config#{config=>FakeFullHConfig}), + ok. + +filter_config(cleanup,_Config) -> + logger:remove_handler(?MODULE), + ok. + +errors(Config) -> + PrivDir = ?config(priv_dir,Config), + Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + LogFile1 = filename:join(PrivDir,Name1), + HandlerConfig = #{config=>#{file=>LogFile1}, + filter_default=>log, + formatter=>{?MODULE,self()}}, + ok = logger:add_handler(Name1, logger_disk_log_h, HandlerConfig), + {error,{already_exist,Name1}} = + logger:add_handler(Name1, logger_disk_log_h, #{}), + + %%! TODO: + %%! Check how bad log_opts are handled! + + {error,{illegal_config_change, + logger_disk_log_h, + #{type:=wrap}, + #{type:=halt}}} = + logger:update_handler_config(Name1, + config, + #{type=>halt, + file=>LogFile1}), + + {error,{illegal_config_change, + logger_disk_log_h, + #{file:=LogFile1}, + #{file:="newfilename"}}} = + logger:update_handler_config(Name1, + config, + #{file=>"newfilename"}), + + %% Read-only fields may (accidentially) be included in the change, + %% but it won't take effect + {ok,C} = logger:get_handler_config(Name1), + ok = logger:set_handler_config(Name1,config, + #{handler_pid=>self(), + mode_tab=>erlang:make_ref()}), + {ok,C} = logger:get_handler_config(Name1), + + + ok = logger:remove_handler(Name1), + {error,{not_found,Name1}} = logger:remove_handler(Name1), + ok. + +errors(cleanup, _Config) -> + Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + _ = logger:remove_handler(Name1). 
+ +formatter_fail(Config) -> + PrivDir = ?config(priv_dir,Config), + Name = ?FUNCTION_NAME, + LogFile = filename:join(PrivDir,Name), + ct:pal("Log = ~p", [LogFile]), + HandlerConfig = #{config => #{file=>LogFile}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}, + %% no formatter! + logger:add_handler(Name, logger_disk_log_h, HandlerConfig), + Pid = whereis(h_proc_name(Name)), + true = is_pid(Pid), + H = logger:get_handler_ids(), + true = lists:member(Name,H), + + %% Formatter is added automatically + {ok,#{formatter:={logger_formatter,_}}} = logger:get_handler_config(Name), + logger:notice(M1=?msg,?domain), + Got1 = try_match_file(?log_no(LogFile,1),"[0-9\\+\\-T:\\.]* notice: "++M1,5000), + + ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}), + logger:notice(M2=?msg,?domain), + Got2 = try_match_file(?log_no(LogFile,1), + escape(Got1)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M2, + 5000), + + ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}), + logger:notice(M3=?msg,?domain), + Got3 = try_match_file(?log_no(LogFile,1), + escape(Got2)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M3, + 5000), + + ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}), + logger:notice(?msg,?domain), + try_match_file(?log_no(LogFile,1), + escape(Got3)++"FORMATTER ERROR: bad return value", + 5000), + + %% Check that handler is still alive and was never dead + Pid = whereis(h_proc_name(Name)), + H = logger:get_handler_ids(), + ok. + +formatter_fail(cleanup,_Config) -> + _ = logger:remove_handler(?FUNCTION_NAME), + ok. + +config_fail(_Config) -> + {error,{handler_not_added,{invalid_config,logger_disk_log_h,#{bad:=bad}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{config => #{bad => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + + {error,{handler_not_added,{invalid_config,logger_disk_log_h, + {invalid_levels,#{drop_mode_qlen:=1}}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{config => #{drop_mode_qlen=>1}}), + {error,{handler_not_added,{invalid_config,logger_disk_log_h, + {invalid_levels,#{sync_mode_qlen:=43, + drop_mode_qlen:=42}}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{config => #{sync_mode_qlen=>43, + drop_mode_qlen=>42}}), + {error,{handler_not_added,{invalid_config,logger_disk_log_h, + {invalid_levels,#{drop_mode_qlen:=43, + flush_qlen:=42}}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{config => #{drop_mode_qlen=>43, + flush_qlen=>42}}), + + ok = logger:add_handler(?MODULE,logger_disk_log_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + %% can't change the disk log options for a log already in use + {error,{illegal_config_change,logger_disk_log_h,_,_}} = + logger:update_handler_config(?MODULE,config, + #{max_no_files=>2}), + %% incorrect values of OP params + {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE), + {error,{invalid_config,logger_disk_log_h,{invalid_levels,_}}} = + logger:update_handler_config(?MODULE,config, + HConfig#{sync_mode_qlen=>100, + flush_qlen=>99}), + %% invalid name of config parameter + {error,{invalid_config,logger_disk_log_h,#{filesync_rep_int:=2000}}} = + logger:update_handler_config(?MODULE, config, + HConfig#{filesync_rep_int => 2000}), + ok. +config_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). 
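The invalid_levels errors in config_fail/1 come from the relation the overload-protection thresholds must satisfy, roughly sync_mode_qlen =< drop_mode_qlen =< flush_qlen as implied by the cases above. A valid setting, sketched with example numbers and a placeholder handler id:

    ok = logger:update_handler_config(
           my_disk_log, config,
           #{sync_mode_qlen => 10,     %% go synchronous at 10 queued requests
             drop_mode_qlen => 200,    %% start dropping at 200
             flush_qlen => 1000}).     %% flush the mailbox at 1000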
+ +bad_input(_Config) -> + {error,{badarg,{filesync,["BadType"]}}} = + logger_disk_log_h:filesync("BadType"), + {error,{badarg,{info,["BadType"]}}} = logger_disk_log_h:info("BadType"), + {error,{badarg,{reset,["BadType"]}}} = logger_disk_log_h:reset("BadType"). + +info_and_reset(_Config) -> + ok = logger:add_handler(?MODULE,logger_disk_log_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + #{id := ?MODULE} = logger_disk_log_h:info(?MODULE), + ok = logger_disk_log_h:reset(?MODULE). +info_and_reset(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +reconfig(Config) -> + Dir = ?config(priv_dir,Config), + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + #{id := ?MODULE, + sync_mode_qlen := ?SYNC_MODE_QLEN, + drop_mode_qlen := ?DROP_MODE_QLEN, + flush_qlen := ?FLUSH_QLEN, + burst_limit_enable := ?BURST_LIMIT_ENABLE, + burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable := ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen := ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL, + handler_state := + #{log_opts := #{type := ?DISK_LOG_TYPE, + max_no_files := ?DISK_LOG_MAX_NO_FILES, + max_no_bytes := ?DISK_LOG_MAX_NO_BYTES, + file := DiskLogFile}}} = + logger_disk_log_h:info(?MODULE), + {ok,#{config := + #{sync_mode_qlen := ?SYNC_MODE_QLEN, + drop_mode_qlen := ?DROP_MODE_QLEN, + flush_qlen := ?FLUSH_QLEN, + burst_limit_enable := ?BURST_LIMIT_ENABLE, + burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable := ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen := ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL, + file := DiskLogFile, + max_no_files := ?DISK_LOG_MAX_NO_FILES, + max_no_bytes := ?DISK_LOG_MAX_NO_BYTES, + type := wrap} = HConfig0}} = + logger:get_handler_config(?MODULE), + + HConfig1 = HConfig0#{sync_mode_qlen => 1, + drop_mode_qlen => 2, + flush_qlen => 3, + burst_limit_enable => false, + burst_limit_max_count => 10, + burst_limit_window_time => 10, + overload_kill_enable => true, + overload_kill_qlen => 100000, + overload_kill_mem_size => 10000000, + overload_kill_restart_after => infinity, + filesync_repeat_interval => no_repeat}, + ok = logger:set_handler_config(?MODULE, config, HConfig1), + #{id := ?MODULE, + sync_mode_qlen := 1, + drop_mode_qlen := 2, + flush_qlen := 3, + burst_limit_enable := false, + burst_limit_max_count := 10, + burst_limit_window_time := 10, + overload_kill_enable := true, + overload_kill_qlen := 100000, + overload_kill_mem_size := 10000000, + overload_kill_restart_after := infinity, + filesync_repeat_interval := no_repeat} = + logger_disk_log_h:info(?MODULE), + {ok,#{config:=HConfig1}} = logger:get_handler_config(?MODULE), + + ok = logger:update_handler_config(?MODULE, config, + #{flush_qlen => ?FLUSH_QLEN}), + {ok,#{config:=C1}} = logger:get_handler_config(?MODULE), + ct:log("C1: ~p",[C1]), + C1 = HConfig1#{flush_qlen => ?FLUSH_QLEN}, + + ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C2}} = logger:get_handler_config(?MODULE), + ct:log("C2: ~p",[C2]), + C2 = 
HConfig0#{sync_mode_qlen => 1}, + + ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}), + {ok,#{config:=C3}} = logger:get_handler_config(?MODULE), + ct:log("C3: ~p",[C3]), + C3 = HConfig0#{drop_mode_qlen => 100}, + + ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C4}} = logger:get_handler_config(?MODULE), + ct:log("C4: ~p",[C4]), + C4 = HConfig0#{sync_mode_qlen => 1, + drop_mode_qlen => 100}, + + ok = logger:remove_handler(?MODULE), + + File = filename:join(Dir, "logfile"), + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}, + config=> + #{type => halt, + max_no_files => 1, + max_no_bytes => 1024, + file => File}}), + #{handler_state := + #{log_opts := #{type := halt, + max_no_files := 1, + max_no_bytes := 1024, + file := File}}} = + logger_disk_log_h:info(?MODULE), + {ok,#{config := + #{type := halt, + max_no_files := 1, + max_no_bytes := 1024, + file := File}=HaltHConfig} = Config2} = + logger:get_handler_config(?MODULE), + + ok = logger:update_handler_config(?MODULE, level, notice), + {ok,C5} = logger:get_handler_config(?MODULE), + ct:log("C5: ~p",[C5]), + C5 = Config2#{level => notice}, + + ok = logger:set_handler_config(?MODULE, level, info), + {ok,C6} = logger:get_handler_config(?MODULE), + ct:log("C6: ~p",[C6]), + C6 = Config2#{level => info}, + + %% You are not allowed to actively set the write once fields + %% (type, max_no_files, max_no_bytes, file) in runtime. + {error, {illegal_config_change,_,_,_}} = + logger:set_handler_config(?MODULE,config,#{type=>wrap}), + {error, {illegal_config_change,_,_,_}} = + logger:set_handler_config(?MODULE,config,#{max_no_files=>2}), + {error, {illegal_config_change,_,_,_}} = + logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}), + {error, {illegal_config_change,_,_,_}} = + logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}), + {ok,C7} = logger:get_handler_config(?MODULE), + ct:log("C7: ~p",[C7]), + C7 = C6, + + %% ... but if you don't specify the write once fields, then + %% set_handler_config shall NOT reset them to their default value + ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}), + {ok,#{config:=C8}} = logger:get_handler_config(?MODULE), + ct:log("C8: ~p",[C8]), + C8 = HaltHConfig#{sync_mode_qlen=>1}, + ok. + +reconfig(cleanup, _Config) -> + logger:remove_handler(?MODULE). 
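A hedged sketch of the update-versus-set semantics that reconfig/1 verifies: update_handler_config/3 merges into the current config map, while set_handler_config/3 resets the other writable keys to their defaults and leaves the write-once disk_log options alone. The handler id my_disk_log is made up and assumed to be an already-installed logger_disk_log_h handler:

    {ok, #{config := C0}} = logger:get_handler_config(my_disk_log).
    ok = logger:update_handler_config(my_disk_log, config, #{sync_mode_qlen => 1}).
    {ok, #{config := C1}} = logger:get_handler_config(my_disk_log).
    C1 = C0#{sync_mode_qlen => 1}.   %% update: only the given key changed
    %% set: other writable keys go back to their defaults, but the
    %% write-once disk_log options (file, type, ...) are kept.
    ok = logger:set_handler_config(my_disk_log, config, #{drop_mode_qlen => 100}).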
+
+sync(Config) ->
+    Dir = ?config(priv_dir,Config),
+    File = filename:join(Dir, ?FUNCTION_NAME),
+    Log = lists:concat([File,".1"]),
+    ok = logger:add_handler(?MODULE,
+                            logger_disk_log_h,
+                            #{config => #{file => File},
+                              filter_default=>log,
+                              filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+                              formatter=>{?MODULE,nl}}),
+
+    start_tracer([{logger_disk_log_h,disk_log_write,3},
+                  {disk_log,sync,1}],
+                 [{logger_disk_log_h,disk_log_write,<<"first\n">>},
+                  {disk_log,sync}]),
+
+    logger:notice("first", ?domain),
+    %% wait for automatic disk_log_sync
+    check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
+
+    %% check that if there's no repeated filesync active,
+    %% a disk_log_sync is still performed when handler goes idle
+    {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
+    HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
+    ok = logger:update_handler_config(?MODULE, config, HConfig1),
+
+    no_repeat = maps:get(filesync_repeat_interval,
+                         logger_disk_log_h:info(?MODULE)),
+    %% The following timer is to make sure the time from the last log
+    %% ("first") to the next ("second") is long enough, so that a flush
+    %% is triggered by the idle timeout between "first" and "second".
+    timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+
+    start_tracer([{logger_disk_log_h,disk_log_write,3},
+                  {disk_log,sync,1}],
+                 [{logger_disk_log_h,disk_log_write,<<"second\n">>},
+                  {disk_log,sync},
+                  {logger_disk_log_h,disk_log_write,<<"third\n">>},
+                  {disk_log,sync}]),
+
+    logger:notice("second", ?domain),
+    timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+    logger:notice("third", ?domain),
+    %% wait for automatic disk_log_sync
+    check_tracer(?IDLE_DETECT_TIME_MSEC*2),
+
+    try_read_file(Log, {ok,<<"first\nsecond\nthird\n">>}, 1000),
+
+    %% switch repeated filesync on and verify that the looping works
+    SyncInt = 1000,
+    WaitT = 4500,
+    OneSync = {logger_h_common,handle_cast,repeated_filesync},
+    %% receive 1 repeated_filesync per sec
+    start_tracer([{logger_h_common,handle_cast,2}],
+                 [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
+
+    HConfig2 = HConfig#{filesync_repeat_interval => SyncInt},
+    ok = logger:update_handler_config(?MODULE, config, HConfig2),
+
+    SyncInt = maps:get(filesync_repeat_interval,
+                       logger_disk_log_h:info(?MODULE)),
+    timer:sleep(WaitT),
+    HConfig3 = HConfig#{filesync_repeat_interval => no_repeat},
+    ok = logger:update_handler_config(?MODULE, config, HConfig3),
+    check_tracer(100),
+    ok.
+sync(cleanup,_Config) ->
+    dbg:stop_clear(),
+    logger:remove_handler(?MODULE).
+
+disk_log_wrap(Config) ->
+    Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+    Dir = ?config(priv_dir,Config),
+    File = filename:join(Dir, ?FUNCTION_NAME),
+    ct:pal("Log = ~p", [File]),
+    MaxFiles = 3,
+    MaxBytes = 5,
+    ok = logger:add_handler(?MODULE,
+                            logger_disk_log_h,
+                            #{filter_default=>log,
+                              filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+                              formatter=>{?MODULE,self()},
+                              config=>
+                                  #{type => wrap,
+                                    max_no_files => MaxFiles,
+                                    max_no_bytes => MaxBytes,
+                                    file => File}}),
+    Info = disk_log:info(?MODULE),
+    {File,wrap,{MaxBytes,MaxFiles},1} =
+        {Get(file,Info),Get(type,Info),Get(size,Info),Get(current_file,Info)},
+    Tester = self(),
+    TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+                       Pid !
{trace,Mod,Func,Details}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(h_proc_name()), [c]), + {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []), + + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)], + ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]), + %% fill first file + lists:foreach(fun(N) -> + Log = lists:concat([File,".",N]), + logger:notice(Text, ?domain), + wait_until_written(Log), + ct:pal("N = ~w", + [N = Get(current_file, + disk_log:info(?MODULE))]) + end, lists:seq(1,MaxFiles)), + + %% wait for trace messages + timer:sleep(1000), + dbg:stop_clear(), + Received = lists:flatmap(fun({trace,_M,handle_info, + [_,{disk_log,_Node,_Name,What},_]}) -> + [{trace,What}]; + ({log,_}) -> + [] + end, test_server:messages_get()), + ct:pal("Trace =~n~p", [Received]), + Received = [{trace,{wrap,0}} || _ <- lists:seq(1,MaxFiles-1)], + ok. + +disk_log_wrap(cleanup,_Config) -> + dbg:stop_clear(), + logger:remove_handler(?MODULE). + +disk_log_full(Config) -> + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, ?FUNCTION_NAME), + ct:pal("Log = ~p", [File]), + MaxBytes = 50, + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}, + config=> + #{type => halt, + max_no_files => 1, + max_no_bytes => MaxBytes, + file => File}}), + + Tester = self(), + TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) -> + Pid ! {trace,Mod,Func,Details}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(h_proc_name()), [c]), + {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []), + + NoOfChars = 5, + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)], + [logger:notice(Text, ?domain) || _ <- lists:seq(1,trunc(MaxBytes/NoOfChars)+1)], + + %% wait for trace messages + timer:sleep(2000), + dbg:stop_clear(), + Received = lists:flatmap(fun({trace,_M,handle_info, + [_,{disk_log,_Node,_Name,What},_]}) -> + [{trace,What}]; + ({log,_}) -> + [] + end, test_server:messages_get()), + ct:pal("Trace =~n~p", [Received]), + + %% The tail here could be an error_status notification, if the + %% last write was synchronous, but in most cases it will not be + [{trace,full}|_] = Received, + %% [{trace,full}, + %% {trace,{error_status,{error,{full,_}}}}] = Received, + ok. +disk_log_full(cleanup, _Config) -> + dbg:stop_clear(), + logger:remove_handler(?MODULE). + +disk_log_events(Config) -> + Node = node(), + Log = ?MODULE, + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + + %% Events copied from disk_log API + Events = + [{disk_log, Node, Log, {wrap, 0}}, + {disk_log, Node, Log, {truncated, 0}}, + {disk_log, Node, Log, {read_only, 42}}, + {disk_log, Node, Log, {blocked_log, 42}}, + {disk_log, Node, Log, {format_external, 42}}, + {disk_log, Node, Log, full}, + {disk_log, Node, Log, {error_status, ok}}], + + Tester = self(), + TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) -> + Pid ! {trace,Mod,Func,Details}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(h_proc_name()), [c]), + {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []), + + [whereis(h_proc_name()) ! 
E || E <- Events], + %% wait for trace messages + timer:sleep(2000), + dbg:stop_clear(), + Received = lists:map(fun({trace,_M,handle_info, + [_,Got,_]}) -> Got + end, test_server:messages_get()), + ct:pal("Trace =~n~p", [Received]), + NoOfEvents = length(Events), + NoOfEvents = length(Received), + lists:foreach(fun(Event) -> + true = lists:member(Event, Received) + end, Received), + ok. +disk_log_events(cleanup, _Config) -> + dbg:stop_clear(), + logger:remove_handler(?MODULE). + +write_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = filename:join(Dir, ?FUNCTION_NAME), + Log = lists:concat([File,".1"]), + ct:pal("Log = ~p", [Log]), + + Node = start_h_on_new_node(Config, File), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]), + HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, + maps:get(handler_state,HState))]), + + %% ?check and ?check_no_log in this test only check for internal log events + ok = log_on_remote_node(Node, "Logged1"), + rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, % no internal log when write ok + + SyncRepInt = case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of + true -> 5500; + false -> ?FILESYNC_REPEAT_INTERVAL + 500 + end, + + try_read_file(Log, {ok,<<"Logged1\n">>}, SyncRepInt), + + rpc:call(Node, ?MODULE, set_result, [disk_log_write,{error,no_such_log}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + %% this should have caused an internal log + ?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, % but don't log same error twice + + rpc:call(Node, ?MODULE, set_result, [disk_log_write, + {error,{full,?STANDARD_HANDLER}}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + %% this was a different error, so it should be logged + ?check({error,{?STANDARD_HANDLER,log,LogOpts, + {error,{full,?STANDARD_HANDLER}}}}), + + rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]), + ok = log_on_remote_node(Node, "Logged2"), + rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, % no internal log when write ok + try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, SyncRepInt), + ok. +write_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. 
+ + +sync_failure(Config) -> + Dir = ?config(priv_dir, Config), + FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]), + File = filename:join(Dir, FileName), + + + Node = start_h_on_new_node(Config, File), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]), + HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + LogOpts = maps:get(log_opts, maps:get(handler_state,HState)), + + SyncInt = 500, + ok = rpc:call(Node, logger, update_handler_config, + [?STANDARD_HANDLER, config, + #{filesync_repeat_interval => SyncInt}]), + Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + SyncInt = maps:get(filesync_repeat_interval, Info), + + ok = log_on_remote_node(Node, "Logged1"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,{error,no_such_log}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, + [disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,filesync,LogOpts, + {error,{blocked_log,?STANDARD_HANDLER}}}}), + + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]), + ok = log_on_remote_node(Node, "Logged2"), + ?check_no_log, + ok. +sync_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +start_h_on_new_node(Config, File) -> + {ok,_,Node} = + logger_test_lib:setup( + Config, + [{logger,[{handler,default,logger_disk_log_h, + #{ config => #{ file => File }}}]}]), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,nl}]), + Node. + +log_on_remote_node(Node,Msg) -> + _ = spawn_link(Node, + fun() -> erlang:group_leader(whereis(user),self()), + logger:notice(Msg) + end), + ok. + +%% functions for test hook macros to be called by rpc +set_internal_log(_Mod, _Func) -> + ?set_internal_log({_Mod,_Func}). +set_result(_Op, _Result) -> + ?set_result(_Op, _Result). +set_defaults() -> + ?set_defaults(). + +%% internal log function that sends the term to the test case process +internal_log(Type, Term) -> + [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester), + Tester ! {log,{Type,Term}}, + logger:internal_log(Type, Term), + ok. + + +%%%----------------------------------------------------------------- +%%% Overload protection tests + +op_switch_to_sync(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 500, + NewHConfig = + HConfig#{config => DLHConfig#{sync_mode_qlen => 2, + drop_mode_qlen => NumOfReqs+1, + flush_qlen => 2*NumOfReqs, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Lines = count_lines(Log), + NumOfReqs = Lines, + ok = file_delete(Log), + ok. +op_switch_to_sync(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_drop() -> + [{timetrap,{seconds,180}}]. 
+op_switch_to_drop(Config) -> + Test = + fun() -> + {Log,HConfig,DLHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 300, + Procs = 2, + Bursts = 10, + NewHConfig = + HConfig#{config => + DLHConfig#{sync_mode_qlen => 1, + drop_mode_qlen => 2, + flush_qlen => Procs*NumOfReqs*Bursts, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that dropping + %% never occurs. Therefore, lets generate a number of + %% bursts to increase the chance of message buildup. + [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) || + _ <- lists:seq(1, Bursts)], + Logged = count_lines(Log), + ok = stop_handler(?MODULE), + ct:pal("Number of messages dropped = ~w (~w)", + [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]), + true = (Logged < (Procs*NumOfReqs*Bursts)), + true = (Logged > 0), + _ = file_delete(Log), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. +op_switch_to_drop(cleanup, _Config) -> + _ = stop_handler(?MODULE). + +op_switch_to_flush() -> + [{timetrap,{minutes,3}}]. +op_switch_to_flush(Config) -> + Test = + fun() -> + {Log,HConfig,DLHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% NOTE: it's important that both async and sync + %% requests have been queued when the flush happens + %% (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{config => + DLHConfig#{sync_mode_qlen => 2, + %% disable drop mode + drop_mode_qlen => 300, + flush_qlen => 300, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 1500, + Procs = 10, + Bursts = 10, + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that flushing + %% never occurs, or it gets all messages at once, + %% causing all messages to get flushed (no dropping of + %% sync messages gets tested). Therefore, lets + %% generate a number of bursts to increase the chance + %% of message buildup in some random fashion. + [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) || + _ <- lists:seq(1,Bursts)], + Logged = count_lines(Log), + ok= stop_handler(?MODULE), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]), + true = (Logged < (NumOfReqs*Procs*Bursts)), + true = (Logged > 0), + _ = file_delete(Log), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. +op_switch_to_flush(cleanup, _Config) -> + _ = stop_handler(?MODULE). 
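The three op_switch_* cases above tune the overload-protection thresholds shared by logger_std_h and logger_disk_log_h: below sync_mode_qlen events are handled asynchronously, from there up to drop_mode_qlen they are handled synchronously, above that new events are dropped, and at flush_qlen the whole mailbox is flushed. A hedged sketch with illustrative values (the handler id my_disk_log is made up):

    ok = logger:update_handler_config(my_disk_log, config,
                                      #{sync_mode_qlen => 10,    %% async -> sync switch point
                                        drop_mode_qlen => 200,   %% sync -> drop switch point
                                        flush_qlen     => 1000}).%% drop -> flush switch point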
+ + +limit_burst_disabled(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config => DLHConfig#{burst_limit_enable => false, + burst_limit_max_count => 10, + burst_limit_window_time => 2000, + drop_mode_qlen => 200, + flush_qlen => 300}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + NumOfReqs = Logged, + ok = file_delete(Log), + ok. +limit_burst_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_one(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + NewHConfig = + HConfig#{config => DLHConfig#{burst_limit_enable => true, + burst_limit_max_count => ReqLimit, + burst_limit_window_time => 2000, + drop_mode_qlen => 200, + flush_qlen => 300}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ReqLimit = Logged, + ok = file_delete(Log), + ok. +limit_burst_enabled_one(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_period(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + BurstTWin = 1000, + NewHConfig = + HConfig#{config => DLHConfig#{burst_limit_enable => true, + burst_limit_max_count => ReqLimit, + burst_limit_window_time => BurstTWin, + drop_mode_qlen => 20000, + flush_qlen => 20001}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + + Windows = 3, + Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + true = (Logged > (ReqLimit*Windows)) andalso + (Logged < (ReqLimit*(Windows+2))), + ok = file_delete(Log), + ok. +limit_burst_enabled_period(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +kill_disabled(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config=>DLHConfig#{overload_kill_enable=>false, + overload_kill_qlen=>10, + overload_kill_mem_size=>100}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file_delete(Log), + true = is_pid(whereis(h_proc_name())), + ok. +kill_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
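The burst-limit cases above exercise the following knobs: while burst_limit_enable is true, at most burst_limit_max_count events are let through per burst_limit_window_time milliseconds. A hedged sketch with illustrative numbers and a made-up handler id:

    ok = logger:update_handler_config(my_disk_log, config,
                                      #{burst_limit_enable      => true,
                                        burst_limit_max_count   => 500,
                                        burst_limit_window_time => 1000}).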
+ +qlen_kill_new(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(h_proc_name()), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + NewHConfig = + HConfig#{config => + DLHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>10, + overload_kill_mem_size=>Mem0+50000, + overload_kill_restart_after=>RestartAfter}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 4, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), + ok + after + 5000 -> + Info = logger_disk_log_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +qlen_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +mem_kill_new(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(h_proc_name()), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + NewHConfig = + HConfig#{config => + DLHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>50000, + overload_kill_mem_size=>Mem0+500, + overload_kill_restart_after=>RestartAfter}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 4, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), + ok + after + 5000 -> + Info = logger_disk_log_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +mem_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +restart_after() -> + [{timetrap,{minutes,2}}]. +restart_after(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig1 = + HConfig#{config=>DLHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>10, + overload_kill_restart_after=>infinity}}, + ok = logger:update_handler_config(?MODULE, NewHConfig1), + MRef1 = erlang:monitor(process, whereis(h_proc_name())), + %% kill handler + send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), + receive + {'DOWN', MRef1, _, _, _Reason1} -> + file_delete(Log), + error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3), + ok + after + 5000 -> + Info1 = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info1]), + ct:fail("Handler not dead! 
It should not have survived this!") + end, + + {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + NewHConfig2 = + HConfig#{config=>DLHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>10, + overload_kill_restart_after=>RestartAfter}}, + ok = logger:update_handler_config(?MODULE, NewHConfig2), + Pid0 = whereis(h_proc_name()), + MRef2 = erlang:monitor(process, Pid0), + %% kill handler + send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), + receive + {'DOWN', MRef2, _, _, _Reason2} -> + file_delete(Log), + {ok,Pid1} = wait_for_process_up(RestartAfter * 3), + false = (Pid1 == Pid0), + ok + after + 5000 -> + Info2 = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info2]), + ct:fail("Handler not dead! It should not have survived this!") + end, + ok. +restart_after(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% send handler requests (sync, info, reset, change_config) +%% during high load to verify that sync, dropping and flushing is +%% handled correctly. +handler_requests_under_load() -> + [{timetrap,{minutes,5}}]. +handler_requests_under_load(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config => DLHConfig#{sync_mode_qlen => 2, + drop_mode_qlen => 1000, + flush_qlen => 2000, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]}, + {info,[]}, + {reset,[]}, + {change_config,[]}]) + end), + Procs = 100, + Sent = Procs * send_burst({n,5000}, {spawn,Procs,10}, {chars,79}, notice), + Pid ! {self(),finish}, + ReqResult = receive {Pid,Result} -> Result end, + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + FindError = fun(Res) -> + [E || E <- Res, + is_tuple(E) andalso (element(1,E) == error)] + end, + Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult], + NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult), + ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]), + ok = file_delete(Log). +handler_requests_under_load(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> + receive + {From,finish} -> + From ! {self(),Reqs} + after + TO -> + Result = + case Req of + change_config -> + logger:update_handler_config(HName, logger_disk_log_h, + #{overload_kill_enable => + false}); + Func -> + logger_disk_log_h:Func(HName) + end, + send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}]) + end. + +%%%----------------------------------------------------------------- +%%% +start_handler(Name, FuncName, Config) -> + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, FuncName), + ct:pal("Logging to ~tp", [File]), + FullFile = lists:concat([File,".1"]), + _ = file_delete(FullFile), + ok = logger:add_handler(Name, + logger_disk_log_h, + #{config=>#{file => File, + max_no_files => 1, + max_no_bytes => 100000000}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,HConfig = #{config := DLHConfig}} = logger:get_handler_config(Name), + {FullFile,HConfig,DLHConfig}. + +stop_handler(Name) -> + ct:pal("Stopping handler ~p!", [Name]), + logger:remove_handler(Name). 
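The kill/restart cases above rely on the overload_kill_* settings: when enabled, the handler terminates itself once its queue length or memory use passes the given thresholds, and is restarted after overload_kill_restart_after milliseconds unless that value is infinity. A hedged sketch with made-up values and handler id:

    ok = logger:update_handler_config(my_disk_log, config,
                                      #{overload_kill_enable        => true,
                                        overload_kill_qlen          => 20000,    %% max queue length
                                        overload_kill_mem_size      => 3000000,  %% max memory, bytes
                                        overload_kill_restart_after => 5000}).   %% ms, or infinity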
+ +send_burst(NorT, Type, {chars,Sz}, Class) -> + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)], + case NorT of + {n,N} -> + %% process_flag(priority, high), + send_n_burst(N, Type, Text, Class), + %% process_flag(priority, normal), + N; + {t,T} -> + ct:pal("Sending messages sequentially for ~w ms", [T]), + T0 = erlang:monotonic_time(millisecond), + send_t_burst(T0, T, Text, Class, 0) + end. + +send_n_burst(0, _, _Text, _Class) -> + ok; +send_n_burst(N, seq, Text, Class) -> + ok = logger:Class(Text, ?domain), + send_n_burst(N-1, seq, Text, Class); +send_n_burst(N, {spawn,Ps,TO}, Text, Class) -> + ct:pal("~w processes each sending ~w messages", [Ps,N]), + MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end, + monitor(process,spawn_link(per_proc_fun(N,Text,Class,X))) + end || X <- lists:seq(1,Ps)], + lists:foreach(fun(MRef) -> + receive + {'DOWN', MRef, _, _, _} -> + ok + end + end, MRefs), + ct:pal("Message burst sent", []), + ok. + +send_t_burst(T0, T, Text, Class, N) -> + T1 = erlang:monotonic_time(millisecond), + if (T1-T0) > T -> + N; + true -> + ok = logger:Class(Text, ?domain), + send_t_burst(T0, T, Text, Class, N+1) + end. + +per_proc_fun(N,Text,Class,X) when X rem 2 == 0 -> + fun() -> + process_flag(priority,high), + send_n_burst(N, seq, Text, Class) + end; +per_proc_fun(N,Text,Class,_) -> + fun() -> + send_n_burst(N, seq, Text, Class) + end. + +%%%----------------------------------------------------------------- +%%% Formatter callback +%%% Using this to send the formatted string back to the test case +%%% process - so it can check for logged events. +format(_,bad_return) -> + bad_return; +format(_,crash) -> + erlang:error(formatter_crashed); +format(#{msg:={report,R},meta:=#{report_cb:=Fun}}=Log,Config) -> + format(Log#{msg=>Fun(R)},Config); +format(#{msg:={string,String0}},no_nl) -> + String = unicode:characters_to_list(String0), + String; +format(#{msg:={string,String0}},nl) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={string,String0}},op) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={report,#{label:={supervisor,progress}}}},op) -> + ""; +format(#{msg:={report,#{label:={gen_server,terminate}}}},op) -> + ""; +format(#{msg:={report,#{label:={proc_lib,crash}}}},op) -> + ""; +format(#{msg:={F,A}},OpOrPid) when is_list(F), is_list(A) -> + String = lists:flatten(io_lib:format(F,A)), + if is_pid(OpOrPid) -> OpOrPid ! {log,String}; + true -> ok + end, + String++"\n"; +format(#{msg:={string,String0}},Pid) -> + String = unicode:characters_to_list(String0), + Pid ! {log,String}, + String++"\n"; +format(Msg,Tag) -> + Error = {unexpected_format,Msg,Tag}, + erlang:display(Error), + exit(Error). + +remove(Handler, LogName) -> + logger_disk_log_h:remove(Handler, LogName), + HState = #{log_names := Logs} = logger_disk_log_h:info(), + false = maps:is_key(LogName, HState), + false = lists:member(LogName, Logs), + false = logger_config:exist(?LOGGER_TABLE, LogName), + {error,no_such_log} = disk_log:info(LogName), + ok. + +start_and_add(Name, Config, LogOpts) -> + HConfig = maps:get(config, Config, #{}), + HConfig1 = maps:merge(HConfig, LogOpts), + Config1 = Config#{config=>HConfig1}, + ct:pal("Adding handler ~w with: ~p", [Name,Config1]), + ok = logger:add_handler(Name, logger_disk_log_h, Config1), + Pid = whereis(h_proc_name(Name)), + true = is_pid(Pid), + Name = proplists:get_value(name, disk_log:info(Name)), + ok. 
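The suite's format/2 above doubles as a formatter callback that reports back to the test process. For reference, a hedged sketch of a standalone formatter module implementing the same contract (the module name my_formatter is hypothetical): logger calls format(LogEvent, FormatterConfig) and expects chardata back.

    -module(my_formatter).
    -export([format/2]).

    %% {report, Report} must be matched before the generic {Format, Args} clause.
    format(#{level := Level, msg := {string, String}}, _FConfig) ->
        [atom_to_list(Level), ": ", String, "\n"];
    format(#{level := Level, msg := {report, Report}}, _FConfig) ->
        io_lib:format("~p: ~p~n", [Level, Report]);
    format(#{level := Level, msg := {Format, Args}}, _FConfig) ->
        [atom_to_list(Level), ": ", io_lib:format(Format, Args), "\n"].

Such a module would be attached with formatter => {my_formatter, #{}} in the handler config.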
+ +remove_and_stop(Handler) -> + ok = logger:remove_handler(Handler), + timer:sleep(500), + undefined = whereis(h_proc_name(Handler)), + ok. + +try_read_file(FileName, Expected, Time) -> + try_read_file(FileName, Expected, Time, undefined). + +try_read_file(FileName, Expected, Time, _) when Time > 0 -> + case file:read_file(FileName) of + Expected -> + ok; + Error = {error,_Reason} -> + erlang:error(Error); + SomethingElse -> + ct:pal("try_read_file read unexpected: ~p~n", [SomethingElse]), + timer:sleep(500), + try_read_file(FileName, Expected, Time-500, SomethingElse) + end; + +try_read_file(_, _, _, Incorrect) -> + ct:pal("try_read_file got incorrect pattern: ~p~n", [Incorrect]), + erlang:error({error,not_matching_pattern,Incorrect}). + +try_match_file(FileName, Pattern, Time) -> + try_match_file(FileName, Pattern, Time, <<>>). + +try_match_file(FileName, Pattern, Time, _) when Time > 0 -> + case file:read_file(FileName) of + {ok, Bin} -> + case re:run(Bin,Pattern,[{capture,none}]) of + match -> + unicode:characters_to_list(Bin); + _ -> + timer:sleep(100), + try_match_file(FileName, Pattern, Time-100, Bin) + end; + Error -> + erlang:error(Error) + end; +try_match_file(_,Pattern,_,Incorrect) -> + ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n", + [Pattern,Incorrect]), + erlang:error({error,not_matching_pattern,Pattern,Incorrect}). + +count_lines(File) -> + wait_until_written(File), + count_lines1(File). + +wait_until_written(File) -> + wait_until_written(File, -1). + +wait_until_written(File, Sz) -> + timer:sleep(2000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + timer:sleep(1000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + ok; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end. + +count_lines1(File) -> + {_,Dev} = file:open(File, [read]), + Lines = count_lines2(Dev, 0), + file:close(Dev), + Lines. + +count_lines2(Dev, LC) -> + case file:read_line(Dev) of + {ok,"Handler logger_disk_log_h_SUITE " ++_} -> + %% Not counting handler info + count_lines2(Dev,LC); + {ok,_} -> + count_lines2(Dev,LC+1); + eof -> LC + end. + +repeat_until_ok(Fun, N) -> + repeat_until_ok(Fun, 0, N, undefined). + +repeat_until_ok(_Fun, Stop, Stop, Reason) -> + {fails,Reason}; + +repeat_until_ok(Fun, C, Stop, FirstReason) -> + if C > 0 -> timer:sleep(5000); + true -> ok + end, + try Fun() of + Result -> + {ok,{C,Result}} + catch + _:Reason:Stack -> + ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]), + if FirstReason == undefined -> + repeat_until_ok(Fun, C+1, Stop, {Reason,Stack}); + true -> + repeat_until_ok(Fun, C+1, Stop, FirstReason) + end + end. + +start_tracer(Trace,Expected) -> + Pid = self(), + dbg:tracer(process,{fun tracer/2,{Pid,Expected}}), + dbg:p(h_proc_name(),[c]), + tpl(Trace), + ok. + +tpl([{M,F,A}|Trace]) -> + {ok,Match} = dbg:tpl(M,F,A,c), + case lists:keyfind(matched,1,Match) of + {_,_,1} -> + ok; + _ -> + dbg:stop_clear(), + throw({skip,"Can't trace "++atom_to_list(M)++":"++ + atom_to_list(F)++"/"++integer_to_list(A)}) + end, + tpl(Trace); +tpl([]) -> + ok. 
+ +tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]},Caller}, + {Pid,[{Mod,Func,Op}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Op},Caller); +tracer({trace,_,call,{Mod=logger_disk_log_h,Func=disk_log_write,[_,_,Data]},Caller}, {Pid,[{Mod,Func,Data}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Data},Caller); +tracer({trace,_,call,{Mod,Func,_},Caller}, {Pid,[{Mod,Func}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func},Caller); +tracer({trace,_,call,Call,Caller}, {Pid,Expected}) -> + ct:log("Tracer got unexpected: ~p~nCaller: ~p~nExpected: ~p~n",[Call,Caller,Expected]), + Pid ! {tracer_got_unexpected,Call,Expected}, + {Pid,Expected}. + +maybe_tracer_done(Pid,[],Got,Caller) -> + ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]), + Pid ! tracer_done; +maybe_tracer_done(Pid,Expected,Got,Caller) -> + ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]), + {Pid,Expected}. + +check_tracer(T) -> + receive + tracer_done -> + dbg:stop_clear(), + ok; + {tracer_got_unexpected,Got,Expected} -> + dbg:stop_clear(), + ct:fail({tracer_got_unexpected,Got,Expected}) + after T -> + dbg:stop_clear(), + ct:fail({timeout,tracer}) + end. + +escape([$+|Rest]) -> + [$\\,$+|escape(Rest)]; +escape([H|T]) -> + [H|escape(T)]; +escape([]) -> + []. + +h_proc_name() -> + h_proc_name(?MODULE). +h_proc_name(Name) -> + list_to_atom(lists:concat([logger_disk_log_h,"_",Name])). + +wait_for_process_up(T) -> + wait_for_process_up(?MODULE, h_proc_name(), T). + +wait_for_process_up(Name, RegName, T) -> + N = (T div 500) + 1, + wait_for_process_up1(Name, RegName, N). + +wait_for_process_up1(_Name, _RegName, 0) -> + error; +wait_for_process_up1(Name, RegName, N) -> + timer:sleep(500), + case whereis(RegName) of + Pid when is_pid(Pid) -> + case logger:get_handler_config(Name) of + {ok,_} -> + %% ct:pal("Process ~p up (~p tries left)",[Name,N]), + {ok,Pid}; + _ -> + wait_for_process_up1(Name, RegName, N-1) + end; + undefined -> + %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]), + wait_for_process_up1(Name, RegName, N-1) + end. + +file_delete(Log) -> + file:delete(Log). diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl new file mode 100644 index 0000000000..e8d1a313dc --- /dev/null +++ b/lib/kernel/test/logger_env_var_SUITE.erl @@ -0,0 +1,683 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_env_var_SUITE). + +-compile(export_all). + +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-import(logger_test_lib,[setup/2,log/3,sync_and_read/3]). + +suite() -> + [{timetrap,{seconds,60}}, + {ct_hooks,[logger_test_lib]}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. 
+ +groups() -> + [{error_logger,[],[error_logger_tty, + error_logger_tty_sasl_compatible, + error_logger_false, + error_logger_false_progress, + error_logger_false_sasl_compatible, + error_logger_silent, + error_logger_silent_sasl_compatible, + error_logger_file]}, + {logger,[],[logger_file, + logger_file_sasl_compatible, + logger_file_log_progress, + logger_file_no_filter, + logger_file_no_filter_level, + logger_file_formatter, + logger_filters, + logger_filters_stop, + logger_module_level, + logger_disk_log, + logger_disk_log_formatter, + logger_undefined, + logger_many_handlers_default_first, + logger_many_handlers_default_last, + logger_many_handlers_default_last_broken_filter + ]}, + {bad,[],[bad_error_logger, + bad_level, + bad_sasl_compatibility]}]. + +all() -> + [default, + default_sasl_compatible, + sasl_compatible_false, + sasl_compatible_false_no_progress, + sasl_compatible, + all_logger_level, + {group,bad}, + {group,error_logger}, + {group,logger} + ]. + +default(Config) -> + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = setup(Config,[]), + notice = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + [] = ML, + ok. + +default_sasl_compatible(Config) -> + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = + setup(Config,[{logger_sasl_compatible,true}]), + info = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + true = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_tty(Config) -> + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = + setup(Config,[{error_logger,tty}]), + notice = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_tty_sasl_compatible(Config) -> + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = + setup(Config, + [{error_logger,tty}, + {logger_sasl_compatible,true}]), + info = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + true = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_false(Config) -> + {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}]), + false = exists(?STANDARD_HANDLER,Hs), + #{module:=logger_simple_h} = SimpleC = find(simple,Hs), + all = maps:get(level,SimpleC), + notice = maps:get(level,P), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters), + false = exists(sasl,Hs), + [] = ML, + ok. 
+ +error_logger_false_progress(Config) -> + {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}]), + false = exists(?STANDARD_HANDLER,Hs), + #{module:=logger_simple_h} = SimpleC = find(simple,Hs), + all = maps:get(level,SimpleC), + notice = maps:get(level,P), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters), + false = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_false_sasl_compatible(Config) -> + {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} = + setup(Config, + [{error_logger,false}, + {logger_level,notice}, + {logger_sasl_compatible,true}]), + false = exists(?STANDARD_HANDLER,Hs), + #{module:=logger_simple_h} = SimpleC = find(simple,Hs), + all = maps:get(level,SimpleC), + info = maps:get(level,P), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,SimpleFilters), + true = exists(sasl,Hs), + [] = ML, + ok. + +error_logger_silent(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{error_logger,silent}]), + false = exists(?STANDARD_HANDLER,Hs), + false = exists(simple,Hs), + false = exists(sasl,Hs), + ok. + +error_logger_silent_sasl_compatible(Config) -> + {ok,#{handlers:=Hs},_Node} = setup(Config, + [{error_logger,silent}, + {logger_sasl_compatible,true}]), + false = exists(?STANDARD_HANDLER,Hs), + false = exists(simple,Hs), + true = exists(sasl,Hs), + ok. + + +error_logger_file(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,_,Node} = setup(Config, + [{error_logger,{file,Log}}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + ok. + + +logger_file(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{config=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + + notice = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + [] = ML, + ok. + +logger_file_sasl_compatible(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node} + = setup(Config, + [{logger_sasl_compatible,true}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{config=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + + info = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + true = exists(sasl,Hs), + [] = ML, + ok. 
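For context, the Env lists passed to setup/2 in these cases correspond to the kernel application environment the peer node boots with; written as a sys.config file it might look like the following hedged sketch (the log path is illustrative):

    %% Illustrative sys.config fragment; the file name is made up.
    [{kernel,
      [{logger_level, info},
       {logger,
        [{handler, default, logger_std_h,
          #{config => #{type => {file, "log/default.log"}}}}]}]}].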
+ +logger_file_log_progress(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node} + = setup(Config, + [{logger_level,info}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{config=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 6,% progress in std logger + info), + + info = maps:get(level,P), + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + [] = ML, + ok. + +logger_file_no_filter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filter_default=>log,filters=>[], + config=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 6),% progress in std logger + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = exists(simple,Hs), + false = exists(sasl,Hs), + + ok. + +logger_file_no_filter_level(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[],level=>error, + config=>#{type=>{file,Log}}}}]}]), + check_default_log(Node,Log, + file,% dest + 0,% progress in std logger + error),% level + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + error = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = exists(simple,Hs), + false = exists(sasl,Hs), + + ok. + +logger_file_formatter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[], + formatter=>{logger_formatter,#{}}, + config=>#{type=>{file,Log}}}}]}]), + check_single_log(Node,Log, + file,% dest + 6),% progress in std logger + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = exists(simple,Hs), + false = exists(sasl,Hs), + + ok. + +logger_filters(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,primary:=P},Node} + = setup(Config, + [{logger_level,info}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{config=>#{type=>{file,Log}}}}, + {filters,log,[{stop_progress,{fun logger_filters:progress/2,stop}}]} + ]}]), + check_default_log(Node,Log, + file,% dest + 0,% progress in std logger + info), + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + LoggerFilters = maps:get(filters,P), + true = lists:keymember(stop_progress,1,LoggerFilters), + + ok. 
+ +logger_filters_stop(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,primary:=P},Node} + = setup(Config, + [{logger_level,info}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{filters=>[], + config=>#{type=>{file,Log}}}}, + {filters,stop,[{log_error,{fun logger_filters:level/2,{log,gt,info}}}]} + ]}]), + check_default_log(Node,Log, + file,% dest + 0,% progress in std logger + info), + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = exists(simple,Hs), + false = exists(sasl,Hs), + LoggerFilters = maps:get(filters,P), + true = lists:keymember(log_error,1,LoggerFilters), + + ok. + +logger_module_level(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs,module_levels:=ModuleLevels},Node} + = setup(Config, + [{logger_level,info}, + {logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{config=>#{type=>{file,Log}}}}, + {module_level,error,[supervisor]} + ]}]), + check_default_log(Node,Log, + file,% dest + 3,% progress in std logger + info), + + #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + [{supervisor,error}] = ModuleLevels, + ok. + +logger_disk_log(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_disk_log_h, + #{config=>#{file=>Log}}}]}]), + check_default_log(Node,Log, + disk_log,% dest + 0),% progress in std logger + + #{module:=logger_disk_log_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters), + false = exists(simple,Hs), + false = exists(sasl,Hs), + + ok. + +logger_disk_log_formatter(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,#{handlers:=Hs},Node} + = setup(Config, + [{logger, + [{handler,?STANDARD_HANDLER,logger_disk_log_h, + #{filters=>[], + formatter=>{logger_formatter,#{}}, + config=>#{file=>Log}}}]}]), + check_single_log(Node,Log, + disk_log,% dest + 6),% progress in std logger + + #{module:=logger_disk_log_h} = StdC = find(?STANDARD_HANDLER,Hs), + all = maps:get(level,StdC), + [] = maps:get(filters,StdC), + false = exists(simple,Hs), + false = exists(sasl,Hs), + + ok. + +logger_undefined(Config) -> + {ok,#{handlers:=Hs,primary:=P},_Node} = + setup(Config,[{logger,[{handler,?STANDARD_HANDLER,undefined}]}]), + false = exists(?STANDARD_HANDLER,Hs), + #{module:=logger_simple_h} = SimpleC = find(simple,Hs), + all = maps:get(level,SimpleC), + notice = maps:get(level,P), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters), + false = exists(sasl,Hs), + ok. 
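The logger_module_level case above sets a per-module level via the module_level directive in the boot configuration; the same effect can be obtained at runtime, as in this hedged sketch:

    %% Runtime equivalent of {module_level, error, [supervisor]}.
    ok = logger:set_module_level(supervisor, error).
    %% ...and back to the primary level when done.
    ok = logger:unset_module_level(supervisor).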
+ + +%% Test that we can add multiple handlers with the default first +logger_many_handlers_default_first(Config) -> + LogErr = file(Config,logger_many_handlers_default_first_error), + LogInfo = file(Config,logger_many_handlers_default_first_info), + + logger_many_handlers( + Config,[{logger, + [{handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + config=>#{type=>{file,LogErr}}} + }, + {handler,info,logger_std_h, + #{level=>info, + filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}], + config=>#{type=>{file,LogInfo}}} + } + ]}, + {logger_level,info}], LogErr, LogInfo, 6). + +%% Test that we can add multiple handlers with the default last +logger_many_handlers_default_last(Config) -> + LogErr = file(Config,logger_many_handlers_default_last_error), + LogInfo = file(Config,logger_many_handlers_default_last_info), + logger_many_handlers( + Config,[{logger, + [{handler,info,logger_std_h, + #{level=>info, + filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}], + config=>#{type=>{file,LogInfo}}} + }, + {handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + config=>#{type=>{file,LogErr}}} + } + ]}, + {logger_level,info}], LogErr, LogInfo, 7). + +%% Check that we can handle that an added logger has a broken filter +%% This used to cause a deadlock. +logger_many_handlers_default_last_broken_filter(Config) -> + LogErr = file(Config,logger_many_handlers_default_first_broken_filter_error), + LogInfo = file(Config,logger_many_handlers_default_first_broken_filter_info), + + logger_many_handlers( + Config,[{logger, + [{handler,info,logger_std_h, + #{level=>info, + filters=>[{broken,{fun logger_filters:level/2,broken_state}}, + {level,{fun logger_filters:level/2,{stop,gteq,error}}}], + config=>#{type=>{file,LogInfo}}} + }, + {handler,?STANDARD_HANDLER,logger_std_h, + #{level=>error, + filters=>[], + formatter=>{logger_formatter,#{}}, + config=>#{type=>{file,LogErr}}} + } + ]}, + {logger_level,info}], LogErr, LogInfo, 7). + +logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) -> + {ok,_,Node} = setup(Config,Env), + check_single_log(Node,LogErr, + file,% dest + 0,% progress in std logger + error), % level + ok = rpc:call(Node,logger_std_h,filesync,[info]), + {ok, Bin} = file:read_file(LogInfo), + ct:log("Log content:~n~s",[Bin]), + match(Bin,<<"info:">>,NumProgress,info,info), + match(Bin,<<"notice:">>,1,notice,info), + match(Bin,<<"alert:">>,0,alert,info), + + ok. + +sasl_compatible_false(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,_,Node} = setup(Config, + [{error_logger,{file,Log}}, + {logger_sasl_compatible,false}, + {logger_level,info}]), % to get progress + check_default_log(Node,Log, + file,% dest + 6,% progress in std logger + info), + ok. + +sasl_compatible_false_no_progress(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,_,Node} = setup(Config, + [{error_logger,{file,Log}}, + {logger_sasl_compatible,false}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + ok. + +sasl_compatible(Config) -> + Log = file(Config,?FUNCTION_NAME), + {ok,_,Node} = setup(Config, + [{error_logger,{file,Log}}, + {sasl_compatible,true}]), + check_default_log(Node,Log, + file,% dest + 0),% progress in std logger + ok. + +all_logger_level(Config) -> + [all_logger_level(Config,Level) || Level <- [none, + emergency, + alert, + critical, + error, + warning, + notice, + info, + debug, + all]], + ok. 
+ +all_logger_level(Config,Level) -> + {ok,#{primary:=#{level:=Level}},Node} = setup(Config,[{logger_level,Level}]), + true = test_server:stop_node(Node), + ok. + +bad_error_logger(Config) -> + error = setup(Config,[{error_logger,baddest}]). + +bad_level(Config) -> + error = setup(Config,[{logger_level,badlevel}]). + +bad_sasl_compatibility(Config) -> + error = setup(Config,[{logger_sasl_compatible,badcomp}]). + +%%%----------------------------------------------------------------- +%%% Internal +file(Config,Func) -> + filename:join(proplists:get_value(priv_dir,Config), + lists:concat([Func,".log"])). + +check_default_log(Node,Log,Dest,NumProgress) -> + check_default_log(Node,Log,Dest,NumProgress,notice). +check_default_log(Node,Log,Dest,NumProgress,Level) -> + + {ok,Bin1,Bin2} = check_log(Node,Log,Dest), + + match(Bin1,<<"PROGRESS REPORT">>,NumProgress,info,Level), + match(Bin1,<<"ALERT REPORT">>,1,alert,Level), + match(Bin1,<<"INFO REPORT">>,0,notice,Level), + match(Bin1,<<"DEBUG REPORT">>,0,debug,Level), + + match(Bin2,<<"INFO REPORT">>,1,notice,Level), + match(Bin2,<<"DEBUG REPORT">>,0,debug,Level), + ok. + +check_single_log(Node,Log,Dest,NumProgress) -> + check_single_log(Node,Log,Dest,NumProgress,notice). +check_single_log(Node,Log,Dest,NumProgress,Level) -> + + {ok,Bin1,Bin2} = check_log(Node,Log,Dest), + + match(Bin1,<<"info:">>,NumProgress,info,Level), + match(Bin1,<<"alert:">>,1,alert,Level), + match(Bin1,<<"debug:">>,0,debug,Level), + + match(Bin2,<<"info:">>,NumProgress+1,info,Level), + match(Bin2,<<"debug:">>,0,debug,Level), + + ok. + +check_log(Node,Log,Dest) -> + + ok = log(Node,alert,["dummy1"]), + ok = log(Node,debug,["dummy1"]), + + %% Check that there are progress reports (supervisor and + %% application_controller) and an error report (the call above) in + %% the log. There should not be any info reports yet. + {ok,Bin1} = sync_and_read(Node,Dest,Log), + ct:log("Log content:~n~s",[Bin1]), + + %% Then stop sasl and see that the info report from + %% application_controller is there + ok = rpc:call(Node,application,stop,[sasl]), + {ok,Bin2} = sync_and_read(Node,Dest,Log), + ct:log("Log content:~n~s",[Bin2]), + {ok,Bin1,Bin2}. + +match(Bin,Pattern,0,_,_) -> + nomatch = re:run(Bin,Pattern,[{capture,none}]); +match(Bin,Pattern,N,LogLevel,ConfLevel) -> + case logger:compare_levels(LogLevel,ConfLevel) of + lt -> match(Bin,Pattern,0,LogLevel,ConfLevel); + _ -> + {match,M} = re:run(Bin,Pattern,[{capture,all},global]), + N = length(M) + end. + +find(Id,Handlers) -> + case lists:search(fun(#{id:=Id0}) when Id0=:=Id-> true; + (_) -> false end, + Handlers) of + {value,Config} -> + Config; + false -> + false + end. + +exists(Id,Handlers) -> + case find(Id,Handlers) of + false -> + false; + _ -> + true + end. diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl new file mode 100644 index 0000000000..11cce8fd20 --- /dev/null +++ b/lib/kernel/test/logger_filters_SUITE.erl @@ -0,0 +1,227 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_filters_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). + +-define(ndlog, + #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{}}). +-define(dlog(Domain), + #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{domain=>Domain}}). +-define(llog(Level), + #{level=>Level,msg=>{"Line: ~p",[?LINE]},meta=>#{}}). +-define(plog, + #{level=>info, + msg=>{report,#{label=>{?MODULE,progress}}}, + meta=>#{line=>?LINE}}). +-define(rlog(Node), + #{level=>info, + msg=>{"Line: ~p",[?LINE]}, + meta=>#{gl=>rpc:call(Node,erlang,whereis,[user])}}). + +-define(TRY(X), my_try(fun() -> X end)). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [domain, + level, + progress, + remote_gl]. + +domain(_Config) -> + L1 = logger_filters:domain(L1=?dlog([]),{log,super,[]}), + stop = logger_filters:domain(?dlog([]),{stop,super,[]}), + L2 = logger_filters:domain(L2=?dlog([]),{log,sub,[]}), + stop = logger_filters:domain(?dlog([]),{stop,sub,[]}), + L3 = logger_filters:domain(L3=?dlog([]),{log,equal,[]}), + stop = logger_filters:domain(?dlog([]),{stop,equal,[]}), + ignore = logger_filters:domain(?dlog([]),{log,not_equal,[]}), + ignore = logger_filters:domain(?dlog([]),{stop,not_equal,[]}), + ignore = logger_filters:domain(?dlog([]),{log,undefined,[]}), + ignore = logger_filters:domain(?dlog([]),{stop,undefined,[]}), + + L4 = logger_filters:domain(L4=?dlog([a]),{log,super,[a,b]}), + stop = logger_filters:domain(?dlog([a]),{stop,super,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,sub,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,sub,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,equal,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,equal,[a,b]}), + L5 = logger_filters:domain(L5=?dlog([a]),{log,not_equal,[a,b]}), + stop = logger_filters:domain(?dlog([a]),{stop,not_equal,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,undefined,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,undefined,[a,b]}), + + ignore = logger_filters:domain(?dlog([a,b]),{log,super,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,super,[a]}), + L6 = logger_filters:domain(L6=?dlog([a,b]),{log,sub,[a]}), + stop = logger_filters:domain(?dlog([a,b]),{stop,sub,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,equal,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,equal,[a]}), + L7 = logger_filters:domain(L7=?dlog([a,b]),{log,not_equal,[a]}), + stop = logger_filters:domain(?dlog([a,b]),{stop,not_equal,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,undefined,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,undefined,[a]}), + + ignore = logger_filters:domain(?ndlog,{log,super,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,super,[a]}), + ignore = logger_filters:domain(?ndlog,{log,sub,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,sub,[a]}), + ignore = logger_filters:domain(?ndlog,{log,equal,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,equal,[a]}), + L8 = 
logger_filters:domain(L8=?ndlog,{log,not_equal,[a]}), + stop = logger_filters:domain(?ndlog,{stop,not_equal,[a]}), + L9 = logger_filters:domain(L9=?ndlog,{log,undefined,[a]}), + stop = logger_filters:domain(?ndlog,{stop,undefined,[a]}), + + L10 = logger_filters:domain(L10=?dlog([a,b,c,d]),{log,super,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,super,[a,b,c,d]}), + L11 = logger_filters:domain(L11=?dlog([a,b,c,d]),{log,sub,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,sub,[a,b,c,d]}), + L12 = logger_filters:domain(L12=?dlog([a,b,c,d]),{log,equal,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,undefined,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,undefined,[a,b,c,d]}), + + %% A domain field in meta which is not a list is allowed by the + %% filter, but since MatchDomain is always a list of atoms, only + %% Action=not_equal can ever match. + ignore = logger_filters:domain(?dlog(dummy),{log,super,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,super,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,sub,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,sub,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,equal,[a,b,c,d]}), + L13 = logger_filters:domain(L13=?dlog(dummy),{log,not_equal,[a,b,c,d]}), + stop = logger_filters:domain(?dlog(dummy),{stop,not_equal,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,undefined,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,undefined,[a,b,c,d]}), + + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,super,[]})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,super,bad})), + + ok. 
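
The domain/1 case above exercises logger_filters:domain/2 with every {Action,Compare,MatchDomain} combination via the ?dlog macro. As orientation only (this sketch is not part of the patch; the module name and values are made up), calling the filter directly on an event shaped like ?dlog builds looks like this:

    -module(domain_filter_demo).
    -export([run/0]).

    %% Build a log event the same way the ?dlog macro does and feed it to
    %% logger_filters:domain/2. The filter returns the event itself (log),
    %% the atom 'stop', or 'ignore' when the comparison does not apply.
    run() ->
        Event = #{level => info,
                  msg => {"demo", []},
                  meta => #{domain => [otp, sasl]}},
        Event  = logger_filters:domain(Event, {log, sub, [otp]}),   % [otp,sasl] is a sub-domain of [otp]
        stop   = logger_filters:domain(Event, {stop, sub, [otp]}),
        ignore = logger_filters:domain(Event, {log, equal, [otp]}), % not an exact match
        ok.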
+ +level(_Config) -> + ignore = logger_filters:level(?llog(info),{log,lt,info}), + ignore = logger_filters:level(?llog(info),{stop,lt,info}), + ignore = logger_filters:level(?llog(info),{log,gt,info}), + ignore = logger_filters:level(?llog(info),{stop,gt,info}), + L1 = logger_filters:level(L1=?llog(info),{log,lteq,info}), + stop = logger_filters:level(?llog(info),{stop,lteq,info}), + L2 = logger_filters:level(L2=?llog(info),{log,gteq,info}), + stop = logger_filters:level(?llog(info),{stop,gteq,info}), + L3 = logger_filters:level(L3=?llog(info),{log,eq,info}), + stop = logger_filters:level(?llog(info),{stop,eq,info}), + ignore = logger_filters:level(?llog(info),{log,neq,info}), + ignore = logger_filters:level(?llog(info),{stop,neq,info}), + + ignore = logger_filters:level(?llog(error),{log,lt,info}), + ignore = logger_filters:level(?llog(error),{stop,lt,info}), + L4 = logger_filters:level(L4=?llog(error),{log,gt,info}), + stop = logger_filters:level(?llog(error),{stop,gt,info}), + ignore = logger_filters:level(?llog(error),{log,lteq,info}), + ignore = logger_filters:level(?llog(error),{stop,lteq,info}), + L5 = logger_filters:level(L5=?llog(error),{log,gteq,info}), + stop = logger_filters:level(?llog(error),{stop,gteq,info}), + ignore = logger_filters:level(?llog(error),{log,eq,info}), + ignore = logger_filters:level(?llog(error),{stop,eq,info}), + L6 = logger_filters:level(L6=?llog(error),{log,neq,info}), + stop = logger_filters:level(?llog(error),{stop,neq,info}), + + L7 = logger_filters:level(L7=?llog(info),{log,lt,error}), + stop = logger_filters:level(?llog(info),{stop,lt,error}), + ignore = logger_filters:level(?llog(info),{log,gt,error}), + ignore = logger_filters:level(?llog(info),{stop,gt,error}), + L8 = logger_filters:level(L8=?llog(info),{log,lteq,error}), + stop = logger_filters:level(?llog(info),{stop,lteq,error}), + ignore = logger_filters:level(?llog(info),{log,gteq,error}), + ignore = logger_filters:level(?llog(info),{stop,gteq,error}), + ignore = logger_filters:level(?llog(info),{log,eq,error}), + ignore = logger_filters:level(?llog(info),{stop,eq,error}), + L9 = logger_filters:level(L9=?llog(info),{log,neq,error}), + stop = logger_filters:level(?llog(info),{stop,neq,error}), + + {error,badarg} = ?TRY(logger_filters:level(?llog(info),bad)), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{bad,eq,info})), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,bad,info})), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,eq,bad})), + + ok. + +progress(_Config) -> + L1 = logger_filters:progress(L1=?plog,log), + stop = logger_filters:progress(?plog,stop), + ignore = logger_filters:progress(?ndlog,log), + ignore = logger_filters:progress(?ndlog,stop), + + {error,badarg} = ?TRY(logger_filters:progress(?plog,bad)), + + ok. + +remote_gl(_Config) -> + {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]), + L1 = logger_filters:remote_gl(L1=?rlog(Node),log), + stop = logger_filters:remote_gl(?rlog(Node),stop), + ignore = logger_filters:remote_gl(?ndlog,log), + ignore = logger_filters:remote_gl(?ndlog,stop), + + {error,badarg} = ?TRY(logger_filters:remote_gl(?rlog(Node),bad)), + ok. + +remote_gl(cleanup,_Config) -> + [test_server:stop_node(N) || N<-nodes()]. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R -> {C,R} end. 
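
Beyond the direct calls tested in this suite, these filter funs are normally installed on a handler. A minimal sketch, assuming the default handler exists; the filter ids (only_errors, otp_domain) and the module name are invented for this example:

    -module(filter_setup_demo).
    -export([install/0, uninstall/0]).

    %% Attach the level and domain filters from logger_filters to the
    %% default handler.
    install() ->
        ok = logger:add_handler_filter(default, only_errors,
                 {fun logger_filters:level/2, {stop, lt, error}}),
        ok = logger:add_handler_filter(default, otp_domain,
                 {fun logger_filters:domain/2, {log, super, [otp]}}),
        ok.

    uninstall() ->
        ok = logger:remove_handler_filter(default, only_errors),
        ok = logger:remove_handler_filter(default, otp_domain),
        ok.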
diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl new file mode 100644 index 0000000000..8c13f0f908 --- /dev/null +++ b/lib/kernel/test/logger_formatter_SUITE.erl @@ -0,0 +1,886 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_formatter_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). + +-define(TRY(X), my_try(fun() -> X end)). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [default, + legacy_header, + error_logger_notice_header, + single_line, + template, + format_msg, + report_cb, + max_size, + depth, + chars_limit, + format_mfa, + format_time, + level_or_msg_in_meta, + faulty_log, + faulty_config, + faulty_msg, + check_config, + update_config]. + +default(_Config) -> + String1 = format(info,{"~p",[term]},#{},#{}), + ct:log(String1), + [_DateTime,"info:","term\n"] = string:lexemes(String1," "), + + Time = timestamp(), + ExpectedTimestamp = default_time_format(Time), + String2 = format(info,{"~p",[term]},#{time=>Time},#{}), + ct:log(String2), + " info: term\n" = string:prefix(String2,ExpectedTimestamp), + ok. + +legacy_header(_Config) -> + Time = timestamp(), + String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true, + single_line=>false}), + ct:log(String1), + "=INFO REPORT==== "++Rest = String1, + [Timestamp,"\nterm\n"] = string:lexemes(Rest," ="), + [D,M,Y,H,Min,S,Micro] = string:lexemes(Timestamp,"-:."), + integer(D,31), + integer(Y,2018,infinity), + integer(H,23), + integer(Min,59), + integer(S,59), + integer(Micro,999999), + true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun", + "Jul","Aug","Sep","Oct","Nov","Dec"]), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false, + single_line=>false}), + ct:log(String2), + ExpectedTimestamp = default_time_format(Time), + " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp), + + String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad, + single_line=>false}), + ct:log(String3), + String3 = String2, + + String4 = format(info,{"~p",[term]},#{time=>Time}, + #{legacy_header=>true, + single_line=>true}), % <---ignored + ct:log(String4), + String4 = String1, + + String5 = format(info,{"~p",[term]},#{}, % <--- no time + #{legacy_header=>true, + single_line=>false}), + ct:log(String5), + "=INFO REPORT==== "++_ = String5, + ok. 
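
The default/1 and legacy_header/1 cases above drive logger_formatter:format/2 through the format/4 helper at the bottom of this suite. For reference, a shell-level sketch of the same call (the time value is an assumption, produced the same way as timestamp/0 in the suite):

    Event = #{level => info,
              msg   => {"~p", [term]},
              meta  => #{time => erlang:system_time(microsecond)}},
    io:put_chars(logger_formatter:format(Event, #{legacy_header => true,
                                                  single_line => false})),
    io:put_chars(logger_formatter:format(Event, #{single_line => true})).

The first call yields an "=INFO REPORT====" style header, the second the compact one-line default.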
+ +error_logger_notice_header(_Config) -> + Meta1 = #{error_logger=>#{tag => info_report,type => std_info}}, + String1 = format(notice,{"~p",[term]},Meta1, + #{legacy_header=>true, + error_logger_notice_header=>notice}), + ct:log(String1), + "=NOTICE REPORT==== "++_ = String1, + + String2 = format(notice,{"~p",[term]},Meta1, + #{legacy_header=>true, + error_logger_notice_header=>info}), + ct:log(String2), + "=INFO REPORT==== "++_ = String2, + + String3 = format(notice,{"~p",[term]},#{}, + #{legacy_header=>true, + error_logger_notice_header=>notice}), + ct:log(String3), + "=NOTICE REPORT==== "++_ = String3, + + String4 = format(notice,{"~p",[term]},#{}, + #{legacy_header=>true, + error_logger_notice_header=>info}), + ct:log(String4), + "=NOTICE REPORT==== "++_ = String4, + + ok. + +single_line(_Config) -> + Time = timestamp(), + ExpectedTimestamp = default_time_format(Time), + String1 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>true}), + ct:log(String1), + " info: term\n" = string:prefix(String1,ExpectedTimestamp), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>false}), + ct:log(String2), + " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>bad}), + + + %% Test that no extra commas/spaces are added when removing + %% newlines, especially not after "=>" in a map association (as + %% was the case in OTP-21.0, when the only single_line adjustment + %% was done by regexp replacement of "\n" by ", "). + Prefix = + "Some characters to fill the line ------------------------------------- ", + String3 = format(info,{"~s~p~n~s~p~n",[Prefix, + lists:seq(1,10), + Prefix, + #{a=>map,with=>a,few=>accociations}]}, + #{time=>Time}, + #{single_line=>true}), + ct:log(String3), + match = re:run(String3,"\\[1,2,3,4,5,6,7,8,9,10\\]",[{capture,none}]), + match = re:run(String3, + "#{a => map,few => accociations,with => a}", + [{capture,none}]), + + %% This part is added to make sure that the previous test made + %% sense, i.e. that there would actually be newlines inside the + %% list and map. + String4 = format(info,{"~s~p~n~s~p~n",[Prefix, + lists:seq(1,10), + Prefix, + #{a=>map,with=>a,few=>accociations}]}, + #{time=>Time}, + #{single_line=>false}), + ct:log(String4), + match = re:run(String4,"\\[1,2,3,\n",[global,{capture,none}]), + {match,Match4} = re:run(String4,"=>\n",[global,{capture,all}]), + 3 = length(Match4), + + %% Test that big metadata fields do not get line breaks + String5 = format(info,"", + #{mymeta=>lists:seq(1,100)}, + #{single_line=>true,template=>[mymeta,"\n"]}), + ct:log(String5), + [_] = string:lexemes(String5,"\n"), + + %% Ensure that the previous test made sense, i.e. that the + %% metadata field does produce multiple lines if + %% single_line==false. + String6 = format(info,"", + #{mymeta=>lists:seq(1,100)}, + #{single_line=>false,template=>[mymeta,"\n"]}), + ct:log(String6), + [_,_|_] = string:lexemes(String6,"\n"), + + ok. 
+ +template(_Config) -> + Time = timestamp(), + + Template1 = [msg], + String1 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template1}), + ct:log(String1), + "term" = String1, + + Template2 = [msg,unknown], + String2 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template2}), + ct:log(String2), + "term" = String2, + + Template3 = ["string"], + String3 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template3}), + ct:log(String3), + "string" = String3, + + Template4 = ["string\nnewline"], + String4 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template4, + single_line=>true}), + ct:log(String4), + "string\nnewline" = String4, + + Template5 = [], + String5 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template5}), + ct:log(String5), + "" = String5, + + Ref6 = erlang:make_ref(), + Meta6 = #{atom=>some_atom, + integer=>632, + list=>[list,"string",4321,#{},{tuple}], + mfa=>{mod,func,0}, + pid=>self(), + ref=>Ref6, + string=>"some string", + time=>Time, + tuple=>{1,atom,"list"}, + nested=>#{subkey=>subvalue}}, + Template6 = lists:join(";",lists:sort(maps:keys(maps:remove(nested,Meta6))) ++ + [[nested,subkey]]), + String6 = format(info,{"~p",[term]},Meta6,#{template=>Template6, + single_line=>true}), + ct:log(String6), + SelfStr = pid_to_list(self()), + RefStr6 = ref_to_list(Ref6), + ListStr = "[list,\"string\",4321,#{},{tuple}]", + ExpectedTime6 = default_time_format(Time), + ["some_atom", + "632", + ListStr, + "mod:func/0", + SelfStr, + RefStr6, + "some string", + ExpectedTime6, + "{1,atom,\"list\"}", + "subvalue"] = string:lexemes(String6,";"), + + Meta7 = #{time=>Time, + nested=>#{key1=>#{subkey1=>value1}, + key2=>value2}}, + Template7 = lists:join(";",[nested, + [nested,key1], + [nested,key1,subkey1], + [nested,key2], + [nested,key2,subkey2], + [nested,key3], + [nested,key3,subkey3]]), + String7 = format(info,{"~p",[term]},Meta7,#{template=>Template7, + single_line=>true}), + ct:log(String7), + [MultipleKeysStr7, + "#{subkey1 => value1}", + "value1", + "value2", + "", + "", + ""] = string:split(String7,";",all), + %% Order of keys is not fixed + case MultipleKeysStr7 of + "#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok; + "#{key1 => #{subkey1 => value1},key2 => value2}" -> ok; + _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr7}) + end, + + Meta8 = #{time=>Time, + nested=>#{key1=>#{subkey1=>value1}, + key2=>value2}}, + Template8 = + lists:join( + ";", + [{nested,["exist:",nested],["noexist"]}, + {[nested,key1],["exist:",[nested,key1]],["noexist"]}, + {[nested,key1,subkey1],["exist:",[nested,key1,subkey1]],["noexist"]}, + {[nested,key2],["exist:",[nested,key2]],["noexist"]}, + {[nested,key2,subkey2],["exist:",[nested,key2,subkey2]],["noexist"]}, + {[nested,key3],["exist:",[nested,key3]],["noexist"]}, + {[nested,key3,subkey3],["exist:",[nested,key3,subkey3]],["noexist"]}]), + String8 = format(info,{"~p",[term]},Meta8,#{template=>Template8, + single_line=>true}), + ct:log(String8), + [MultipleKeysStr8, + "exist:#{subkey1 => value1}", + "exist:value1", + "exist:value2", + "noexist", + "noexist", + "noexist"] = string:split(String8,";",all), + %% Order of keys is not fixed + case MultipleKeysStr8 of + "exist:#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok; + "exist:#{key1 => #{subkey1 => value1},key2 => value2}" -> ok; + _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr8}) + end, + + ok. 
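
The template/1 case above checks how template atoms, strings and nested-key paths are resolved. In application code a template is typically set on a handler's formatter; a minimal sketch assuming the default handler (the exact template below is only an example):

    Template = [time, " ", level, " ", mfa, " ", msg, "\n"],
    ok = logger:update_formatter_config(default, #{template => Template,
                                                   single_line => true}).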
+ +format_msg(_Config) -> + Template = [msg], + + String1 = format(info,{"~p",[term]},#{},#{template=>Template}), + ct:log(String1), + "term" = String1, + + String2 = format(info,{"list",[term]},#{},#{template=>Template}), + ct:log(String2), + "FORMAT ERROR: \"list\" - [term]" = String2, + + String3 = format(info,{report,term},#{},#{template=>Template}), + ct:log(String3), + "term" = String3, + + String4 = format(info,{report,term}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log(String4), + "formatted" = String4, + + String5 = format(info,{report,term}, + #{report_cb=>fun(_)-> faulty_return end}, + #{template=>Template}), + ct:log(String5), + "REPORT_CB/1 ERROR: term; Returned: faulty_return" = String5, + + String6 = format(info,{report,term}, + #{report_cb=>fun(_)-> erlang:error(fun_crashed) end}, + #{template=>Template}), + ct:log(String6), + "REPORT_CB/1 CRASH: term; Reason: {error,fun_crashed,"++_ = String6, + + String7 = format(info,{report,term}, + #{report_cb=>fun(_,_)-> ['not',a,string] end}, + #{template=>Template}), + ct:log(String7), + "REPORT_CB/2 ERROR: term; Returned: ['not',a,string]" = String7, + + String8 = format(info,{report,term}, + #{report_cb=>fun(_,_)-> faulty_return end}, + #{template=>Template}), + ct:log(String8), + "REPORT_CB/2 ERROR: term; Returned: faulty_return" = String8, + + String9 = format(info,{report,term}, + #{report_cb=>fun(_,_)-> erlang:error(fun_crashed) end}, + #{template=>Template}), + ct:log(String9), + "REPORT_CB/2 CRASH: term; Reason: {error,fun_crashed,"++_ = String9, + + %% strings are not formatted + String10 = format(info,{string,"string"}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log(String10), + "string" = String10, + + String11 = format(info,{string,['not',printable,list]}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log("~ts",[String11]), % avoiding ct_log crash + "FORMAT ERROR: \"~ts\" - [['not',printable,list]]" = String11, + + String12 = format(info,{string,"string"},#{},#{template=>Template}), + ct:log(String12), + "string" = String12, + + ok. + +report_cb(_Config) -> + Template = [msg], + MetaFun = fun(_) -> {"meta_rcb",[]} end, + ConfigFun = fun(_) -> {"config_rcb",[]} end, + "term" = format(info,{report,term},#{},#{template=>Template}), + "meta_rcb" = + format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template}), + "config_rcb" = + format(info,{report,term},#{},#{template=>Template, + report_cb=>ConfigFun}), + "config_rcb" = + format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template, + report_cb=>ConfigFun}), + ok. + +max_size(_Config) -> + Cfg = #{template=>[msg], + single_line=>false}, + "12345678901234567890" = + format(info,{"12345678901234567890",[]},#{},Cfg), + %% application:set_env(kernel,logger_max_size,11), + %% "12345678901234567890" = % min value is 50, so this is not limited + %% format(info,{"12345678901234567890",[]},#{},Cfg), + %% "12345678901234567890123456789012345678901234567..." = % 50 + %% format(info, + %% {"123456789012345678901234567890123456789012345678901234567890", + %% []}, + %% #{}, + %% Cfg), + %% application:set_env(kernel,logger_max_size,53), + %% "12345678901234567890123456789012345678901234567890..." = %53 + %% format(info, + %% {"123456789012345678901234567890123456789012345678901234567890", + %% []}, + %% #{}, + %% Cfg), + "123456789012..." 
= + format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>15}), + "12345678901234567890" = + format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>unlimited}), + %% Check that one newline at the end of the line is kept (if it exists) + "12345678901...\n" = + format(info,{"12345678901234567890\n",[]},#{},Cfg#{max_size=>15}), + "12345678901...\n" = + format(info,{"12345678901234567890",[]},#{},Cfg#{template=>[msg,"\n"], + max_size=>15}), + ok. +max_size(cleanup,_Config) -> + application:unset_env(kernel,logger_max_size), + ok. + +depth(_Config) -> + Template = [msg], + "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template}), + application:set_env(kernel,error_logger_format_depth,11), + "[1,2,3,4,5,6,7,8,9,0|...]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template}), + "[1,2,3,4,5,6,7,8,9,0,1,2|...]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template, + depth=>13}), + "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template, + depth=>unlimited}), + ok. +depth(cleanup,_Config) -> + application:unset_env(kernel,error_logger_format_depth), + ok. + +chars_limit(_Config) -> + FA = {"LoL: ~p~nL: ~p~nMap: ~p~n", + [lists:duplicate(10,lists:seq(1,100)), + lists:seq(1,100), + maps:from_list(lists:zip(lists:seq(1,100), + lists:duplicate(100,value)))]}, + Meta = #{time=>timestamp()}, + Template = [time," - ", msg, "\n"], + FC = #{template=>Template, + depth=>unlimited, + max_size=>unlimited, + chars_limit=>unlimited, + single_line=>true}, + CL1 = 80, + String1 = format(info,FA,Meta,FC#{chars_limit=>CL1}), + L1 = string:length(String1), + ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]), + true = L1 > CL1, + true = L1 < CL1 + 15, + + String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}), + L2 = string:length(String2), + ct:log("String2: ~p~nLength2: ~p~n",[lists:flatten(String2),L2]), + String2 = String1, + + CL3 = 200, + String3 = format(info,FA,Meta,FC#{chars_limit=>CL3}), + L3 = string:length(String3), + ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]), + true = L3 > CL3, + true = L3 < CL3 + 15, + + String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}), + L4 = string:length(String4), + ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]), + true = L4 > CL3, + true = L4 < CL3 + 15, + + %% Test that max_size truncates the string which is limited by + %% depth and chars_limit + MS5 = 150, + String5 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10,max_size=>MS5}), + L5 = string:length(String5), + ct:log("String5: ~p~nLength5: ~p~n",[String5,L5]), + L5 = MS5, + true = lists:prefix(lists:sublist(String5,L5-4),String4), + + %% Test that chars_limit limits string also + Str = "123456789012345678901234567890123456789012345678901234567890123456789", + CL6 = 80, + String6 = format(info,{string,Str},Meta,FC#{chars_limit=>CL6}), + L6 = string:length(String6), + ct:log("String6: ~p~nLength6: ~p~n",[String6,L6]), + L6 = CL6, + + ok. 
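
depth/1, chars_limit/1 and max_size/1 above probe three different truncation knobs: depth limits the nesting of each term printed with ~p, chars_limit is a soft limit on the formatted output, and max_size is a hard cap on the final string. A sketch applying each one to the same event (all values are examples only):

    Event = #{level => info,
              msg   => {"~p", [lists:seq(1, 1000)]},
              meta  => #{time => erlang:system_time(microsecond)}},
    io:put_chars(logger_formatter:format(Event, #{depth => 10})),
    io:put_chars(logger_formatter:format(Event, #{chars_limit => 100})),
    io:put_chars(logger_formatter:format(Event, #{max_size => 80})).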
+ +format_mfa(_Config) -> + Template = [mfa], + + Meta1 = #{mfa=>{mod,func,0}}, + String1 = format(info,{"~p",[term]},Meta1,#{template=>Template}), + ct:log(String1), + "mod:func/0" = String1, + + Meta2 = #{mfa=>{mod,func,[]}}, + String2 = format(info,{"~p",[term]},Meta2,#{template=>Template}), + ct:log(String2), + "mod:func/0" = String2, + + Meta3 = #{mfa=>"mod:func/0"}, + String3 = format(info,{"~p",[term]},Meta3,#{template=>Template}), + ct:log(String3), + "mod:func/0" = String3, + + Meta4 = #{mfa=>othermfa}, + String4 = format(info,{"~p",[term]},Meta4,#{template=>Template}), + ct:log(String4), + "othermfa" = String4, + + ok. + +format_time(_Config) -> + Time = timestamp(), + Meta = #{time=>Time}, + FC = #{template=>[time]}, + Msg = {string,""}, + ExpectedLocal = default_time_format(Time,false), + ExpectedUtc = default_time_format(Time,true), + + %% default - local time + ExpectedLocal = format(info,Msg,Meta,FC), + + %% time_offset config parameter to formatter + ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}), + ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}), + + %% stdlib utc_log works when time_offset parameter is not set + application:set_env(stdlib,utc_log,true), + ExpectedUtc = format(info,Msg,Meta,FC), + + %% sasl utc_log overwrites stdlib utc_log + application:set_env(sasl,utc_log,false), + ExpectedLocal = format(info,Msg,Meta,FC), + + %% sasl utc_log overwrites stdlib utc_log + application:set_env(sasl,utc_log,true), + application:set_env(stdlib,utc_log,false), + ExpectedUtc = format(info,Msg,Meta,FC), + + %% time_offset config parameter to formatter + %% overwrites sasl and stdlib utc_log + application:set_env(sasl,utc_log,false), + ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}), + + %% time_offset config parameter to formatter + %% overwrites sasl and stdlib utc_log + application:set_env(sasl,utc_log,true), + application:set_env(stdlib,utc_log,true), + ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}), + + %% time_designator config parameter to formatter + ExpectedLocalS = default_time_format(Time,false,$\s), + ExpectedUtcS = default_time_format(Time,true,$\s), + + ExpectedLocalS = format(info,Msg,Meta,FC#{time_offset=>"", + time_designator=>$\s}), + ExpectedUtcS = format(info,Msg,Meta,FC#{time_offset=>"Z", + time_designator=>$\s}), + + ok. + +format_time(cleanup,_Config) -> + application:unset_env(sasl,utc_log), + application:unset_env(stdlib,utc_log), + ok. + +level_or_msg_in_meta(_Config) -> + %% The template contains atoms to pick out values from meta, + %% or level/msg to add these from the log event. What if you have + %% a key named 'level' or 'msg' in meta and want to display + %% its value? + %% For now we simply ignore Meta on this and display the + %% actual level and msg from the log event. + + Meta = #{level=>mylevel, + msg=>"metamsg"}, + Template = [level,";",msg], + String = format(info,{"~p",[term]},Meta,#{template=>Template}), + ct:log(String), + "info;term" = String, % so mylevel and "metamsg" are ignored + + ok. + +faulty_log(_Config) -> + %% Unexpected log (should be type logger:log_event()) - print error + {error, + function_clause, + {logger_formatter,format,[_,_],_}} = + ?TRY(logger_formatter:format(unexp_log,#{})), + ok. + +faulty_config(_Config) -> + {error, + function_clause, + {logger_formatter,format,[_,_],_}} = + ?TRY(logger_formatter:format(#{level=>info, + msg=>{"~p",[term]}, + meta=>#{time=>timestamp()}}, + unexp_config)), + ok. 
+ +faulty_msg(_Config) -> + {error, + function_clause, + {logger_formatter,_,_,_}} = + ?TRY(logger_formatter:format(#{level=>info, + msg=>term, + meta=>#{time=>timestamp()}}, + #{})), + ok. + +-define(cfgerr(X), {error,{invalid_formatter_config,logger_formatter,X}}). +check_config(_Config) -> + ok = logger_formatter:check_config(#{}), + ?cfgerr(bad) = logger_formatter:check_config(bad), + + C1 = #{chars_limit => 1, + depth => 1, + legacy_header => true, + error_logger_notice_header => info, + max_size => 1, + report_cb => fun(R) -> {"~p",[R]} end, + single_line => false, + template => [], + time_designator => $T, + time_offset => 0}, + ok = logger_formatter:check_config(C1), + + ok = logger_formatter:check_config(#{chars_limit => unlimited}), + ?cfgerr({chars_limit,bad}) = + logger_formatter:check_config(#{chars_limit => bad}), + + ok = logger_formatter:check_config(#{depth => unlimited}), + ?cfgerr({depth,bad}) = + logger_formatter:check_config(#{depth => bad}), + + ok = logger_formatter:check_config(#{legacy_header => false}), + ?cfgerr({legacy_header,bad}) = + logger_formatter:check_config(#{legacy_header => bad}), + + ok = logger_formatter:check_config(#{error_logger_notice_header => notice}), + ?cfgerr({error_logger_notice_header,bad}) = + logger_formatter:check_config(#{error_logger_notice_header => bad}), + + ok = logger_formatter:check_config(#{max_size => unlimited}), + ?cfgerr({max_size,bad}) = + logger_formatter:check_config(#{max_size => bad}), + + ok = + logger_formatter:check_config(#{report_cb => fun(_,_) -> "" end}), + ?cfgerr({report_cb,F}) = + logger_formatter:check_config(#{report_cb => F=fun(_,_,_) -> {"",[]} end}), + ?cfgerr({report_cb,bad}) = + logger_formatter:check_config(#{report_cb => bad}), + + ok = logger_formatter:check_config(#{single_line => true}), + ?cfgerr({single_line,bad}) = + logger_formatter:check_config(#{single_line => bad}), + + Ts = [[key], + [[key1,key2]], + [{key,[key],[]}], + [{[key1,key2],[[key1,key2]],["noexist"]}], + ["string"]], + [begin + ct:log("check template: ~p",[T]), + ok = logger_formatter:check_config(#{template => T}) + end + || T <- Ts], + + ETs = [bad, + [{key,bad}], + [{key,[key],bad}], + [{key,[key],"bad"}], + "bad", + [[key,$a,$b,$c]], + [[$a,$b,$c,key]]], + [begin + ct:log("check template: ~p",[T]), + {error,{invalid_formatter_template,logger_formatter,T}} = + logger_formatter:check_config(#{template => T}) + end + || T <- ETs], + + ?cfgerr({time_designator,bad}) = + logger_formatter:check_config(#{time_designator => bad}), + ?cfgerr({time_designator,"b"}) = + logger_formatter:check_config(#{time_designator => "b"}), + + ok = logger_formatter:check_config(#{time_offset => -1}), + ok = logger_formatter:check_config(#{time_offset => "+02:00"}), + ok = logger_formatter:check_config(#{time_offset => "-23:59"}), + ok = logger_formatter:check_config(#{time_offset => "+24:00"}), + ok = logger_formatter:check_config(#{time_offset => "-25:00"}), + ?cfgerr({time_offset,bad}) = + logger_formatter:check_config(#{time_offset => bad}), + ?cfgerr({time_offset,"02:00"}) = + logger_formatter:check_config(#{time_offset => "02:00"}), + ?cfgerr({time_offset,"+02"}) = + logger_formatter:check_config(#{time_offset => "+02"}), + + ok. 
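
check_config/1 above enumerates which option values logger_formatter accepts. A short sketch of using it to validate a configuration up front (the chosen options are arbitrary, taken from values the test above accepts):

    ok = logger_formatter:check_config(#{single_line => true,
                                         time_designator => $T,
                                         template => [time, " ", msg, "\n"]}),
    {error, {invalid_formatter_config, logger_formatter, {depth, bad}}} =
        logger_formatter:check_config(#{depth => bad}).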
+ +%% Test that formatter config can be changed, and that the default +%% template is updated accordingly +update_config(_Config) -> + {error,{not_found,?MODULE}} = logger:update_formatter_config(?MODULE,#{}), + + logger:add_handler_filter(default,silence,{fun(_,_) -> stop end,ok}), + ok = logger:add_handler(?MODULE,?MODULE,#{}), + D = lists:seq(1,1000), + logger:notice("~p~n",[D]), + {Lines1,C1} = check_log(), + [ct:log(L) || L <- Lines1], + ct:log("~p",[C1]), + [Line1] = Lines1, + [_Time,"notice: "++D1] = string:split(Line1," "), + true = length(D1)>3000, + true = #{}==C1, + + ok = logger:update_formatter_config(?MODULE,single_line,false), + logger:notice("~p~n",[D]), + {Lines2,C2} = check_log(), + [ct:log(L) || L <- Lines2], + ct:log("~p",[C2]), + true = length(Lines2)>50, + true = #{single_line=>false}==C2, + + ok = logger:update_formatter_config(?MODULE,#{legacy_header=>true}), + logger:notice("~p~n",[D]), + {Lines3,C3} = check_log(), + [ct:log(L) || L <- Lines3], + ct:log("~p",[C3]), + ["=NOTICE REPORT==== "++_|D3] = Lines3, + true = length(D3)>50, + true = #{legacy_header=>true,single_line=>false}==C3, + + ok = logger:update_formatter_config(?MODULE,single_line,true), + logger:notice("~p~n",[D]), + {Lines4,C4} = check_log(), + [ct:log(L) || L <- Lines4], + ct:log("~p",[C4]), + ["=NOTICE REPORT==== "++_,D4] = Lines4, + true = length(D4)>3000, + true = #{legacy_header=>true,single_line=>true}==C4, + + %% Finally, check that error_logger_notice_header works, default=info + error_logger:info_msg("~p",[D]), + {Lines5,C5} = check_log(), + [ct:log(L) || L <- Lines5], + ct:log("~p",[C5]), + ["=INFO REPORT==== "++_,_D5] = Lines5, + + ok=logger:update_formatter_config(?MODULE,error_logger_notice_header,notice), + error_logger:info_msg("~p",[D]), + {Lines6,C6} = check_log(), + [ct:log(L) || L <- Lines6], + ct:log("~p",[C6]), + ["=NOTICE REPORT==== "++_,_D6] = Lines6, + + {error,{invalid_formatter_config,bad}} = + logger:update_formatter_config(?MODULE,bad), + {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} = + logger:update_formatter_config(?MODULE,depth,bad), + + ok. + +update_config(cleanup,_Config) -> + _ = logger:remove_handler(?MODULE), + _ = logger:remove_handler_filter(default,silence), + ok. + +%%%----------------------------------------------------------------- +%%% Internal +format(Level,Msg,Meta,Config) -> + format(#{level=>Level,msg=>Msg,meta=>add_time(Meta)},Config). + +format(Log,Config) -> + lists:flatten(logger_formatter:format(Log,Config)). + +default_time_format(Timestamp) -> + default_time_format(Timestamp,false). + +default_time_format(Timestamp,Utc) -> + default_time_format(Timestamp,Utc,$T). + +default_time_format(Timestamp,Utc,Sep) -> + Offset = if Utc -> "Z"; + true -> "" + end, + calendar:system_time_to_rfc3339(Timestamp,[{unit,microsecond}, + {time_designator,Sep}, + {offset,Offset}]). + +integer(Str) -> + is_integer(list_to_integer(Str)). +integer(Str,Max) -> + integer(Str,0,Max). +integer(Str,Min,Max) -> + Int = list_to_integer(Str), + Int >= Min andalso Int =<Max. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R:S -> {C,R,hd(S)} end. + +timestamp() -> + erlang:system_time(microsecond). + +%% necessary? +add_time(#{time:=_}=Meta) -> + Meta; +add_time(Meta) -> + Meta#{time=>timestamp()}. + +%%%----------------------------------------------------------------- +%%% handler callback +log(Log,#{formatter:={M,C}}) -> + put(log,{M:format(Log,C),C}), + ok. 
+ +check_log() -> + {S,C} = erase(log), + {string:lexemes(S,"\n"),C}. diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl new file mode 100644 index 0000000000..c3cab07d81 --- /dev/null +++ b/lib/kernel/test/logger_legacy_SUITE.erl @@ -0,0 +1,288 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_legacy_SUITE). + +-compile(export_all). +-compile({nowarn_deprecated_function,[{gen_fsm,start,3}, + {gen_fsm,send_all_state_event,2}]}). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +%%%----------------------------------------------------------------- +%%% This test suite test that log events from within OTP can be +%%% delivered to legacy error_logger event handlers on the same format +%%% as before 'logger' was introduced. +%%% +%%% Before changing the expected format of any of the log events in +%%% this suite, please make sure that the backwards incompatibility it +%%% introduces is ok. +%%% ----------------------------------------------------------------- + +-define(check(Expected), + receive Expected -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). +-define(check_no_flush(Expected), + receive Expected -> + ok + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + logger:add_handler(error_logger,error_logger, + #{level=>info,filter_default=>stop}), + Config. + +end_per_suite(_Config) -> + logger:remove_handler(error_logger), + ok. + +init_per_group(std, Config) -> + ok = logger:set_handler_config( + error_logger,filters, + [{domain,{fun logger_filters:domain/2,{log,super,[otp]}}}]), + Config; +init_per_group(sasl, Config) -> + %% Since default level is notice, and progress reports are info, + %% we need to raise the global logger level to info in order to + %% receive these. + ok = logger:set_primary_config(level,info), + ok = logger:set_handler_config( + error_logger,filters, + [{domain,{fun logger_filters:domain/2,{log,super,[otp,sasl]}}}]), + + %% cth_log_redirect checks if sasl is started before displaying + %% any sasl reports - so just to see the real sasl reports in tc + %% log: + {ok,Apps} = application:ensure_all_started(sasl), + [{stop_apps,Apps}|Config]; +init_per_group(_Group, Config) -> + Config. + +end_per_group(sasl, Config) -> + Apps = ?config(stop_apps,Config), + [application:stop(App) || App <- Apps], + ok = logger:set_primary_config(level,notice), + ok; +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + error_logger:add_report_handler(?MODULE,{event_handler,self()}), + Config. 
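
The sasl group setup above raises the primary level to info and narrows the error_logger handler to the [otp]/[otp,sasl] domains. Spelled out as a stand-alone sketch (assuming an error_logger handler has already been added, as in init_per_suite):

    ok = logger:set_primary_config(level, info),
    ok = logger:set_handler_config(
           error_logger, filters,
           [{domain, {fun logger_filters:domain/2, {log, super, [otp, sasl]}}}]).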
+ +end_per_testcase(Case, Config) -> + %% Using gen_event directly here, instead of + %% error_logger:delete_report_handler. This is to avoid + %% automatically stopping the error_logger process due to removing + %% the last handler. + gen_event:delete_handler(error_logger,?MODULE,[]), + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + [{std,[],[gen_server, + gen_event, + gen_fsm, + gen_statem]}, + {sasl,[],[sasl_reports, + supervisor_handle_info]}]. + +all() -> + [{group,std}, + {group,sasl}]. + +gen_server(_Config) -> + {ok,Pid} = gen_server:start(?MODULE,gen_server,[]), + Msg = fun() -> erlang:error({badmatch,b}) end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}), + ok = gen_server:cast(Pid,Msg), + ?check({error,"** Generic server ~tp terminating"++_, + [Pid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}). + +gen_event(_Config) -> + {ok,Pid} = gen_event:start(), + ok = gen_event:add_handler(Pid,?MODULE,gen_event), + Msg = fun() -> erlang:error({badmatch,b}) end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~tp"++_,[?MODULE,Msg]}), + gen_event:notify(Pid,Msg), + ?check({error,"** gen_event handler ~p crashed."++_, + [?MODULE,Pid,Msg,gen_event,{{badmatch,b},_}]}). + +gen_fsm(_Config) -> + {ok,Pid} = gen_fsm:start(?MODULE,gen_fsm,[]), + Msg = fun() -> erlang:error({badmatch,b}) end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}), + gen_fsm:send_all_state_event(Pid,Msg), + ?check({error,"** State machine ~tp terminating"++_, + [Pid,Msg,mystate,gen_fsm,{{badmatch,b},_}]}). + +gen_statem(_Config) -> + {ok,Pid} = gen_statem:start(?MODULE,gen_statem,[]), + Msg = fun() -> erlang:error({badmatch,b}) end, + Pid ! Msg, + ?check({error,"** State machine ~tp terminating"++_, + [Pid,{info,Msg},{mystate,gen_statem},error,{badmatch,b}|_]}). 
+ +sasl_reports(Config) -> + App = {application,?MODULE,[{description, ""}, + {vsn, "1.0"}, + {modules, [?MODULE]}, + {registered, []}, + {applications, []}, + {mod, {?MODULE, []}}]}, + AppStr = io_lib:format("~p.",[App]), + Dir = ?config(priv_dir,Config), + AppFile = filename:join(Dir,?MODULE_STRING++".app"), + ok = file:write_file(AppFile,AppStr), + true = code:add_patha(Dir), + ok = application:start(?MODULE), + SupName = sup_name(), + Pid = whereis(SupName), + [{ch,ChPid,_,_}] = supervisor:which_children(Pid), + Node = node(), + ?check_no_flush({info_report,progress,[{application,?MODULE}, + {started_at,Node}]}), + ?check({info_report,progress,[{supervisor,{local,SupName}}, + {started,[{pid,ChPid}|_]}]}), + ok = gen_server:cast(ChPid, fun() -> + spawn_link(fun() -> receive x->ok end end) + end), + Msg = fun() -> erlang:error({badmatch,b}) end, + ok = gen_server:cast(ChPid,Msg), + ?check_no_flush({error,"** Generic server ~tp terminating"++_, + [ChPid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}), + ?check_no_flush({error_report,crash_report, + [[{initial_call,_}, + {pid,ChPid}, + {registered_name,[]}, + {error_info,{error,{badmatch,b},_}}, + {ancestors,_}, + {message_queue_len,_}, + {messages,_}, + {links,[Pid,Neighbour]}, + {dictionary,_}, + {trap_exit,_}, + {status,_}, + {heap_size,_}, + {stack_size,_}, + {reductions,_}], + [{neighbour,[{pid,Neighbour}, + {registered_name,_}, + {initial_call,_}, + {current_function,_}, + {ancestors,_}, + {message_queue_len,_}, + {links,[ChPid]}, + {trap_exit,_}, + {status,_}, + {heap_size,_}, + {stack_size,_}, + {reductions,_}, + {current_stacktrace,_}]}]]}), + ?check_no_flush({error_report,supervisor_report, + [{supervisor,{local,SupName}}, + {errorContext,child_terminated}, + {reason,{{badmatch,b},_}}, + {offender,[{pid,ChPid}|_]}]}), + ?check({info_report,progress,[{supervisor,{local,SupName}}, + {started,_}]}), + + ok = application:stop(?MODULE), + ?check({info_report,std_info,[{application,?MODULE}, + {exited,stopped}, + {type,temporary}]}). + +sasl_reports(cleanup,_Config) -> + application:stop(?MODULE). + +supervisor_handle_info(_Config) -> + {ok,Pid} = supervisor:start_link({local,sup_name()},?MODULE,supervisor), + ?check({info_report,progress,[{supervisor,_},{started,_}]}), + Pid ! msg, + ?check({error,"Supervisor received unexpected message: ~tp~n",[msg]}). + +supervisor_handle_info(cleanup,_Config) -> + Pid = whereis(sup_name()), + unlink(Pid), + exit(Pid,shutdown). + +%%%----------------------------------------------------------------- +%%% Callbacks for error_logger event handler, gen_server, gen_statem, +%%% gen_fsm, gen_event, supervisor and application. +start(_,_) -> + supervisor:start_link({local,sup_name()},?MODULE,supervisor). + +init(supervisor) -> + {ok,{#{},[#{id=>ch,start=>{gen_server,start_link,[?MODULE,gen_server,[]]}}]}}; +init(StateMachine) when StateMachine==gen_statem; StateMachine==gen_fsm -> + {ok,mystate,StateMachine}; +init(State) -> + {ok,State}. + +%% error_logger event handler +handle_event({Tag,_Gl,{_Pid,Type,Report}},{_,Pid}=State) -> + Pid ! {Tag,Type,Report}, + {ok,State}; +%% other gen_event +handle_event(Fun,State) when is_function(Fun) -> + Fun(), + {next_state,State}. + +%% gen_fsm +handle_event(Fun,State,Data) when is_function(Fun) -> + Fun(), + {next_state,State,Data}. + +%% gen_statem +handle_event(info,Fun,State,Data) when is_function(Fun) -> + Fun(), + {next_state,State,Data}. + +%% gen_server +handle_cast(Fun,State) when is_function(Fun) -> + Fun(), + {noreply,State}. 
+ +%% gen_statem +callback_mode() -> + handle_event_function. + +%%%----------------------------------------------------------------- +%%% Internal +sup_name() -> + list_to_atom(?MODULE_STRING++"_sup"). diff --git a/lib/kernel/test/logger_simple_h_SUITE.erl b/lib/kernel/test/logger_simple_h_SUITE.erl new file mode 100644 index 0000000000..e0ad792bdb --- /dev/null +++ b/lib/kernel/test/logger_simple_h_SUITE.erl @@ -0,0 +1,209 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_simple_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-import(logger_test_lib, [setup/2, log/3, sync_and_read/3]). + +-define(check_no_log,[] = test_server:messages_get()). +-define(check(Expected), + receive {log,Expected} -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {expected,Expected}, + {got,test_server:messages_get()}}) + end). + +-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). +-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). + +suite() -> + [{timetrap,{seconds,30}}, + {ct_hooks, [logger_test_lib]}]. + +init_per_suite(Config) -> + Hs0 = logger:get_handler_config(), + Hs = lists:keydelete(cth_log_redirect,1,Hs0), + [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], + Env = [{App,Key,application:get_env(App,Key)} || + {App,Key} <- [{kernel,logger_level}]], + [{env,Env},{logger,Hs}|Config]. + +end_per_suite(Config) -> + [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)], + Hs = ?config(logger,Config), + [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop, + replace_default, + replace_file, + replace_disk_log + ]. + +start_stop(_Config) -> + undefined = whereis(logger_simple_h), + register(logger_simple_h,self()), + {error,_} = logger:add_handler(simple, + logger_simple_h, + #{filter_default=>log}), + unregister(logger_simple_h), + ok = logger:add_handler(simple,logger_simple_h,#{filter_default=>log}), + Pid = whereis(logger_simple_h), + true = is_pid(Pid), + ok = logger:remove_handler(simple), + false = is_pid(whereis(logger_simple_h)), + ok. +start_stop(cleanup,_Config) -> + logger:remove_handler(simple). + +%% This testcase just tests that it does not crash, the default handler prints +%% to stdout which we cannot read from in a detached slave. 
+replace_default(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [?str]), + log(Node, alert, [?str,[]]), + log(Node, error, [?map_rep]), + log(Node, info, [?keyval_rep]), + log(Node, info, [?keyval_rep++[not_key_val]]), + rpc:call(Node, error_logger, error_report, [some_type,?map_rep]), + rpc:call(Node, error_logger, warning_report, ["some_type",?map_rep]), + log(Node, critical, [?str,[?keyval_rep]]), + log(Node, notice, [["fake",string,"line:",?LINE]]), + + ok = rpc:call(Node, logger, add_handlers, [kernel]), + + ok. + +replace_file(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [M1=?str]), + log(Node, alert, [M2=?str,[]]), + log(Node, error, [?map_rep]), + log(Node, warning, [?keyval_rep]), + log(Node, warning, [?keyval_rep++[not_key_val]]), + log(Node, critical, [?str,[?keyval_rep]]), + log(Node, notice, [["fake",string,"line:",?LINE]]), + + File = filename:join(proplists:get_value(priv_dir,Config), + atom_to_list(?FUNCTION_NAME)++".log"), + + ok = rpc:call(Node, logger, add_handlers, + [[{handler, default, logger_std_h, + #{ config => #{ type => {file, File} }, + formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]), + + {ok,Bin} = sync_and_read(Node, file, File), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=WARNING REPORT===="++_, + _, + _, + "=WARNING REPORT===="++_, + _, + _, + _, + "=CRITICAL REPORT===="++_, + _, + _, + "=NOTICE REPORT===="++_, + _ + ] = Lines, + ok. + +replace_disk_log(Config) -> + + {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]), + log(Node, emergency, [M1=?str]), + log(Node, alert, [M2=?str,[]]), + log(Node, error, [?map_rep]), + log(Node, warning, [?keyval_rep]), + log(Node, warning, [?keyval_rep++[not_key_val]]), + log(Node, critical, [?str,[?keyval_rep]]), + log(Node, notice, [["fake",string,"line:",?LINE]]), + + File = filename:join(proplists:get_value(priv_dir,Config), + atom_to_list(?FUNCTION_NAME)++".log"), + + ok = rpc:call(Node, logger, add_handlers, + [[{handler, default, logger_disk_log_h, + #{ config => #{ file => File }, + formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]), + {ok,Bin} = sync_and_read(Node, disk_log, File), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=WARNING REPORT===="++_, + _, + _, + "=WARNING REPORT===="++_, + _, + _, + _, + "=CRITICAL REPORT===="++_, + _, + _, + "=NOTICE REPORT===="++_, + _ + ] = Lines, + ok. diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl new file mode 100644 index 0000000000..a1159f280c --- /dev/null +++ b/lib/kernel/test/logger_std_h_SUITE.erl @@ -0,0 +1,1718 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_std_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). +-include_lib("kernel/src/logger_h_common.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("kernel/include/file.hrl"). + +-define(check_no_log, [] = test_server:messages_get()). +-define(check(Expected), + receive + {log,Expected} -> + [] = test_server:messages_get() + after 5000 -> + ct:fail({report_not_received, + {line,?LINE}, + {expected,Expected}, + {got,test_server:messages_get()}}) + end). + +-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(bin(Msg), list_to_binary(Msg++"\n")). +-define(domain,#{domain=>[?MODULE]}). + +suite() -> + [{timetrap,{seconds,30}}, + {ct_hooks,[logger_test_lib]}]. + +init_per_suite(Config) -> + timer:start(), % to avoid progress report + {ok,#{formatter:=OrigFormatter}} = + logger:get_handler_config(?STANDARD_HANDLER), + [{formatter,OrigFormatter}|Config]. + +end_per_suite(Config) -> + {OrigMod,OrigConf} = proplists:get_value(formatter,Config), + logger:set_handler_config(?STANDARD_HANDLER,formatter,{OrigMod,OrigConf}), + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(TestHooksCase, Config) when + TestHooksCase == write_failure; + TestHooksCase == sync_failure -> + case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of + true -> + {skip,"Define the TEST_HOOKS macro to run this test"}; + false -> + ct:print("********** ~w **********", [TestHooksCase]), + Config + end; +init_per_testcase(OPCase, Config) when + OPCase == qlen_kill_new; + OPCase == restart_after -> + case re:run(erlang:system_info(system_version), + "dirty-schedulers-TEST", + [{capture,none}]) of + match -> + {skip,"Overload protection test skipped on dirty-schedulers-TEST"}; + nomatch -> + ct:print("********** ~w **********", [OPCase]), + Config + end; +init_per_testcase(TestCase, Config) -> + ct:print("********** ~w **********", [TestCase]), + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [add_remove_instance_tty, + add_remove_instance_standard_io, + add_remove_instance_standard_error, + add_remove_instance_file1, + add_remove_instance_file2, + default_formatter, + filter_config, + errors, + formatter_fail, + config_fail, + crash_std_h_to_file, + crash_std_h_to_disk_log, + bad_input, + info_and_reset, + reconfig, + file_opts, + sync, + write_failure, + sync_failure, + op_switch_to_sync_file, + op_switch_to_sync_tty, + op_switch_to_drop_file, + op_switch_to_drop_tty, + op_switch_to_flush_file, + op_switch_to_flush_tty, + limit_burst_disabled, + limit_burst_enabled_one, + limit_burst_enabled_period, + kill_disabled, + qlen_kill_new, + qlen_kill_std, + mem_kill_new, + mem_kill_std, + restart_after, + handler_requests_under_load + ]. 
+ +add_remove_instance_tty(_Config) -> + {error,{handler_not_added,{invalid_config,logger_std_h,#{type:=tty}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{type => tty}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + ok. + +add_remove_instance_standard_io(_Config) -> + add_remove_instance_nofile(standard_io). +add_remove_instance_standard_io(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_standard_error(_Config) -> + add_remove_instance_nofile(standard_error). +add_remove_instance_standard_error(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file1(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,"stdlog1.txt"), + Type = {file,Log}, + add_remove_instance_file(Log, Type). +add_remove_instance_file1(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file2(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,"stdlog2.txt"), + Type = {file,Log,[raw,append]}, + add_remove_instance_file(Log, Type). +add_remove_instance_file2(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file(Log, Type) -> + ok = logger:add_handler(?MODULE, + logger_std_h, + #{config => #{type => Type}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + Pid = whereis(h_proc_name()), + true = is_pid(Pid), + logger:notice(M1=?msg,?domain), + ?check(M1), + B1 = ?bin(M1), + try_read_file(Log, {ok,B1}, filesync_rep_int()), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(h_proc_name()), + logger:notice(?msg,?domain), + ?check_no_log, + try_read_file(Log, {ok,B1}, filesync_rep_int()), + ok. + +default_formatter(_Config) -> + ok = logger:set_handler_config(?STANDARD_HANDLER,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}), + ct:capture_start(), + logger:notice(M1=?msg), + timer:sleep(100), + ct:capture_stop(), + [Msg] = ct:capture_get(), + match = re:run(Msg,"=NOTICE REPORT====.*\n"++M1,[{capture,none}]), + ok. + +filter_config(_Config) -> + ok = logger:add_handler(?MODULE,logger_std_h,#{}), + {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE), + HConfig = maps:without([handler_pid,mode_tab],HConfig), + + FakeFullHConfig = HConfig#{handler_pid=>self(),mode_tab=>erlang:make_ref()}, + #{config:=HConfig} = + logger_std_h:filter_config(Config#{config=>FakeFullHConfig}), + ok. + +filter_config(cleanup,_Config) -> + logger:remove_handler(?MODULE), + ok. 
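
add_remove_instance_file1/2 above install logger_std_h with a {file,Log} type and verify that events reach the file. A condensed sketch of the same flow outside the test framework (the handler id and path are made-up examples):

    ok = logger:add_handler(demo_file_h, logger_std_h,
                            #{config => #{type => {file, "/tmp/demo.log"}},
                              formatter => {logger_formatter, #{single_line => true}}}),
    logger:notice("hello from demo_file_h"),
    ok = logger:remove_handler(demo_file_h).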
+ +errors(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,?FUNCTION_NAME), + + ok = logger:add_handler(?MODULE,logger_std_h,#{}), + {error,{already_exist,?MODULE}} = + logger:add_handler(?MODULE,logger_std_h,#{}), + + {error,{not_found,no_such_name}} = logger:remove_handler(no_such_name), + + ok = logger:remove_handler(?MODULE), + {error,{not_found,?MODULE}} = logger:remove_handler(?MODULE), + + {error, + {handler_not_added, + {invalid_config,logger_std_h,#{type:=faulty_type}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{type => faulty_type}}), + + case os:type() of + {win32,_} -> + %% No use in testing file access on windows + ok; + _ -> + NoDir = lists:concat(["/",?MODULE,"_dir"]), + {error, + {handler_not_added,{{open_failed,NoDir,eacces},_}}} = + logger:add_handler(myh2,logger_std_h, + #{config=>#{type=>{file,NoDir}}}) + end, + + {error, + {handler_not_added,{{open_failed,Log,_},_}}} = + logger:add_handler(myh3,logger_std_h, + #{config=>#{type=>{file,Log,[bad_file_opt]}}}), + + ok = logger:notice(?msg). + +errors(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +formatter_fail(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,?FUNCTION_NAME), + + %% no formatter + ok = logger:add_handler(?MODULE, + logger_std_h, + #{config => #{type => {file,Log}}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}), + Pid = whereis(h_proc_name()), + true = is_pid(Pid), + H = logger:get_handler_ids(), + true = lists:member(?MODULE,H), + + %% Formatter is added automatically + {ok,#{formatter:={logger_formatter,_}}} = logger:get_handler_config(?MODULE), + logger:notice(M1=?msg,?domain), + Got1 = try_match_file(Log,"[0-9\\+\\-T:\\.]* notice: "++M1,5000), + + ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}), + logger:notice(M2=?msg,?domain), + Got2 = try_match_file(Log, + escape(Got1)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M2, + 5000), + + ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}), + logger:notice(M3=?msg,?domain), + Got3 = try_match_file(Log, + escape(Got2)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M3, + 5000), + + ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}), + logger:notice(?msg,?domain), + try_match_file(Log, + escape(Got3)++"FORMATTER ERROR: bad return value", + 5000), + + %% Check that handler is still alive and was never dead + Pid = whereis(h_proc_name()), + H = logger:get_handler_ids(), + + ok. + +formatter_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). 
+ +config_fail(_Config) -> + {error,{handler_not_added,{invalid_config,logger_std_h,#{bad:=bad}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{bad => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{handler_not_added,{invalid_config,logger_std_h, + #{restart_type:=bad}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{restart_type => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{handler_not_added,{invalid_config,logger_std_h, + {invalid_levels,#{drop_mode_qlen:=1}}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{drop_mode_qlen=>1}}), + {error,{handler_not_added,{invalid_config,logger_std_h, + {invalid_levels,#{sync_mode_qlen:=43, + drop_mode_qlen:=42}}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{sync_mode_qlen=>43, + drop_mode_qlen=>42}}), + {error,{handler_not_added,{invalid_config,logger_std_h, + {invalid_levels,#{drop_mode_qlen:=43, + flush_qlen:=42}}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{config => #{drop_mode_qlen=>43, + flush_qlen=>42}}), + + ok = logger:add_handler(?MODULE,logger_std_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{illegal_config_change,logger_std_h,#{type:=_},#{type:=_}}} = + logger:set_handler_config(?MODULE,config, + #{type=>{file,"file"}}), + + {error,{invalid_config,logger_std_h,{invalid_levels,_}}} = + logger:set_handler_config(?MODULE,config, + #{sync_mode_qlen=>100, + flush_qlen=>99}), + {error,{invalid_config,logger_std_h,#{filesync_rep_int:=2000}}} = + logger:set_handler_config(?MODULE, config, + #{filesync_rep_int => 2000}), + + %% Read-only fields may (accidentially) be included in the change, + %% but it won't take effect + {ok,C} = logger:get_handler_config(?MODULE), + ok = logger:set_handler_config(?MODULE,config, + #{handler_pid=>self(), + mode_tab=>erlang:make_ref()}), + {ok,C} = logger:get_handler_config(?MODULE), + + ok. + +config_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +crash_std_h_to_file(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])), + crash_std_h(Config,?FUNCTION_NAME, + [{handler,default,logger_std_h, + #{ config => #{ type => {file, Log} }}}], + file, Log). +crash_std_h_to_file(cleanup,_Config) -> + crash_std_h(cleanup). + +crash_std_h_to_disk_log(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])), + crash_std_h(Config,?FUNCTION_NAME, + [{handler,default,logger_disk_log_h, + #{ config => #{ file => Log }}}], + disk_log,Log). +crash_std_h_to_disk_log(cleanup,_Config) -> + crash_std_h(cleanup). 
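
crash_std_h_to_file/1 and crash_std_h_to_disk_log/1 above pass a handler spec that the crash_std_h/5 helper that follows writes into a generated sys.config before booting a peer node. Written out in full, such a config term looks roughly like this (the path is an example):

    [{kernel,
      [{logger,
        [{handler, default, logger_std_h,
          #{config => #{type => {file, "/tmp/node.log"}}}}]}]}].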
+ +crash_std_h(Config,Func,Var,Type,Log) -> + Dir = ?config(priv_dir,Config), + SysConfig = filename:join(Dir,lists:concat([?MODULE,"_",Func,".config"])), + ok = file:write_file(SysConfig, io_lib:format("[{kernel,[{logger,~p}]}].",[Var])), + Pa = filename:dirname(code:which(?MODULE)), + Name = lists:concat([?MODULE,"_",Func]), + Args = lists:concat([" -config ",filename:rootname(SysConfig)," -pa ",Pa]), + ct:pal("Starting ~p with ~tp", [Name,Args]), + %% Start a node which prints kernel logs to the destination specified by Type + {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]), + HProcName = + case Type of + file -> ?name_to_reg_name(logger_std_h,?STANDARD_HANDLER); + disk_log -> ?name_to_reg_name(logger_disk_log_h,?STANDARD_HANDLER) + end, + Pid = rpc:call(Node,erlang,whereis,[HProcName]), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,self()}]), + ok = log_on_remote_node(Node,"dummy1"), + ?check("dummy1"), + {ok,Bin1} = sync_and_read(Node,Type,Log), + <<"dummy1\n">> = binary:part(Bin1,{byte_size(Bin1),-7}), + + %% Kill the logger_std_h process + exit(Pid, kill), + + %% Wait a bit, then check that it is gone + timer:sleep(2000), + undefined = rpc:call(Node,erlang,whereis,[HProcName]), + + %% Check that file is not empty + {ok,Bin2} = sync_and_read(Node,Type,Log), + <<"dummy1\n">> = binary:part(Bin2,{byte_size(Bin2),-7}), + ok. + +%% Can not use rpc:call here, since the code would execute on a +%% process with group_leader on this (the calling) node, and thus +%% logger would send the log event to the logger process here instead +%% of logging it itself. +log_on_remote_node(Node,Msg) -> + _ = spawn_link(Node, + fun() -> erlang:group_leader(whereis(user),self()), + logger:notice(Msg) + end), + ok. + + +crash_std_h(cleanup) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +sync_and_read(Node,disk_log,Log) -> + rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]), + case file:read_file(Log ++ ".1") of + {ok,<<>>} -> + timer:sleep(5000), + file:read_file(Log ++ ".1"); + Ok -> + Ok + end; +sync_and_read(Node,file,Log) -> + rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]), + case file:read_file(Log) of + {ok,<<>>} -> + timer:sleep(5000), + file:read_file(Log); + Ok -> + Ok + end. + +bad_input(_Config) -> + {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType"), + {error,{badarg,{info,["BadType"]}}} = logger_std_h:info("BadType"), + {error,{badarg,{reset,["BadType"]}}} = logger_std_h:reset("BadType"). + + +info_and_reset(_Config) -> + #{id := ?STANDARD_HANDLER} = logger_std_h:info(?STANDARD_HANDLER), + ok = logger_std_h:reset(?STANDARD_HANDLER). 
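The restriction noted above, that rpc:call is unsuitable for provoking log events on the remote node, comes from rpc running the code in a process whose group leader lives on the calling node. A minimal sketch of the unsuitable variant, for contrast with log_on_remote_node/2 (the helper name log_via_rpc is illustrative and not part of the suite):

    %% Runs logger:notice/1 on Node, but in a process whose group leader
    %% is on the calling node, so logger forwards the event back to the
    %% caller instead of logging it through Node's own handlers.
    log_via_rpc(Node, Msg) ->
        rpc:call(Node, logger, notice, [Msg]).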
+ +reconfig(Config) -> + Dir = ?config(priv_dir,Config), + ok = logger:add_handler(?MODULE, + logger_std_h, + #{config => #{type => standard_io}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + #{id := ?MODULE, + handler_state := #{type := standard_io, + file_ctrl_pid := FileCtrlPid}, + sync_mode_qlen := ?SYNC_MODE_QLEN, + drop_mode_qlen := ?DROP_MODE_QLEN, + flush_qlen := ?FLUSH_QLEN, + burst_limit_enable := ?BURST_LIMIT_ENABLE, + burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable := ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen := ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval := no_repeat} = DefaultInfo = + logger_std_h:info(?MODULE), + + {ok, + #{config:= + #{type := standard_io, + sync_mode_qlen := ?SYNC_MODE_QLEN, + drop_mode_qlen := ?DROP_MODE_QLEN, + flush_qlen := ?FLUSH_QLEN, + burst_limit_enable := ?BURST_LIMIT_ENABLE, + burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT, + burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME, + overload_kill_enable := ?OVERLOAD_KILL_ENABLE, + overload_kill_qlen := ?OVERLOAD_KILL_QLEN, + overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE, + overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER, + filesync_repeat_interval := no_repeat} = + DefaultHConf}} + = logger:get_handler_config(?MODULE), + + ok = logger:set_handler_config(?MODULE, config, + #{sync_mode_qlen => 1, + drop_mode_qlen => 2, + flush_qlen => 3, + burst_limit_enable => false, + burst_limit_max_count => 10, + burst_limit_window_time => 10, + overload_kill_enable => true, + overload_kill_qlen => 100000, + overload_kill_mem_size => 10000000, + overload_kill_restart_after => infinity, + filesync_repeat_interval => 5000}), + #{id := ?MODULE, + handler_state := #{type := standard_io, + file_ctrl_pid := FileCtrlPid}, + sync_mode_qlen := 1, + drop_mode_qlen := 2, + flush_qlen := 3, + burst_limit_enable := false, + burst_limit_max_count := 10, + burst_limit_window_time := 10, + overload_kill_enable := true, + overload_kill_qlen := 100000, + overload_kill_mem_size := 10000000, + overload_kill_restart_after := infinity, + filesync_repeat_interval := no_repeat} = Info = logger_std_h:info(?MODULE), + + {ok,#{config := + #{type := standard_io, + sync_mode_qlen := 1, + drop_mode_qlen := 2, + flush_qlen := 3, + burst_limit_enable := false, + burst_limit_max_count := 10, + burst_limit_window_time := 10, + overload_kill_enable := true, + overload_kill_qlen := 100000, + overload_kill_mem_size := 10000000, + overload_kill_restart_after := infinity, + filesync_repeat_interval := no_repeat} = HConf}} = + logger:get_handler_config(?MODULE), + + ok = logger:update_handler_config(?MODULE, config, + #{flush_qlen => ?FLUSH_QLEN}), + {ok,#{config:=C1}} = logger:get_handler_config(?MODULE), + ct:log("C1: ~p",[C1]), + C1 = HConf#{flush_qlen => ?FLUSH_QLEN}, + + ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C2}} = logger:get_handler_config(?MODULE), + ct:log("C2: ~p",[C2]), + C2 = DefaultHConf#{sync_mode_qlen => 1}, + + ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}), + {ok,#{config:=C3}} = logger:get_handler_config(?MODULE), + ct:log("C3: ~p",[C3]), + C3 = DefaultHConf#{drop_mode_qlen => 100}, + + ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}), + {ok,#{config:=C4}} = 
logger:get_handler_config(?MODULE), + ct:log("C4: ~p",[C4]), + C4 = DefaultHConf#{sync_mode_qlen => 1, + drop_mode_qlen => 100}, + + ok = logger:remove_handler(?MODULE), + + File = filename:join(Dir,lists:concat([?FUNCTION_NAME,".log"])), + ok = logger:add_handler(?MODULE, + logger_std_h, + #{config => #{type => {file,File}}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + + {ok,#{config:=#{filesync_repeat_interval:=FSI}=FileHConfig}} = + logger:get_handler_config(?MODULE), + ok = logger:update_handler_config(?MODULE,config, + #{filesync_repeat_interval=>FSI+2000}), + {ok,#{config:=C5}} = logger:get_handler_config(?MODULE), + ct:log("C5: ~p",[C5]), + C5 = FileHConfig#{filesync_repeat_interval=>FSI+2000}, + + %% You are not allowed to actively set 'type' in runtime, since + %% this is a write once field. + {error, {illegal_config_change,logger_std_h,_,_}} = + logger:set_handler_config(?MODULE,config,#{type=>standard_io}), + {ok,#{config:=C6}} = logger:get_handler_config(?MODULE), + ct:log("C6: ~p",[C6]), + C6 = C5, + + %% ... but if you don't specify 'type', then set_handler_config shall + %% NOT reset it to its default value + ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}), + {ok,#{config:=C7}} = logger:get_handler_config(?MODULE), + ct:log("C7: ~p",[C7]), + C7 = FileHConfig#{sync_mode_qlen=>1}, + ok. + +reconfig(cleanup, _Config) -> + logger:remove_handler(?MODULE). + + +file_opts(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])), + BadFileOpts = [raw], + BadType = {file,Log,BadFileOpts}, + {error,{handler_not_added,{{open_failed,Log,enoent},_}}} = + logger:add_handler(?MODULE, logger_std_h, + #{config => #{type => BadType}}), + + OkFileOpts = [raw,append], + OkType = {file,Log,OkFileOpts}, + ok = logger:add_handler(?MODULE, + logger_std_h, + #{config => #{type => OkType}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + + #{handler_state := #{type := OkType}} = logger_std_h:info(?MODULE), + logger:notice(M1=?msg,?domain), + ?check(M1), + B1 = ?bin(M1), + try_read_file(Log, {ok,B1}, filesync_rep_int()), + ok. +file_opts(cleanup, _Config) -> + logger:remove_handler(?MODULE). 
+
+
+sync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ Type = {file,Log},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => Type},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,nl}}),
+
+ %% check repeated filesync happens
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"first\n">>},
+ {file,datasync}]),
+
+ logger:notice("first", ?domain),
+ %% wait for automatic filesync
+ check_tracer(filesync_rep_int()*2),
+
+ %% check that explicit filesync is only done once
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"second\n">>},
+ {file,datasync},
+ {no_more,500}
+ ]),
+ logger:notice("second", ?domain),
+ %% do explicit sync
+ logger_std_h:filesync(?MODULE),
+ %% a second sync should be ignored
+ logger_std_h:filesync(?MODULE),
+ check_tracer(100),
+
+ %% check that if there's no repeated filesync active,
+ %% a filesync is still performed when handler goes idle
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
+ no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ %% The following timer is to make sure the time from last log
+ %% ("second") to next ("third") is long enough, so that a flush is
+ %% triggered by the idle timeout between "third" and "fourth".
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ start_tracer([{logger_std_h, write_to_dev, 5},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"third\n">>},
+ {file,datasync},
+ {logger_std_h, write_to_dev, <<"fourth\n">>},
+ {file,datasync}]),
+ logger:notice("third", ?domain),
+ %% wait for automatic filesync
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ logger:notice("fourth", ?domain),
+ %% wait for automatic filesync
+ check_tracer(?IDLE_DETECT_TIME_MSEC*2),
+
+ %% switch repeated filesync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{logger_h_common,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => SyncInt}),
+ SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ timer:sleep(WaitT),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
+ check_tracer(100),
+ ok.
+sync(cleanup, _Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
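For reference, the repeated filesync exercised above is controlled entirely through the handler's filesync_repeat_interval option; a minimal sketch of toggling it on some handler (the handler id my_handler and the 1000 ms interval are illustrative):

    %% Ask the handler to filesync once per second ...
    ok = logger:update_handler_config(my_handler, config,
             #{filesync_repeat_interval => 1000}),
    %% ... and turn the repeated filesync off again; syncs then happen
    %% only on explicit request or when the handler goes idle.
    ok = logger:update_handler_config(my_handler, config,
             #{filesync_repeat_interval => no_repeat}).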
+ +write_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + Log = filename:join(Dir, File), + Node = start_std_h_on_new_node(Config, Log), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [file_write,ok]), + + ok = log_on_remote_node(Node, "Logged1"), + rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\n">>}, filesync_rep_int()), + + rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,write,Log,{error,terminated}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_write,{error,eacces}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,write,Log,{error,eacces}}}), + + rpc:call(Node, ?MODULE, set_result, [file_write,ok]), + ok = log_on_remote_node(Node, "Logged2"), + rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, filesync_rep_int()), + ok. +write_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +sync_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + Log = filename:join(Dir, File), + Node = start_std_h_on_new_node(Config, Log), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]), + + SyncInt = 500, + ok = rpc:call(Node, logger, update_handler_config, + [?STANDARD_HANDLER, config, + #{filesync_repeat_interval => SyncInt}]), + Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]), + SyncInt = maps:get(filesync_repeat_interval, Info), + + ok = log_on_remote_node(Node, "Logged1"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}), + + rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]), + ok = log_on_remote_node(Node, "Logged2"), + ?check_no_log, + ok. +sync_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +start_std_h_on_new_node(Config, Log) -> + {ok,_,Node} = + logger_test_lib:setup( + Config, + [{logger,[{handler,default,logger_std_h, + #{ config => #{ type => {file,Log}}}}]}]), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,nl}]), + Node. + +%% functions for test hook macros to be called by rpc +set_internal_log(_Mod, _Func) -> + ?set_internal_log({_Mod,_Func}). +set_result(_Op, _Result) -> + ?set_result(_Op, _Result). +set_defaults() -> + ?set_defaults(). 
+ +%% internal log function that sends the term to the test case process +internal_log(Type, Term) -> + [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester), + Tester ! {log,{Type,Term}}, + logger:internal_log(Type, Term), + ok. + + +%%%----------------------------------------------------------------- +%%% Overload protection tests + +op_switch_to_sync_file(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 500, + NewHConfig = + HConfig#{config => StdHConfig#{sync_mode_qlen => 2, + drop_mode_qlen => NumOfReqs+1, + flush_qlen => 2*NumOfReqs, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + %% TRecvPid = start_op_trace(), + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Lines = count_lines(Log), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(async,Events) end), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(sync,Events) end), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_switch(async,sync,Events) end), + %% false = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(drop,Events) end), + %% false = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(flush,Events) end), + %% stop_op_trace(TRecvPid), + NumOfReqs = Lines, + ok = file_delete(Log), + ok. +op_switch_to_sync_file(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_sync_tty(Config) -> + {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + NumOfReqs = 500, + NewHConfig = + HConfig#{config => StdHConfig#{sync_mode_qlen => 3, + drop_mode_qlen => NumOfReqs+1, + flush_qlen => 2*NumOfReqs, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + ok. +op_switch_to_sync_tty(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_drop_file() -> + [{timetrap,{seconds,180}}]. +op_switch_to_drop_file(Config) -> + Test = + fun() -> + {Log,HConfig,StdHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NumOfReqs = 300, + Procs = 2, + Bursts = 10, + NewHConfig = + HConfig#{config => + StdHConfig#{sync_mode_qlen => 1, + drop_mode_qlen => 2, + flush_qlen => + Procs*NumOfReqs*Bursts, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + %% It sometimes happens that the handler gets the + %% requests in a slow enough pace so that dropping + %% never occurs. Therefore, lets generate a number of + %% bursts to increase the chance of message buildup. + [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) || + _ <- lists:seq(1, Bursts)], + Logged = count_lines(Log), + ok = stop_handler(?MODULE), + ct:pal("Number of messages dropped = ~w (~w)", + [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]), + true = (Logged < (Procs*NumOfReqs*Bursts)), + true = (Logged > 0), + _ = file_delete(Log), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. +op_switch_to_drop_file(cleanup, _Config) -> + _ = stop_handler(?MODULE). 
+ +op_switch_to_drop_tty(Config) -> + {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + NumOfReqs = 300, + Procs = 2, + NewHConfig = + HConfig#{config => StdHConfig#{sync_mode_qlen => 1, + drop_mode_qlen => 2, + flush_qlen => + Procs*NumOfReqs+1, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), + ok. +op_switch_to_drop_tty(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_flush_file() -> + [{timetrap,{minutes,5}}]. +op_switch_to_flush_file(Config) -> + Test = + fun() -> + {Log,HConfig,StdHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% NOTE: it's important that both async and sync + %% requests have been queued when the flush happens + %% (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{config => + StdHConfig#{sync_mode_qlen => 2, + %% disable drop mode + drop_mode_qlen => 300, + flush_qlen => 300, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 1500, + Procs = 10, + Bursts = 10, + %% It sometimes happens that the handler either gets + %% the requests in a slow enough pace so that flushing + %% never occurs, or it gets all messages at once, + %% causing all messages to get flushed (no dropping of + %% sync messages gets tested). Therefore, lets + %% generate a number of bursts to increase the chance + %% of message buildup in some random fashion. + [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) || + _ <- lists:seq(1,Bursts)], + Logged = count_lines(Log), + ok = stop_handler(?MODULE), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]), + true = (Logged < (NumOfReqs*Procs*Bursts)), + true = (Logged > 0), + _ = file_delete(Log), + ok + end, + %% As it's tricky to get the timing right in only one go, we perform the + %% test repeatedly, hoping that will generate a successful result. + case repeat_until_ok(Test, 10) of + {ok,{Failures,_Result}} -> + ct:log("Failed ~w times before success!", [Failures]); + {fails,Reason} -> + ct:fail(Reason) + end. +op_switch_to_flush_file(cleanup, _Config) -> + _ = stop_handler(?MODULE). + +op_switch_to_flush_tty() -> + [{timetrap,{minutes,5}}]. +op_switch_to_flush_tty(Config) -> + {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + + %% it's important that both async and sync requests have been queued + %% when the flush happens (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{config => StdHConfig#{sync_mode_qlen => 2, + %% disable drop mode + drop_mode_qlen => 100, + flush_qlen => 100, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 1000, + Procs = 100, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), + ok. +op_switch_to_flush_tty(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
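The op_switch_* cases above all drive the same three queue-length thresholds. As a hedged summary of what they configure, following what the tests themselves demonstrate: events are handled asynchronously while the handler's message queue is shorter than sync_mode_qlen, synchronously up to drop_mode_qlen, dropped above that, and the whole queue is flushed once flush_qlen is reached. A sketch with an illustrative handler id and values:

    %% Illustrative configuration of the three overload thresholds.
    ok = logger:update_handler_config(my_handler, config,
             #{sync_mode_qlen => 10,
               drop_mode_qlen => 200,
               flush_qlen => 1000}).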
+ +limit_burst_disabled(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config => StdHConfig#{burst_limit_enable => false, + burst_limit_max_count => 10, + burst_limit_window_time => 2000, + drop_mode_qlen => 200, + flush_qlen => 300}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + NumOfReqs = Logged, + ok = file_delete(Log), + ok. +limit_burst_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_one(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + NewHConfig = + HConfig#{config => StdHConfig#{burst_limit_enable => true, + burst_limit_max_count => ReqLimit, + burst_limit_window_time => 2000, + drop_mode_qlen => 200, + flush_qlen => 300}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ReqLimit = Logged, + ok = file_delete(Log), + ok. +limit_burst_enabled_one(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_period(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + BurstTWin = 1000, + NewHConfig = + HConfig#{config => StdHConfig#{burst_limit_enable => true, + burst_limit_max_count => ReqLimit, + burst_limit_window_time => BurstTWin, + drop_mode_qlen => 20000, + flush_qlen => 20001}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + + Windows = 3, + Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + true = (Logged > (ReqLimit*Windows)) andalso + (Logged < (ReqLimit*(Windows+2))), + ok = file_delete(Log), + ok. +limit_burst_enabled_period(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +kill_disabled(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config=>StdHConfig#{overload_kill_enable=>false, + overload_kill_qlen=>10, + overload_kill_mem_size=>100}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file_delete(Log), + true = is_pid(whereis(h_proc_name())), + ok. +kill_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
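The kill tests that follow exercise the overload_kill_* options seen in kill_disabled/1 above; a minimal sketch, with illustrative handler id and values, of a handler configured to terminate itself under overload and be restarted five seconds later:

    %% Let the handler kill itself if its queue length or memory use
    %% grows too large, and have it restarted after 5000 ms.
    ok = logger:update_handler_config(my_handler, config,
             #{overload_kill_enable => true,
               overload_kill_qlen => 10000,
               overload_kill_mem_size => 3000000,
               overload_kill_restart_after => 5000}).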
+ +qlen_kill_new(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(h_proc_name()), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + NewHConfig = + HConfig#{config=>StdHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>10, + overload_kill_mem_size=>Mem0+50000, + overload_kill_restart_after=>RestartAfter}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 4, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), + ok + after + 5000 -> + Info = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +qlen_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% choke the standard handler on remote node to verify the termination +%% works as expected +qlen_kill_std(_Config) -> + %%! HERE + %% Dir = ?config(priv_dir, Config), + %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + %% Log = filename:join(Dir, File), + %% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + %% ok = rpc:call(Node, logger, update_handler_config, + %% [?STANDARD_HANDLER, config, + %% #{overload_kill_enable=>true, + %% overload_kill_qlen=>10, + %% overload_kill_mem_size=>100000}]), + {skip,"Not done yet"}. + +mem_kill_new(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(h_proc_name()), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + NewHConfig = + HConfig#{config=>StdHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>50000, + overload_kill_mem_size=>Mem0+500, + overload_kill_restart_after=>RestartAfter}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 4, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + file_delete(Log), + {ok,_} = wait_for_process_up(RestartAfter * 3), + ok + after + 5000 -> + Info = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +mem_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% choke the standard handler on remote node to verify the termination +%% works as expected +mem_kill_std(_Config) -> + {skip,"Not done yet"}. + +restart_after() -> + [{timetrap,{minutes,2}}]. 
+restart_after(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig1 = + HConfig#{config=>StdHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>10, + overload_kill_restart_after=>infinity}}, + ok = logger:update_handler_config(?MODULE, NewHConfig1), + MRef1 = erlang:monitor(process, whereis(h_proc_name())), + %% kill handler + send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), + receive + {'DOWN', MRef1, _, _, _Reason1} -> + file_delete(Log), + error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3), + ok + after + 5000 -> + Info1 = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info1]), + ct:fail("Handler not dead! It should not have survived this!") + end, + + {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER, + + NewHConfig2 = + HConfig#{config=>StdHConfig#{overload_kill_enable=>true, + overload_kill_qlen=>10, + overload_kill_restart_after=>RestartAfter}}, + ok = logger:update_handler_config(?MODULE, NewHConfig2), + Pid0 = whereis(h_proc_name()), + MRef2 = erlang:monitor(process, Pid0), + %% kill handler + send_burst({n,100}, {spawn,4,0}, {chars,79}, notice), + receive + {'DOWN', MRef2, _, _, _Reason2} -> + file_delete(Log), + {ok,Pid1} = wait_for_process_up(RestartAfter * 3), + false = (Pid1 == Pid0), + ok + after + 5000 -> + Info2 = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info2]), + ct:fail("Handler not dead! It should not have survived this!") + end, + ok. +restart_after(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% send handler requests (sync, info, reset, change_config) +%% during high load to verify that sync, dropping and flushing is +%% handled correctly. +handler_requests_under_load() -> + [{timetrap,{minutes,3}}]. +handler_requests_under_load(Config) -> + {Log,HConfig,StdHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{config => StdHConfig#{sync_mode_qlen => 2, + drop_mode_qlen => 1000, + flush_qlen => 2000, + burst_limit_enable => false}}, + ok = logger:update_handler_config(?MODULE, NewHConfig), + Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]}, + {info,[]}, + {reset,[]}, + {change_config,[]}]) + end), + Sent = send_burst({t,10000}, seq, {chars,79}, notice), + Pid ! {self(),finish}, + ReqResult = receive {Pid,Result} -> Result end, + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + FindError = fun(Res) -> + [E || E <- Res, + is_tuple(E) andalso (element(1,E) == error)] + end, + Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult], + NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult), + ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]), + ok = file_delete(Log). +handler_requests_under_load(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> + receive + {From,finish} -> + From ! {self(),Reqs} + after + TO -> + Result = + case Req of + change_config -> + logger:update_handler_config(HName, config, + #{overload_kill_enable => + false}); + Func -> + logger_std_h:Func(HName) + end, + send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}]) + end. 
+ + +%%%----------------------------------------------------------------- +%%% +start_handler(Name, TTY, Config) when TTY == standard_io; + TTY == standard_error-> + ok = logger:add_handler(Name, + logger_std_h, + #{config => #{type => TTY}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name), + {HConfig,StdHConfig}; + +start_handler(Name, FuncName, Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir, lists:concat([FuncName,".log"])), + ct:pal("Logging to ~tp", [Log]), + Type = {file,Log}, + _ = file_delete(Log), + ok = logger:add_handler(Name, + logger_std_h, + #{config => #{type => Type}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name), + {Log,HConfig,StdHConfig}. + +stop_handler(Name) -> + R = logger:remove_handler(Name), + ct:pal("Handler ~p stopped! Result: ~p", [Name,R]), + R. + +count_lines(File) -> + wait_until_written(File, -1), + count_lines1(File). + +wait_until_written(File, Sz) -> + timer:sleep(2000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + timer:sleep(1000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + ok; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end. + +count_lines1(File) -> + {_,Dev} = file:open(File, [read]), + Lines = count_lines2(Dev, 0), + file:close(Dev), + Lines. + +count_lines2(Dev, LC) -> + case file:read_line(Dev) of + {ok,"Handler logger_std_h_SUITE " ++_} -> + %% Not counting handler info + count_lines2(Dev,LC); + {ok,_} -> + count_lines2(Dev,LC+1); + eof -> LC + end. + +send_burst(NorT, Type, {chars,Sz}, Class) -> + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)], + case NorT of + {n,N} -> + %% process_flag(priority, high), + send_n_burst(N, Type, Text, Class), + %% process_flag(priority, normal), + N; + {t,T} -> + ct:pal("Sending messages sequentially for ~w ms", [T]), + T0 = erlang:monotonic_time(millisecond), + send_t_burst(T0, T, Text, Class, 0) + end. + +send_n_burst(0, _, _Text, _Class) -> + ok; +send_n_burst(N, seq, Text, Class) -> + ok = logger:Class(Text, ?domain), + send_n_burst(N-1, seq, Text, Class); +send_n_burst(N, {spawn,Ps,TO}, Text, Class) -> + ct:pal("~w processes each sending ~w messages", [Ps,N]), + MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end, + monitor(process,spawn_link(per_proc_fun(N,Text,Class,X))) + end || X <- lists:seq(1,Ps)], + lists:foreach(fun(MRef) -> + receive + {'DOWN', MRef, _, _, _} -> + ok + end + end, MRefs), + ct:pal("Message burst sent", []), + ok. + +send_t_burst(T0, T, Text, Class, N) -> + T1 = erlang:monotonic_time(millisecond), + if (T1-T0) > T -> + N; + true -> + ok = logger:Class(Text, ?domain), + send_t_burst(T0, T, Text, Class, N+1) + end. + +per_proc_fun(N,Text,Class,X) when X rem 2 == 0 -> + fun() -> + process_flag(priority,high), + send_n_burst(N, seq, Text, Class) + end; +per_proc_fun(N,Text,Class,_) -> + fun() -> + send_n_burst(N, seq, Text, Class) + end. + +%%%----------------------------------------------------------------- +%%% Formatter callback +%%% Using this to send the formatted string back to the test case +%%% process - so it can check for logged events. 
+format(_,bad_return) -> + bad_return; +format(_,crash) -> + erlang:error(formatter_crashed); +format(#{msg:={string,String0}},no_nl) -> + String = unicode:characters_to_list(String0), + String; +format(#{msg:={string,String0}},nl) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={string,String0}},op) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={report,#{label:={supervisor,progress}}}},op) -> + ""; +format(#{msg:={report,#{label:={gen_server,terminate}}}},op) -> + ""; +format(#{msg:={report,#{label:={proc_lib,crash}}}},op) -> + ""; +format(#{msg:={F,A}},OpOrPid) when is_list(F), is_list(A) -> + String = lists:flatten(io_lib:format(F,A)), + if is_pid(OpOrPid) -> OpOrPid ! {log,String}; + true -> ok + end, + String++"\n"; +format(#{msg:={string,String0}},Pid) -> + String = unicode:characters_to_list(String0), + Pid ! {log,String}, + String++"\n". + +add_remove_instance_nofile(Type) -> + ok = logger:add_handler(?MODULE,logger_std_h, + #{config => #{type => Type}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + Pid = whereis(h_proc_name()), + true = is_pid(Pid), + group_leader(group_leader(),Pid), % to get printouts in test log + logger:notice(M1=?msg,?domain), + ?check(M1), + %% check that sync doesn't do damage even if not relevant + ok = logger_std_h:filesync(?MODULE), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(h_proc_name()), + logger:notice(?msg,?domain), + ?check_no_log, + ok. + +logger_std_h_remove() -> + logger:remove_handler(?MODULE). +logger_std_h_remove(Id) -> + logger:remove_handler(Id). + +try_read_file(FileName, Expected, Time) when Time > 0 -> + case file:read_file(FileName) of + Expected -> + ok; + Error = {error,_Reason} -> + ct:pal("Can't read ~tp: ~tp", [FileName,Error]), + erlang:error(Error); + Got -> + ct:pal("try_read_file got ~tp", [Got]), + timer:sleep(500), + try_read_file(FileName, Expected, Time-500) + end; +try_read_file(FileName, Expected, _) -> + ct:pal("Missing pattern ~tp in ~tp", [Expected,FileName]), + erlang:error({error,missing_expected_pattern}). + +try_match_file(FileName, Pattern, Time) -> + try_match_file(FileName, Pattern, Time, <<>>). + +try_match_file(FileName, Pattern, Time, _) when Time > 0 -> + case file:read_file(FileName) of + {ok, Bin} -> + case re:run(Bin,Pattern,[{capture,none}]) of + match -> + unicode:characters_to_list(Bin); + _ -> + timer:sleep(100), + try_match_file(FileName, Pattern, Time-100, Bin) + end; + Error -> + erlang:error(Error) + end; +try_match_file(_,Pattern,_,Incorrect) -> + ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n", + [Pattern,Incorrect]), + erlang:error({error,not_matching_pattern,Pattern,Incorrect}). + +repeat_until_ok(Fun, N) -> + repeat_until_ok(Fun, 0, N, undefined). + +repeat_until_ok(_Fun, Stop, Stop, Reason) -> + {fails,Reason}; + +repeat_until_ok(Fun, C, Stop, FirstReason) -> + if C > 0 -> timer:sleep(5000); + true -> ok + end, + try Fun() of + Result -> + {ok,{C,Result}} + catch + _:Reason:Stack -> + ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]), + if FirstReason == undefined -> + repeat_until_ok(Fun, C+1, Stop, {Reason,Stack}); + true -> + repeat_until_ok(Fun, C+1, Stop, FirstReason) + end + end. + + +%%%----------------------------------------------------------------- +%%% +start_op_trace() -> + TraceFun = fun({trace,_,call,{_Mod,Func,Details}}, Pid) -> + Pid ! 
{trace_call,Func,Details}, + Pid; + ({trace,_,return_from,{_Mod,Func,_},RetVal}, Pid) -> + Pid ! {trace_return,Func,RetVal}, + Pid + end, + TRecvPid = spawn_link(fun() -> trace_receiver(5000) end), + {ok,_} = dbg:tracer(process, {TraceFun, TRecvPid}), + + {ok,_} = dbg:p(whereis(h_proc_name()), [c]), + {ok,_} = dbg:p(self(), [c]), + + MS1 = dbg:fun2ms(fun([_]) -> return_trace() end), + {ok,_} = dbg:tpl(logger_h_common, check_load, 1, MS1), + + {ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []), + + MS2 = dbg:fun2ms(fun([_,mode]) -> return_trace() end), + {ok,_} = dbg:tpl(ets, lookup, 2, MS2), + + ct:pal("Tracing started!", []), + TRecvPid. + +stop_op_trace(TRecvPid) -> + dbg:stop_clear(), + unlink(TRecvPid), + exit(TRecvPid, kill), + ok. + +find_mode(flush, Events) -> + lists:any(fun({trace_call,flush_log_requests,[_,_]}) -> true; + (_) -> false + end, Events); +find_mode(Mode, Events) -> + lists:keymember([{mode,Mode}], 3, Events). + +%% find_switch(_From, To, Events) -> +%% try lists:foldl(fun({trace_return,check_load,{To,_,_,_}}, +%% {trace_call,check_load,[#{mode := From}]}) -> +%% throw(match); +%% (Event, _) -> +%% Event +%% end, undefined, Events) of +%% _ -> false +%% catch +%% throw:match -> true +%% end. + +analyse_trace(TRecvPid, TestFun) -> + TRecvPid ! {test,self(),TestFun}, + receive + {result,TRecvPid,Result} -> + Result + after + 60000 -> + fails + end. + +trace_receiver(IdleT) -> + Msgs = receive_until_idle(IdleT, 5, []), + ct:pal("~w trace events generated", [length(Msgs)]), + analyse(Msgs). + +receive_until_idle(IdleT, WaitN, Msgs) -> + receive + Msg = {trace_call,_,_} -> + receive_until_idle(IdleT, 5, [Msg | Msgs]); + Msg = {trace_return,_,_} -> + receive_until_idle(IdleT, 5, [Msg | Msgs]) + after + IdleT -> + if WaitN == 0 -> + Msgs; + true -> + receive_until_idle(IdleT, WaitN-1, Msgs) + end + end. + +analyse(Msgs) -> + receive + {test,From,TestFun} -> + From ! {result,self(),TestFun(Msgs)}, + analyse(Msgs) + end. + +start_tracer(Trace,Expected) -> + Pid = self(), + FileCtrlPid = maps:get(file_ctrl_pid, + maps:get(handler_state, + logger_std_h:info(?MODULE))), + dbg:tracer(process,{fun tracer/2,{Pid,Expected}}), + dbg:p(whereis(h_proc_name()),[c]), + dbg:p(FileCtrlPid,[c]), + tpl(Trace), + ok. + +tpl([{M,F,A}|Trace]) -> + {ok,Match} = dbg:tpl(M,F,A,[]), + case lists:keyfind(matched,1,Match) of + {_,_,1} -> + ok; + _ -> + dbg:stop_clear(), + throw({skip,"Can't trace "++atom_to_list(M)++":"++ + atom_to_list(F)++"/"++integer_to_list(A)}) + end, + tpl(Trace); +tpl([]) -> + ok. + +tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]}}, + {Pid,[{Mod,Func,Op}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Op}); +tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}}, + {Pid,[{Mod,Func,Data}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Data}); +tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func}); +tracer({trace,_,call,Call}, {Pid,Expected}) -> + ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]), + Pid ! {tracer_got_unexpected,Call,Expected}, + {Pid,Expected}. + +maybe_tracer_done(Pid,[]=Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + Pid ! {tracer_done,0}, + {Pid,Expected}; +maybe_tracer_done(Pid,[{no_more,T}]=Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + Pid ! {tracer_done,T}, + {Pid,Expected}; +maybe_tracer_done(Pid,Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + {Pid,Expected}. 
+ +check_tracer(T) -> + check_tracer(T,fun() -> ct:fail({timeout,tracer}) end). +check_tracer(T,TimeoutFun) -> + receive + {tracer_done,Delay} -> + %% Possibly wait Delay ms to check that no unexpected + %% traces are received + check_tracer(Delay,fun() -> ok end); + {tracer_got_unexpected,Got,Expected} -> + dbg:stop_clear(), + ct:fail({tracer_got_unexpected,Got,Expected}) + after T -> + dbg:stop_clear(), + TimeoutFun() + end. + +escape([$+|Rest]) -> + [$\\,$+|escape(Rest)]; +escape([H|T]) -> + [H|escape(T)]; +escape([]) -> + []. + +h_proc_name() -> + h_proc_name(?MODULE). +h_proc_name(Name) -> + ?name_to_reg_name(logger_std_h,Name). + +wait_for_process_up(T) -> + wait_for_process_up(?MODULE, h_proc_name(), T). + +wait_for_process_up(Name, RegName, T) -> + N = (T div 500) + 1, + wait_for_process_up1(Name, RegName, N). + +wait_for_process_up1(_Name, _RegName, 0) -> + error; +wait_for_process_up1(Name, RegName, N) -> + timer:sleep(500), + case whereis(RegName) of + Pid when is_pid(Pid) -> + case logger:get_handler_config(Name) of + {ok,_} -> + %% ct:pal("Process ~p up (~p tries left)",[Name,N]), + {ok,Pid}; + _ -> + wait_for_process_up1(Name, RegName, N-1) + end; + undefined -> + %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]), + wait_for_process_up1(Name, RegName, N-1) + end. + +filesync_rep_int() -> + case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of + true -> 5500; + false -> ?FILESYNC_REPEAT_INTERVAL + 500 + end. + + +file_delete(Log) -> + file:delete(Log). + diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl new file mode 100644 index 0000000000..81eb9ce5eb --- /dev/null +++ b/lib/kernel/test/logger_test_lib.erl @@ -0,0 +1,82 @@ +% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_test_lib). + +-include_lib("kernel/src/logger_internal.hrl"). + +-export([setup/2, log/3, sync_and_read/3]). + +-export([init/2, + pre_init_per_suite/3, pre_init_per_testcase/4, + post_end_per_testcase/5, post_end_per_suite/3]). + +setup(Config,Vars) -> + FuncStr = lists:concat([proplists:get_value(suite, Config), "_", + proplists:get_value(tc, Config)]), + ConfigFileName = filename:join(proplists:get_value(priv_dir, Config), FuncStr), + file:write_file(ConfigFileName ++ ".config", io_lib:format("[{kernel, ~p}].",[Vars])), + case test_server:start_node(proplists:get_value(tc, Config), slave, + [{args, ["-pa ",filename:dirname(code:which(?MODULE)), + " -boot start_sasl -kernel start_timer true " + "-config ",ConfigFileName]}]) of + {ok, Node} -> + L = rpc:call(Node, logger, get_config, []), + ct:log("~p",[L]), + {ok, L, Node}; + {error, Reason} -> + ct:log("Failed to start node: ~p",[Reason]), + error + end. + +log(Node, F, A) -> + log(Node, logger, F, A). +log(Node, M, F, A) -> + MD = #{ gl => rpc:call(Node, erlang, whereis, [logger]) }, + rpc:call(Node, M, F, A ++ [MD]). 
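setup/2 above is what the handler suites use to boot a peer node with a given kernel logger configuration; a typical call, modelled on the suites earlier in this diff (Config is the common_test configuration proplist and Log any file path writable by the node, both assumed here):

    %% Start a node whose default handler logs to Log, emit one event on
    %% it, then sync the handler and read the file back.
    {ok,_LoggerConfig,Node} =
        logger_test_lib:setup(Config,
                              [{logger,[{handler,default,logger_std_h,
                                         #{config => #{type => {file,Log}}}}]}]),
    ok = logger_test_lib:log(Node, notice, ["hello"]),
    {ok,_Bin} = logger_test_lib:sync_and_read(Node, file, Log).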
+ +sync_and_read(Node,disk_log,Log) -> + rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]), + file:read_file(Log ++ ".1"); +sync_and_read(Node, file,Log) -> + ok = rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]), + file:read_file(Log). + + +init(_, _) -> + {ok, []}. + +pre_init_per_suite(_Suite, Config, State) -> + {[{nodes, nodes()} | Config], State}. + +pre_init_per_testcase(Suite, TC, Config, State) -> + cleanup(Config), + {[{suite, Suite}, {tc, TC} | Config], State}. + +post_end_per_testcase(_, _TC, Config, Res, State) -> + cleanup(Config), + {Res, State}. + +post_end_per_suite(_, Config, State) -> + cleanup(Config), + {Config, State}. + +cleanup(Config) -> + [test_server:stop_node(N) || N <- nodes(), + not lists:member(N, proplists:get_value(nodes, Config))]. diff --git a/lib/kernel/test/os_SUITE.erl b/lib/kernel/test/os_SUITE.erl index a0bcde68db..710b9b115c 100644 --- a/lib/kernel/test/os_SUITE.erl +++ b/lib/kernel/test/os_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -22,7 +22,8 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, init_per_testcase/2,end_per_testcase/2]). --export([space_in_cwd/1, quoting/1, cmd_unicode/1, space_in_name/1, bad_command/1, +-export([space_in_cwd/1, quoting/1, cmd_unicode/1, + null_in_command/1, space_in_name/1, bad_command/1, find_executable/1, unix_comment_in_command/1, deep_list_command/1, large_output_command/1, background_command/0, background_command/1, message_leak/1, close_stdin/0, close_stdin/1, max_size_command/1, @@ -35,7 +36,8 @@ suite() -> {timetrap,{minutes,1}}]. all() -> - [space_in_cwd, quoting, cmd_unicode, space_in_name, bad_command, + [space_in_cwd, quoting, cmd_unicode, null_in_command, + space_in_name, bad_command, find_executable, unix_comment_in_command, deep_list_command, large_output_command, background_command, message_leak, close_stdin, max_size_command, perf_counter_api]. @@ -126,6 +128,14 @@ cmd_unicode(Config) when is_list(Config) -> [] = receive_all(), ok. +null_in_command(Config) -> + {Ok, Error} = case os:type() of + {win32,_} -> {"dir", "di\0r"}; + _ -> {"ls", "l\0s"} + end, + true = is_list(try os:cmd(Ok) catch Class0:_ -> Class0 end), + error = try os:cmd(Error) catch Class1:_ -> Class1 end, + ok. %% Test that program with a space in its name can be executed. space_in_name(Config) when is_list(Config) -> @@ -217,8 +227,8 @@ find_executable(Config) when is_list(Config) -> DataDir = proplists:get_value(data_dir, Config), %% Smoke test. - case lib:progname() of - erl -> + case ct:get_progname() of + "erl" -> ErlPath = os:find_executable("erl"), true = is_list(ErlPath), true = filelib:is_regular(ErlPath); @@ -378,7 +388,7 @@ comp(Expected, Got) -> ct:fail(failed) end. -%% Like lib:nonl/1, but strips \r as well as \n. +%% strips \n and \r\n from end of string strip_nl([$\r, $\n]) -> []; strip_nl([$\n]) -> []; diff --git a/lib/kernel/test/pdict_SUITE.erl b/lib/kernel/test/pdict_SUITE.erl index d105952df9..3685e51c10 100644 --- a/lib/kernel/test/pdict_SUITE.erl +++ b/lib/kernel/test/pdict_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1999-2016. All Rights Reserved. +%% Copyright Ericsson AB 1999-2018. All Rights Reserved. 
%% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -33,6 +33,7 @@ init_per_group/2,end_per_group/2, mixed/1, literals/1, + destructive/1, simple/1, complicated/1, heavy/1, simple_all_keys/1, info/1]). -export([init_per_testcase/2, end_per_testcase/2]). -export([other_process/2]). @@ -52,6 +53,7 @@ suite() -> all() -> [simple, complicated, heavy, simple_all_keys, info, literals, + destructive, mixed]. groups() -> @@ -367,6 +369,36 @@ match_keys(All) -> ok. +%% Test destructive put optimization of immed values +%% does not affect get/0 or process_info. +destructive(_Config) -> + Keys = lists:seq(1,100), + [put(Key, 17) || Key <- Keys], + Get1 = get(), + {dictionary,PI1} = process_info(self(), dictionary), + + [begin + {Key, 17} = lists:keyfind(Key, 1, Get1), + {Key, 17} = lists:keyfind(Key, 1, PI1) + end + || Key <- Keys], + + [17 = put(Key, 42) || Key <- Keys], % Mutate + + Get2 = get(), + {dictionary,PI2} = process_info(self(), dictionary), + + [begin + {Key, 17} = lists:keyfind(Key, 1, Get1), + {Key, 17} = lists:keyfind(Key, 1, PI1), + {Key, 42} = lists:keyfind(Key, 1, Get2), + {Key, 42} = lists:keyfind(Key, 1, PI2) + + end + || Key <- Keys], + + ok. + %% Do random mixed put/erase to test grow/shrink %% Written for a temporary bug in gc during shrink mixed(_Config) -> diff --git a/lib/kernel/test/prim_file_SUITE.erl b/lib/kernel/test/prim_file_SUITE.erl index e88d42788f..2f465a15bc 100644 --- a/lib/kernel/test/prim_file_SUITE.erl +++ b/lib/kernel/test/prim_file_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2000-2017. All Rights Reserved. +%% Copyright Ericsson AB 2000-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -21,38 +21,23 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, init_per_testcase/2, end_per_testcase/2, read_write_file/1, free_memory/0]). --export([cur_dir_0a/1, cur_dir_0b/1, - cur_dir_1a/1, cur_dir_1b/1, - make_del_dir_a/1, make_del_dir_b/1, - pos1/1, pos2/1]). --export([close/1, - delete_a/1, delete_b/1]). --export([ open1/1, modes/1]). --export([ - file_info_basic_file_a/1, file_info_basic_file_b/1, - file_info_basic_directory_a/1, file_info_basic_directory_b/1, - file_info_bad_a/1, file_info_bad_b/1, - file_info_times_a/1, file_info_times_b/1, - file_write_file_info_a/1, file_write_file_info_b/1, - file_read_file_info_opts/1, file_write_file_info_opts/1, - file_write_read_file_info_opts/1 - ]). --export([rename_a/1, rename_b/1, - access/1, truncate/1, datasync/1, sync/1, +-export([cur_dir_0/1, cur_dir_1/1, + make_del_dir/1, pos1/1, pos2/1]). +-export([close/1, delete/1]). +-export([open1/1, modes/1]). +-export([file_info_basic_file/1, file_info_basic_directory/1, file_info_bad/1, + file_info_times/1, file_write_file_info/1, + file_read_file_info_opts/1, file_write_file_info_opts/1, + file_write_read_file_info_opts/1]). +-export([rename/1, access/1, truncate/1, datasync/1, sync/1, read_write/1, pread_write/1, append/1, exclusive/1]). --export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]). +-export([e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]). --export([ read_not_really_compressed/1, - read_compressed/1, write_compressed/1, - compress_errors/1]). 
- --export([ - make_link_a/1, make_link_b/1, - read_link_info_for_non_link/1, - symlinks_a/1, symlinks_b/1, - list_dir_limit/1, - list_dir_error/1, - list_dir/1]). +-export([make_link/1, read_link_info_for_non_link/1, + symlinks/1, + list_dir_limit/1, + list_dir_error/1, + list_dir/1]). -export([advise/1]). -export([large_write/1]). @@ -67,29 +52,16 @@ -define(PRIM_FILE, prim_file). -%% Calls ?PRIM_FILE:F with arguments A and an optional handle H -%% as first argument, unless the handle is [], i.e no handle. -%% This is a macro to give the compiler and thereby -%% the cross reference tool the possibility to interprete -%% the call, since M, F, A (or [H | A]) can all be known at -%% compile time. --define(PRIM_FILE_call(F, H, A), - case H of - [] -> apply(?PRIM_FILE, F, A); - _ -> apply(?PRIM_FILE, F, [H | A]) - end). - suite() -> []. all() -> [read_write_file, {group, dirs}, {group, files}, - delete_a, delete_b, rename_a, rename_b, {group, errors}, - {group, compression}, {group, links}, list_dir_limit, list_dir]. + delete, rename, {group, errors}, {group, links}, + list_dir_limit, list_dir]. groups() -> [{dirs, [], - [make_del_dir_a, make_del_dir_b, cur_dir_0a, cur_dir_0b, - cur_dir_1a, cur_dir_1b]}, + [make_del_dir, cur_dir_0, cur_dir_1]}, {files, [], [{group, open}, {group, pos}, {group, file_info}, truncate, sync, datasync, advise, large_write, allocate]}, @@ -98,22 +70,14 @@ groups() -> append, exclusive]}, {pos, [], [pos1, pos2]}, {file_info, [], - [file_info_basic_file_a, file_info_basic_file_b, - file_info_basic_directory_a, - file_info_basic_directory_b, file_info_bad_a, - file_info_bad_b, file_info_times_a, file_info_times_b, - file_write_file_info_a, file_write_file_info_b, - file_read_file_info_opts, file_write_file_info_opts, - file_write_read_file_info_opts + [file_info_basic_file,file_info_basic_directory, file_info_bad, + file_info_times, file_write_file_info, file_read_file_info_opts, + file_write_file_info_opts, file_write_read_file_info_opts ]}, {errors, [], [e_delete, e_rename, e_make_dir, e_del_dir]}, - {compression, [], - [read_compressed, read_not_really_compressed, - write_compressed, compress_errors]}, {links, [], - [make_link_a, make_link_b, read_link_info_for_non_link, - symlinks_a, symlinks_b, list_dir_error]}]. + [make_link, read_link_info_for_non_link, symlinks, list_dir_error]}]. init_per_testcase(large_write, Config) -> {ok, Started} = application:ensure_all_started(os_mon), @@ -246,39 +210,27 @@ read_write_file(Config) when is_list(Config) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -make_del_dir_a(Config) when is_list(Config) -> - make_del_dir(Config, [], "_a"). - -make_del_dir_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = make_del_dir(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - %% Just to make sure the state of the server makes a difference - {error, einval} = ?PRIM_FILE_call(get_cwd, Handle, []), - Result. 
- -make_del_dir(Config, Handle, Suffix) -> +make_del_dir(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir,Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE) - ++"_mk-dir"++Suffix), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), - {error, eexist} = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), - ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]), - {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [NewDir]), + ++"_mk-dir"), + ok = ?PRIM_FILE:make_dir(NewDir), + {error, eexist} = ?PRIM_FILE:make_dir(NewDir), + ok = ?PRIM_FILE:del_dir(NewDir), + {error, enoent} = ?PRIM_FILE:del_dir(NewDir), %% Make sure we are not in a directory directly under test_server %% as that would result in eacces errors when trying to delete '..', %% because there are processes having that directory as current. - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), - {ok, CurrentDir} = ?PRIM_FILE_call(get_cwd, Handle, []), + ok = ?PRIM_FILE:make_dir(NewDir), + {ok, CurrentDir} = ?PRIM_FILE:get_cwd(), case {os:type(), length(NewDir) >= 260 } of {{win32,_}, true} -> io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH)\n", []), io:format("\nNewDir = ~p\n", [NewDir]); _ -> - ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]) + ok = ?PRIM_FILE:set_cwd(NewDir) end, try %% Check that we get an error when trying to create... @@ -286,14 +238,14 @@ make_del_dir(Config, Handle, Suffix) -> NewDir2 = filename:join(RootDir, atom_to_list(?MODULE) ++"_mk-dir-noexist/foo"), - {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [NewDir2]), + {error, enoent} = ?PRIM_FILE:make_dir(NewDir2), %% a nameless directory - {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [""]), + {error, enoent} = ?PRIM_FILE:make_dir(""), %% a directory with illegal name - {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, ['mk-dir']), + {error, badarg} = ?PRIM_FILE:make_dir('mk-dir'), %% a directory with illegal name, even if it's a (bad) list - {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, [[1,2,3,{}]]), + {error, badarg} = ?PRIM_FILE:make_dir([1,2,3,{}]), %% Maybe this isn't an error, exactly, but worth mentioning anyway: %% ok = ?PRIM_FILE:make_dir([$f,$o,$o,0,$b,$a,$r])), @@ -306,125 +258,101 @@ make_del_dir(Config, Handle, Suffix) -> %% Try deleting some bad directories %% Deleting the parent directory to the current, sounds dangerous, huh? %% Don't worry ;-) the parent directory should never be empty, right? - case ?PRIM_FILE_call(del_dir, Handle, [".."]) of + case ?PRIM_FILE:del_dir("..") of {error, eexist} -> ok; {error, eacces} -> ok; %OpenBSD {error, einval} -> ok %FreeBSD end, - {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [""]), - {error, badarg} = ?PRIM_FILE_call(del_dir, Handle, [[3,2,1,{}]]) + {error, enoent} = ?PRIM_FILE:del_dir(""), + {error, badarg} = ?PRIM_FILE:del_dir([3,2,1,{}]) after - ok = ?PRIM_FILE_call(set_cwd, Handle, [CurrentDir]) + ok = ?PRIM_FILE:set_cwd(CurrentDir) end, ok. -cur_dir_0a(Config) when is_list(Config) -> - cur_dir_0(Config, []). - -cur_dir_0b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = cur_dir_0(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. 
- -cur_dir_0(Config, Handle) -> +cur_dir_0(Config) when is_list(Config) -> %% Find out the current dir, and cd to it ;-) - {ok,BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []), + {ok,BaseDir} = ?PRIM_FILE:get_cwd(), Dir1 = BaseDir ++ "", %% Check that it's a string - ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]), - DirName = atom_to_list(?MODULE) ++ - case Handle of - [] -> - "_curdir"; - _ -> - "_curdir_h" - end, + ok = ?PRIM_FILE:set_cwd(Dir1), + DirName = atom_to_list(?MODULE) ++ "_curdir", %% Make a new dir, and cd to that RootDir = proplists:get_value(priv_dir,Config), NewDir = filename:join(RootDir, DirName), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), + ok = ?PRIM_FILE:make_dir(NewDir), case {os:type(), length(NewDir) >= 260} of {{win32,_}, true} -> io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH):\n"), io:format("\nNewDir = ~p\n", [NewDir]); _ -> io:format("cd to ~s",[NewDir]), - ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]), + ok = ?PRIM_FILE:set_cwd(NewDir), %% Create a file in the new current directory, and check that it %% really is created there UncommonName = "uncommon.fil", {ok,Fd} = ?PRIM_FILE:open(UncommonName, [read, write]), ok = ?PRIM_FILE:close(Fd), - {ok,NewDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]), + {ok,NewDirFiles} = ?PRIM_FILE:list_dir("."), true = lists:member(UncommonName,NewDirFiles), %% Delete the directory and return to the old current directory %% and check that the created file isn't there (too!) expect({error, einval}, {error, eacces}, {error, eexist}, - ?PRIM_FILE_call(del_dir, Handle, [NewDir])), - ?PRIM_FILE_call(delete, Handle, [UncommonName]), - {ok,[]} = ?PRIM_FILE_call(list_dir, Handle, ["."]), - ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]), + ?PRIM_FILE:del_dir(NewDir)), + ?PRIM_FILE:delete(UncommonName), + {ok,[]} = ?PRIM_FILE:list_dir("."), + ok = ?PRIM_FILE:set_cwd(Dir1), io:format("cd back to ~s",[Dir1]), - ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]), - {error, enoent} = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]), - ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]), + ok = ?PRIM_FILE:del_dir(NewDir), + {error, enoent} = ?PRIM_FILE:set_cwd(NewDir), + ok = ?PRIM_FILE:set_cwd(Dir1), io:format("cd back to ~s",[Dir1]), - {ok,OldDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]), + {ok,OldDirFiles} = ?PRIM_FILE:list_dir("."), false = lists:member(UncommonName,OldDirFiles) end, %% Try doing some bad things {error, badarg} = - ?PRIM_FILE_call(set_cwd, Handle, [{foo,bar}]), + ?PRIM_FILE:set_cwd({foo,bar}), {error, enoent} = - ?PRIM_FILE_call(set_cwd, Handle, [""]), + ?PRIM_FILE:set_cwd(""), {error, enoent} = - ?PRIM_FILE_call(set_cwd, Handle, [".......a......"]), + ?PRIM_FILE:set_cwd(".......a......"), {ok,BaseDir} = - ?PRIM_FILE_call(get_cwd, Handle, []), %% Still there? + ?PRIM_FILE:get_cwd(), %% Still there? %% On Windows, there should only be slashes, no backslashes, %% in the return value of get_cwd(). %% (The test is harmless on Unix, because filenames usually %% don't contain backslashes.) - {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []), + {ok, BaseDir} = ?PRIM_FILE:get_cwd(), false = lists:member($\\, BaseDir), ok. %% Tests ?PRIM_FILE:get_cwd/1. -cur_dir_1a(Config) when is_list(Config) -> - cur_dir_1(Config, []). - -cur_dir_1b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = cur_dir_1(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. 
- -cur_dir_1(Config, Handle) -> +cur_dir_1(Config) when is_list(Config) -> case os:type() of {win32, _} -> - win_cur_dir_1(Config, Handle); + win_cur_dir_1(Config); _ -> {error, enotsup} = - ?PRIM_FILE_call(get_cwd, Handle, ["d:"]) + ?PRIM_FILE:get_cwd("d:") end, ok. -win_cur_dir_1(_Config, Handle) -> - {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []), +win_cur_dir_1(_Config) -> + {ok, BaseDir} = ?PRIM_FILE:get_cwd(), %% Get the drive letter from the current directory, %% and try to get current directory for that drive. [Drive, $:|_] = BaseDir, - {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, [[Drive, $:]]), + {ok, BaseDir} = ?PRIM_FILE:get_cwd([Drive, $:]), io:format("BaseDir = ~s\n", [BaseDir]), %% Unfortunately, there is no way to move away from the @@ -446,12 +374,12 @@ open1(Config) when is_list(Config) -> Name = filename:join(NewDir, "foo1.fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [read, write]), {ok,Fd2} = ?PRIM_FILE:open(Name, [read]), - Str = "{a,tuple}.\n", - Length = length(Str), - ?PRIM_FILE:write(Fd1,Str), + Bin = list_to_binary("{a,tuple}.\n"), + Length = byte_size(Bin), + ?PRIM_FILE:write(Fd1,Bin), {ok,0} = ?PRIM_FILE:position(Fd1,bof), - {ok, Str} = ?PRIM_FILE:read(Fd1,Length), - {ok, Str} = ?PRIM_FILE:read(Fd2,Length), + {ok, Bin} = ?PRIM_FILE:read(Fd1,Length), + {ok, Bin} = ?PRIM_FILE:read(Fd2,Length), ok = ?PRIM_FILE:close(Fd2), {ok,0} = ?PRIM_FILE:position(Fd1,bof), ok = ?PRIM_FILE:truncate(Fd1), @@ -471,13 +399,13 @@ modes(Config) when is_list(Config) -> ++"_open_modes"), ok = ?PRIM_FILE:make_dir(NewDir), Name1 = filename:join(NewDir, "foo1.fil"), - Marker = "hello, world", - Length = length(Marker), + Marker = <<"hello, world">>, + Length = byte_size(Marker), %% write {ok, Fd1} = ?PRIM_FILE:open(Name1, [write]), ok = ?PRIM_FILE:write(Fd1, Marker), - ok = ?PRIM_FILE:write(Fd1, ".\n"), + ok = ?PRIM_FILE:write(Fd1, <<".\n">>), ok = ?PRIM_FILE:close(Fd1), %% read @@ -496,12 +424,6 @@ modes(Config) when is_list(Config) -> {ok, Marker} = ?PRIM_FILE:read(Fd4, Length), ok = ?PRIM_FILE:close(Fd4), - %% read and binary - BinaryMarker = list_to_binary(Marker), - {ok, Fd5} = ?PRIM_FILE:open(Name1, [read, binary]), - {ok, BinaryMarker} = ?PRIM_FILE:read(Fd5, Length), - ok = ?PRIM_FILE:close(Fd5), - ok. close(Config) when is_list(Config) -> @@ -528,9 +450,9 @@ access(Config) when is_list(Config) -> Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_access.fil"), - Str = "ABCDEFGH", + Bin = <<"ABCDEFGH">>, {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), - ?PRIM_FILE:write(Fd1,Str), + ?PRIM_FILE:write(Fd1,Bin), ok = ?PRIM_FILE:close(Fd1), %% Check that we can't write when in read only mode {ok,Fd2} = ?PRIM_FILE:open(Name, [read]), @@ -542,7 +464,7 @@ access(Config) when is_list(Config) -> end, ok = ?PRIM_FILE:close(Fd2), {ok, Fd3} = ?PRIM_FILE:open(Name, [read]), - {ok, Str} = ?PRIM_FILE:read(Fd3,length(Str)), + {ok, Bin} = ?PRIM_FILE:read(Fd3,byte_size(Bin)), ok = ?PRIM_FILE:close(Fd3), ok. @@ -564,7 +486,7 @@ read_write(Config) when is_list(Config) -> ok. read_write_test(File) -> - Marker = "hello, world", + Marker = <<"hello, world">>, ok = ?PRIM_FILE:write(File, Marker), {ok, 0} = ?PRIM_FILE:position(File, 0), {ok, Marker} = ?PRIM_FILE:read(File, 100), @@ -590,15 +512,15 @@ pread_write(Config) when is_list(Config) -> ok. 
pread_write_test(File) -> - Marker = "hello, world", - Len = length(Marker), + Marker = <<"hello, world">>, + Len = byte_size(Marker), ok = ?PRIM_FILE:write(File, Marker), {ok, Marker} = ?PRIM_FILE:pread(File, 0, 100), eof = ?PRIM_FILE:pread(File, 100, 1), ok = ?PRIM_FILE:pwrite(File, Len, Marker), {ok, Marker} = ?PRIM_FILE:pread(File, Len, 100), eof = ?PRIM_FILE:pread(File, 100, 1), - MM = Marker ++ Marker, + MM = <<Marker/binary,Marker/binary>>, {ok, MM} = ?PRIM_FILE:pread(File, 0, 100), ok = ?PRIM_FILE:close(File), ok. @@ -655,24 +577,24 @@ pos1(Config) when is_list(Config) -> atom_to_list(?MODULE) ++"_pos1.fil"), {ok, Fd1} = ?PRIM_FILE:open(Name, [write]), - ?PRIM_FILE:write(Fd1,"ABCDEFGH"), + ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>), ok = ?PRIM_FILE:close(Fd1), {ok, Fd2} = ?PRIM_FILE:open(Name, [read]), %% Start pos is first char io:format("Relative positions"), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), {ok, 2} = ?PRIM_FILE:position(Fd2,{cur,1}), - {ok, "C"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1), {ok, 0} = ?PRIM_FILE:position(Fd2,{cur,-3}), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), %% Backwards from first char should be an error {ok,0} = ?PRIM_FILE:position(Fd2,{cur,-1}), {error, einval} = ?PRIM_FILE:position(Fd2,{cur,-1}), %% Reset position and move again {ok, 0} = ?PRIM_FILE:position(Fd2,0), {ok, 2} = ?PRIM_FILE:position(Fd2,{cur,2}), - {ok, "C"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1), %% Go a lot forwards {ok, 13} = ?PRIM_FILE:position(Fd2,{cur,10}), eof = ?PRIM_FILE:read(Fd2,1), @@ -684,27 +606,27 @@ pos1(Config) when is_list(Config) -> {ok, 8} = ?PRIM_FILE:position(Fd2,cur), eof = ?PRIM_FILE:read(Fd2,1), {ok, 7} = ?PRIM_FILE:position(Fd2,7), - {ok, "H"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1), {ok, 0} = ?PRIM_FILE:position(Fd2,0), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), {ok, 3} = ?PRIM_FILE:position(Fd2,3), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), {ok, 12} = ?PRIM_FILE:position(Fd2,12), eof = ?PRIM_FILE:read(Fd2,1), {ok, 3} = ?PRIM_FILE:position(Fd2,3), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), %% Try the {bof,X} notation {ok, 3} = ?PRIM_FILE:position(Fd2,{bof,3}), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), %% Try eof positions io:format("EOF positions"), {ok, 8} = ?PRIM_FILE:position(Fd2,{eof,0}), eof = ?PRIM_FILE:read(Fd2,1), {ok, 7} = ?PRIM_FILE:position(Fd2,{eof,-1}), - {ok, "H"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1), {ok, 0} = ?PRIM_FILE:position(Fd2,{eof,-8}), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), {error, einval} = ?PRIM_FILE:position(Fd2,{eof,-9}), ok. @@ -714,7 +636,7 @@ pos2(Config) when is_list(Config) -> atom_to_list(?MODULE) ++"_pos2.fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), - ?PRIM_FILE:write(Fd1,"ABCDEFGH"), + ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>), ok = ?PRIM_FILE:close(Fd1), {ok, Fd2} = ?PRIM_FILE:open(Name, [read]), {error, einval} = ?PRIM_FILE:position(Fd2,-1), @@ -722,35 +644,25 @@ pos2(Config) when is_list(Config) -> %% Make sure that we still can search after an error. {ok, 0} = ?PRIM_FILE:position(Fd2, 0), {ok, 3} = ?PRIM_FILE:position(Fd2, {bof,3}), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), io:format("DONE"), ok. 
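%% Illustrative sketch, not part of the patch: the updated cases above write
%% binaries and match ?PRIM_FILE:read/2 and pread results against <<...>>
%% rather than lists (the separate [read, binary] open-mode test is gone).
%% The file name below is made up for illustration.
binary_read_sketch() ->
    Name = "prim_file_sketch.fil",
    {ok, Fd} = prim_file:open(Name, [read, write]),
    ok = prim_file:write(Fd, <<"ABCDEFGH">>),
    {ok, 0} = prim_file:position(Fd, bof),
    {ok, <<"A">>} = prim_file:read(Fd, 1),
    {ok, 3} = prim_file:position(Fd, {cur, 2}),
    {ok, <<"D">>} = prim_file:read(Fd, 1),
    ok = prim_file:close(Fd),
    ok = prim_file:delete(Name).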
- -file_info_basic_file_a(Config) when is_list(Config) -> - file_info_basic_file(Config, [], "_a"). - -file_info_basic_file_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_basic_file(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_basic_file(Config, Handle, Suffix) -> +file_info_basic_file(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), %% Create a short file. Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_basic_test"++Suffix++".fil"), + ++"_basic_test"".fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), ?PRIM_FILE:write(Fd1, "foo bar"), ok = ?PRIM_FILE:close(Fd1), %% Test that the file has the expected attributes. %% The times are tricky, so we will save them to a separate test case. - {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]), + {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name), #file_info{size = Size, type = Type, access = Access, atime = AccessTime, mtime = ModifyTime} = FileInfo, @@ -768,39 +680,30 @@ file_info_basic_file(Config, Handle, Suffix) -> ok. -file_info_basic_directory_a(Config) when is_list(Config) -> - file_info_basic_directory(Config, []). - -file_info_basic_directory_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_basic_directory(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_basic_directory(Config, Handle) -> +file_info_basic_directory(Config) when is_list(Config) -> %% Note: filename:join/1 removes any trailing slash, %% which is essential for ?PRIM_FILE:read_file_info/1 to work on %% platforms such as Windows95. RootDir = filename:join([proplists:get_value(priv_dir, Config)]), %% Test that the RootDir directory has the expected attributes. - test_directory(RootDir, read_write, Handle), + test_directory(RootDir, read_write), %% Note that on Windows file systems, "/" or "c:/" are *NOT* directories. %% Therefore, test that ?PRIM_FILE:read_file_info/1 behaves %% as if they were directories. case os:type() of {win32, _} -> - test_directory("/", read_write, Handle), - test_directory("c:/", read_write, Handle), - test_directory("c:\\", read_write, Handle); + test_directory("/", read_write), + test_directory("c:/", read_write), + test_directory("c:\\", read_write); _ -> - test_directory("/", read, Handle) + test_directory("/", read) end, ok. -test_directory(Name, ExpectedAccess, Handle) -> - {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]), +test_directory(Name, ExpectedAccess) -> + {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name), #file_info{size = Size, type = Type, access = Access, atime = AccessTime, mtime = ModifyTime} = FileInfo, @@ -824,45 +727,24 @@ all_integers([]) -> %% Try something nonexistent. -file_info_bad_a(Config) when is_list(Config) -> - file_info_bad(Config, []). - -file_info_bad_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_bad(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_bad(Config, Handle) -> +file_info_bad(Config) when is_list(Config) -> RootDir = filename:join([proplists:get_value(priv_dir, Config)]), - {error, enoent} = - ?PRIM_FILE_call( - read_file_info, Handle, - [filename:join(RootDir, - atom_to_list(?MODULE)++"_nonexistent")]), + NonExistent = filename:join(RootDir, atom_to_list(?MODULE)++"_nonexistent"), + {error, enoent} = ?PRIM_FILE:read_file_info(NonExistent), ok. %% Test that the file times behave as they should. 
-file_info_times_a(Config) when is_list(Config) -> - file_info_times(Config, [], "_a"). - -file_info_times_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_times(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_times(Config, Handle, Suffix) -> +file_info_times(Config) when is_list(Config) -> %% We have to try this twice, since if the test runs across the change %% of a month the time diff calculations will fail. But it won't happen %% if you run it twice in succession. test_server:m_out_of_n( 1,2, - fun() -> file_info_int(Config, Handle, Suffix) end), + fun() -> file_info_int(Config) end), ok. -file_info_int(Config, Handle, Suffix) -> +file_info_int(Config) -> %% Note: filename:join/1 removes any trailing slash, %% which is essential for ?PRIM_FILE:read_file_info/1 to work on %% platforms such as Windows95. @@ -872,14 +754,14 @@ file_info_int(Config, Handle, Suffix) -> Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_file_info"++Suffix++".fil"), + ++"_file_info.fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), ?PRIM_FILE:write(Fd1,"foo"), %% check that the file got a modify date max a few seconds away from now {ok, #file_info{type = regular, atime = AccTime1, mtime = ModTime1}} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), Now = erlang:localtime(), io:format("Now ~p",[Now]), io:format("Open file Acc ~p Mod ~p",[AccTime1,ModTime1]), @@ -897,7 +779,7 @@ file_info_int(Config, Handle, Suffix) -> ok = ?PRIM_FILE:close(Fd1), {ok, #file_info{size = Size, type = regular, access = Access, atime = AccTime2, mtime = ModTime2}} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), io:format("Closed file Acc ~p Mod ~p",[AccTime2,ModTime2]), true = time_dist(ModTime1, ModTime2) >= 0, @@ -909,7 +791,7 @@ file_info_int(Config, Handle, Suffix) -> {ok, #file_info{size = DSize, type = directory, access = DAccess, atime = AccTime3, mtime = ModTime3}} = - ?PRIM_FILE_call(read_file_info, Handle, [RootDir]), + ?PRIM_FILE:read_file_info(RootDir), %% this dir was modified only a few secs ago io:format("Dir Acc ~p; Mod ~p; Now ~p", [AccTime3, ModTime3, Now]), @@ -936,16 +818,7 @@ filter_atime(Atime, Config) -> %% Test the write_file_info/2 function. -file_write_file_info_a(Config) when is_list(Config) -> - file_write_file_info(Config, [], "_a"). - -file_write_file_info_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_write_file_info(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_write_file_info(Config, Handle, Suffix) -> +file_write_file_info(Config) when is_list(Config) -> RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), @@ -955,16 +828,16 @@ file_write_file_info(Config, Handle, Suffix) -> Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_write_file_info_ro"++Suffix), + ++"_write_file_info_ro"), ok = ?PRIM_FILE:write_file(Name, "hello"), Time = {{1997, 01, 02}, {12, 35, 42}}, Info = #file_info{mode=8#400, atime=Time, mtime=Time, ctime=Time}, - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, Info]), + ok = ?PRIM_FILE:write_file_info(Name, Info), %% Read back the times. 
{ok, ActualInfo} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), #file_info{mode=_Mode, atime=ActAtime, mtime=Time, ctime=ActCtime} = ActualInfo, FilteredAtime = filter_atime(Time, Config), @@ -980,14 +853,11 @@ file_write_file_info(Config, Handle, Suffix) -> {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"), %% Make the file writable again. - - ?PRIM_FILE_call(write_file_info, Handle, - [Name, #file_info{mode=8#600}]), + ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}), ok = ?PRIM_FILE:write_file(Name, "hello again"), %% And unwritable. - ?PRIM_FILE_call(write_file_info, Handle, - [Name, #file_info{mode=8#400}]), + ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#400}), {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"), %% Write the times again. @@ -995,9 +865,9 @@ file_write_file_info(Config, Handle, Suffix) -> NewTime = {{1997, 02, 15}, {13, 18, 20}}, NewInfo = #file_info{atime=NewTime, mtime=NewTime, ctime=NewTime}, - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, NewInfo]), + ok = ?PRIM_FILE:write_file_info(Name, NewInfo), {ok, ActualInfo2} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), #file_info{atime=NewActAtime, mtime=NewTime, ctime=NewActCtime} = ActualInfo2, NewFilteredAtime = filter_atime(NewTime, Config), @@ -1012,14 +882,12 @@ file_write_file_info(Config, Handle, Suffix) -> %% Make the file writeable again, so that we can remove the %% test suites ... :-) - ?PRIM_FILE_call(write_file_info, Handle, - [Name, #file_info{mode=8#600}]), + ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}), ok. %% Test the write_file_info/3 function. file_write_file_info_opts(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), @@ -1028,7 +896,7 @@ file_write_file_info_opts(Config) when is_list(Config) -> lists:foreach(fun ({FI, Opts}) -> - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts]) + ok = ?PRIM_FILE:write_file_info(Name, FI, Opts) end, [ {#file_info{ mode=8#600, atime = Time, mtime = Time, ctime = Time}, Opts} || Opts <- [[{time, posix}]], @@ -1038,7 +906,7 @@ file_write_file_info_opts(Config) when is_list(Config) -> %% REM: determine date range dependent on time_t = Uint32 | Sint32 | Sint64 | Uint64 %% Determine time_t on os:type()? lists:foreach(fun ({FI, Opts}) -> - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts]) + ok = ?PRIM_FILE:write_file_info(Name, FI, Opts) end, [ {#file_info{ mode=8#400, atime = Time, mtime = Time, ctime = Time}, Opts} || Opts <- [[{time, universal}],[{time, local}]], Time <- [ @@ -1050,11 +918,9 @@ file_write_file_info_opts(Config) when is_list(Config) -> {{2037,2,3},{23,59,59}}, erlang:localtime() ]]), - ok = ?PRIM_FILE:stop(Handle), ok. file_read_file_info_opts(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), @@ -1063,41 +929,38 @@ file_read_file_info_opts(Config) when is_list(Config) -> lists:foreach(fun (Opts) -> - {ok,_} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]) + {ok,_} = ?PRIM_FILE:read_file_info(Name, Opts) end, [[{time, Type}] || Type <- [local, universal, posix]]), - ok = ?PRIM_FILE:stop(Handle), ok. %% Test the write and read back *_file_info/3 functions. 
file_write_read_file_info_opts(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_read_write_file_info_opts"), ok = ?PRIM_FILE:write_file(Name, "hello_opts2"), - ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]), - ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]), + ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]), + ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]), %% will not work on platforms with unsigned time_t - %ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]), - %ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]), - ok = file_write_read_file_info_opts(Handle, Name, 1, [{time, posix}]), + %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]), + %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]), + ok = file_write_read_file_info_opts(Name, 1, [{time, posix}]), %% will not work on platforms with unsigned time_t - %ok = file_write_read_file_info_opts(Handle, Name, -1, [{time, posix}]), - %ok = file_write_read_file_info_opts(Handle, Name, -300000, [{time, posix}]), - ok = file_write_read_file_info_opts(Handle, Name, 300000, [{time, posix}]), - ok = file_write_read_file_info_opts(Handle, Name, 0, [{time, posix}]), + %ok = file_write_read_file_info_opts(Name, -1, [{time, posix}]), + %ok = file_write_read_file_info_opts(Name, -300000, [{time, posix}]), + ok = file_write_read_file_info_opts(Name, 300000, [{time, posix}]), + ok = file_write_read_file_info_opts(Name, 0, [{time, posix}]), - ok = ?PRIM_FILE:stop(Handle), ok. -file_write_read_file_info_opts(Handle, Name, Mtime, Opts) -> - {ok, FI} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]), +file_write_read_file_info_opts(Name, Mtime, Opts) -> + {ok, FI} = ?PRIM_FILE:read_file_info(Name, Opts), FI2 = FI#file_info{ mtime = Mtime }, - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI2, Opts]), - {ok, FI3} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]), + ok = ?PRIM_FILE:write_file_info(Name, FI2, Opts), + {ok, FI3} = ?PRIM_FILE:read_file_info(Name, Opts), io:format("Expecting mtime = ~p, got ~p~n", [FI2#file_info.mtime, FI3#file_info.mtime]), FI2 = FI3, ok. 
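%% Illustrative sketch, not part of the patch: read_file_info/2 and
%% write_file_info/3 now take the options list directly instead of a server
%% handle, e.g. round-tripping a posix mtime as the helper above does.
%% Assumes #file_info{} from kernel/include/file.hrl is in scope; the file
%% name is made up for illustration.
posix_mtime_sketch() ->
    Name = "prim_file_sketch_info.fil",
    ok = prim_file:write_file(Name, "x"),
    {ok, FI} = prim_file:read_file_info(Name, [{time, posix}]),
    ok = prim_file:write_file_info(Name, FI#file_info{mtime = 0}, [{time, posix}]),
    {ok, #file_info{mtime = 0}} = prim_file:read_file_info(Name, [{time, posix}]),
    ok = prim_file:delete(Name).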
@@ -1175,8 +1038,8 @@ advise(Config) when is_list(Config) -> atom_to_list(?MODULE) ++"_advise.fil"), - Line1 = "Hello\n", - Line2 = "World!\n", + Line1 = <<"Hello\n">>, + Line2 = <<"World!\n">>, {ok, Fd} = ?PRIM_FILE:open(Advise, [write]), ok = ?PRIM_FILE:advise(Fd, 0, 0, normal), @@ -1226,7 +1089,7 @@ advise(Config) when is_list(Config) -> {ok, Fd9} = ?PRIM_FILE:open(Advise, [read]), Offset = 0, %% same as a 0 length in some implementations - Length = length(Line1) + length(Line2), + Length = byte_size(Line1) + byte_size(Line2), ok = ?PRIM_FILE:advise(Fd9, Offset, Length, sequential), {ok, Line1} = ?PRIM_FILE:read_line(Fd9), {ok, Line2} = ?PRIM_FILE:read_line(Fd9), @@ -1250,23 +1113,18 @@ do_large_write(Name) -> Chunk = <<0:ChunkSize/unit:8>>, Data = zip_data(lists:duplicate(Chunks, Chunk), Interleave), Size = Chunks * ChunkSize + Chunks, % 4 G + 32 - Wordsize = erlang:system_info(wordsize), - case prim_file:write_file(Name, Data) of - ok when Wordsize =:= 8 -> - {ok,#file_info{size=Size}} = file:read_file_info(Name), - {ok,Fd} = prim_file:open(Name, [read]), - check_large_write(Fd, ChunkSize, 0, Interleave); - {error,einval} when Wordsize =:= 4 -> - ok - end. + ok = ?PRIM_FILE:write_file(Name, Data), + {ok,#file_info{size=Size}} = file:read_file_info(Name), + {ok,Fd} = ?PRIM_FILE:open(Name, [read]), + check_large_write(Fd, ChunkSize, 0, Interleave). check_large_write(Fd, ChunkSize, Pos, [X|Interleave]) -> Pos1 = Pos + ChunkSize, - {ok,Pos1} = prim_file:position(Fd, {cur,ChunkSize}), - {ok,[X]} = prim_file:read(Fd, 1), + {ok,Pos1} = ?PRIM_FILE:position(Fd, {cur,ChunkSize}), + {ok,<<X>>} = ?PRIM_FILE:read(Fd, 1), check_large_write(Fd, ChunkSize, Pos1+1, Interleave); check_large_write(Fd, _, _, []) -> - eof = prim_file:read(Fd, 1), + eof = ?PRIM_FILE:read(Fd, 1), ok. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -1338,71 +1196,53 @@ allocate_and_assert(Fd, Offset, Length) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -delete_a(Config) when is_list(Config) -> - delete(Config, [], "_a"). - -delete_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = delete(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -delete(Config, Handle, Suffix) -> +delete(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir,Config), Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_delete"++Suffix++".fil"), + ++"_delete.fil"), {ok, Fd1} = ?PRIM_FILE:open(Name, [write]), ?PRIM_FILE:write(Fd1,"ok.\n"), ok = ?PRIM_FILE:close(Fd1), %% Check that the file is readable {ok, Fd2} = ?PRIM_FILE:open(Name, [read]), ok = ?PRIM_FILE:close(Fd2), - ok = ?PRIM_FILE_call(delete, Handle, [Name]), + ok = ?PRIM_FILE:delete(Name), %% Check that the file is not readable anymore {error, _} = ?PRIM_FILE:open(Name, [read]), %% Try deleting a nonexistent file - {error, enoent} = ?PRIM_FILE_call(delete, Handle, [Name]), + {error, enoent} = ?PRIM_FILE:delete(Name), ok. -rename_a(Config) when is_list(Config) -> - rename(Config, [], "_a"). - -rename_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = rename(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. 
- -rename(Config, Handle, Suffix) -> +rename(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir,Config), - FileName1 = atom_to_list(?MODULE)++"_rename"++Suffix++".fil", - FileName2 = atom_to_list(?MODULE)++"_rename"++Suffix++".ful", + FileName1 = atom_to_list(?MODULE)++"_rename.fil", + FileName2 = atom_to_list(?MODULE)++"_rename.ful", Name1 = filename:join(RootDir, FileName1), Name2 = filename:join(RootDir, FileName2), {ok,Fd1} = ?PRIM_FILE:open(Name1, [write]), ok = ?PRIM_FILE:close(Fd1), %% Rename, and check that it really changed name - ok = ?PRIM_FILE_call(rename, Handle, [Name1, Name2]), + ok = ?PRIM_FILE:rename(Name1, Name2), {error, _} = ?PRIM_FILE:open(Name1, [read]), {ok, Fd2} = ?PRIM_FILE:open(Name2, [read]), ok = ?PRIM_FILE:close(Fd2), %% Try renaming something to itself - ok = ?PRIM_FILE_call(rename, Handle, [Name2, Name2]), + ok = ?PRIM_FILE:rename(Name2, Name2), %% Try renaming something that doesn't exist {error, enoent} = - ?PRIM_FILE_call(rename, Handle, [Name1, Name2]), + ?PRIM_FILE:rename(Name1, Name2), %% Try renaming to something else than a string {error, badarg} = - ?PRIM_FILE_call(rename, Handle, [Name1, foobar]), + ?PRIM_FILE:rename(Name1, foobar), %% Move between directories DirName1 = filename:join(RootDir, atom_to_list(?MODULE) - ++"_rename_dir"++Suffix), + ++"_rename_dir"), DirName2 = filename:join(RootDir, atom_to_list(?MODULE) - ++"_second_rename_dir"++Suffix), + ++"_second_rename_dir"), Name1foo = filename:join(DirName1, "foo.fil"), Name2foo = filename:join(DirName2, "foo.fil"), Name2bar = filename:join(DirName2, "bar.dir"), @@ -1410,21 +1250,21 @@ rename(Config, Handle, Suffix) -> %% The name has to include the full file name, path is not enough expect( {error, eexist}, {error, eisdir}, - ?PRIM_FILE_call(rename, Handle, [Name2, DirName1])), + ?PRIM_FILE:rename(Name2, DirName1)), ok = - ?PRIM_FILE_call(rename, Handle, [Name2, Name1foo]), + ?PRIM_FILE:rename(Name2, Name1foo), %% Now rename the directory - ok = ?PRIM_FILE_call(rename, Handle, [DirName1, DirName2]), + ok = ?PRIM_FILE:rename(DirName1, DirName2), %% And check that the file is there now {ok,Fd3} = ?PRIM_FILE:open(Name2foo, [read]), ok = ?PRIM_FILE:close(Fd3), %% Try some dirty things now: move the directory into itself {error, Msg1} = - ?PRIM_FILE_call(rename, Handle, [DirName2, Name2bar]), + ?PRIM_FILE:rename(DirName2, Name2bar), io:format("Errmsg1: ~p",[Msg1]), %% move dir into a file in itself {error, Msg2} = - ?PRIM_FILE_call(rename, Handle, [DirName2, Name2foo]), + ?PRIM_FILE:rename(DirName2, Name2foo), io:format("Errmsg2: ~p",[Msg2]), ok. @@ -1460,13 +1300,14 @@ e_delete(Config) when is_list(Config) -> case os:type() of {win32, _} -> %% Remove a character device. - {error, eacces} = ?PRIM_FILE:delete("nul"); + expect({error, eacces}, {error, einval}, + ?PRIM_FILE:delete("nul")); _ -> ?PRIM_FILE:write_file_info( Base, #file_info {mode=0}), {error, eacces} = ?PRIM_FILE:delete(Afile), ?PRIM_FILE:write_file_info( - Base, #file_info {mode=8#600}) + Base, #file_info {mode=8#700}) end, ok. @@ -1602,7 +1443,7 @@ e_make_dir(Config) when is_list(Config) -> ?PRIM_FILE:write_file_info(Base, #file_info {mode=0}), {error, eacces} = ?PRIM_FILE:make_dir(filename:join(Base, "xxxx")), - ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#600}) + ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#700}) end, ok. 
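%% Illustrative note, not part of the patch: the mode restored after the
%% eacces checks changes from 8#600 to 8#700 because Base is a directory;
%% without the owner execute (search) bit the entries inside it cannot be
%% reached again, which would hinder later cleanup. Sketch:
restore_dir_mode_sketch(Dir) ->
    ok = prim_file:write_file_info(Dir, #file_info{mode = 8#700}),
    {ok, _Entries} = prim_file:list_dir(Dir).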
@@ -1652,170 +1493,24 @@ e_del_dir(Config) when is_list(Config) -> ?PRIM_FILE:write_file_info(Base, #file_info {mode=0}), {error, eacces} = ?PRIM_FILE:del_dir(ADirectory), ?PRIM_FILE:write_file_info( - Base, #file_info {mode=8#600}) + Base, #file_info {mode=8#700}) end, ok. -%% Trying reading and positioning from a compressed file. - -read_compressed(Config) when is_list(Config) -> - Data = proplists:get_value(data_dir, Config), - Real = filename:join(Data, "realmen.html.gz"), - {ok, Fd} = ?PRIM_FILE:open(Real, [read, compressed]), - try_read_file(Fd). - -%% Trying reading and positioning from an uncompressed file, -%% but with the compressed flag given. - -read_not_really_compressed(Config) when is_list(Config) -> - Data = proplists:get_value(data_dir, Config), - Priv = proplists:get_value(priv_dir, Config), - - %% The file realmen.html might have got CRs added (by WinZip). - %% Remove them, or the file positions will not be correct. - - Real = filename:join(Data, "realmen.html"), - RealPriv = filename:join(Priv, - atom_to_list(?MODULE)++"_realmen.html"), - {ok, RealDataBin} = ?PRIM_FILE:read_file(Real), - RealData = remove_crs(binary_to_list(RealDataBin), []), - ok = ?PRIM_FILE:write_file(RealPriv, RealData), - {ok, Fd} = ?PRIM_FILE:open(RealPriv, [read, compressed]), - try_read_file(Fd). - -remove_crs([$\r|Rest], Result) -> - remove_crs(Rest, Result); -remove_crs([C|Rest], Result) -> - remove_crs(Rest, [C|Result]); -remove_crs([], Result) -> - lists:reverse(Result). - -try_read_file(Fd) -> - %% Seek to the current position (nothing should happen). - - {ok, 0} = ?PRIM_FILE:position(Fd, 0), - {ok, 0} = ?PRIM_FILE:position(Fd, {cur, 0}), - - %% Read a few lines from a compressed file. - - ShouldBe = "<TITLE>Real Programmers Don't Use PASCAL</TITLE>\n", - {ok, ShouldBe} = ?PRIM_FILE:read(Fd, length(ShouldBe)), - - %% Now seek forward. - - {ok, 381} = ?PRIM_FILE:position(Fd, 381), - Back = "Back in the good old days -- the \"Golden Era\" " ++ - "of computers, it was\n", - {ok, Back} = ?PRIM_FILE:read(Fd, length(Back)), - - %% Try to search forward relative to the current position. - - {ok, CurPos} = ?PRIM_FILE:position(Fd, {cur, 0}), - RealPos = 4273, - {ok, RealPos} = ?PRIM_FILE:position(Fd, {cur, RealPos-CurPos}), - RealProg = "<LI> Real Programmers aren't afraid to use GOTOs.\n", - {ok, RealProg} = ?PRIM_FILE:read(Fd, length(RealProg)), - - %% Seek backward. - - AfterTitle = length("<TITLE>"), - {ok, AfterTitle} = ?PRIM_FILE:position(Fd, AfterTitle), - Title = "Real Programmers Don't Use PASCAL</TITLE>\n", - {ok, Title} = ?PRIM_FILE:read(Fd, length(Title)), - - %% Done. - - ?PRIM_FILE:close(Fd), - ok. - -write_compressed(Config) when is_list(Config) -> - Priv = proplists:get_value(priv_dir, Config), - MyFile = filename:join(Priv, - atom_to_list(?MODULE)++"_test.gz"), - - %% Write a file. - - {ok, Fd} = ?PRIM_FILE:open(MyFile, [write, compressed]), - {ok, 0} = ?PRIM_FILE:position(Fd, 0), - Prefix = "hello\n", - End = "end\n", - ok = ?PRIM_FILE:write(Fd, Prefix), - {ok, 143} = ?PRIM_FILE:position(Fd, 143), - ok = ?PRIM_FILE:write(Fd, End), - ok = ?PRIM_FILE:close(Fd), - - %% Read the file and verify the contents. - - {ok, Fd1} = ?PRIM_FILE:open(MyFile, [read, compressed]), - {ok, Prefix} = ?PRIM_FILE:read(Fd1, length(Prefix)), - Second = lists:duplicate(143-length(Prefix), 0) ++ End, - {ok, Second} = ?PRIM_FILE:read(Fd1, length(Second)), - ok = ?PRIM_FILE:close(Fd1), - - %% Ensure that the file is compressed. 
- - TotalSize = 143 + length(End), - case ?PRIM_FILE:read_file_info(MyFile) of - {ok, #file_info{size=Size}} when Size < TotalSize -> - ok; - {ok, #file_info{size=Size}} when Size == TotalSize -> - ct:fail(file_not_compressed) - end, - - %% Write again to ensure that the file is truncated. - - {ok, Fd2} = ?PRIM_FILE:open(MyFile, [write, compressed]), - NewString = "aaaaaaaaaaa", - ok = ?PRIM_FILE:write(Fd2, NewString), - ok = ?PRIM_FILE:close(Fd2), - {ok, Fd3} = ?PRIM_FILE:open(MyFile, [read, compressed]), - {ok, NewString} = ?PRIM_FILE:read(Fd3, 1024), - ok = ?PRIM_FILE:close(Fd3), - - ok. - -compress_errors(Config) when is_list(Config) -> - Data = proplists:get_value(data_dir, Config), - {error, enoent} = ?PRIM_FILE:open("non_existing__", - [compressed, read]), - {error, einval} = ?PRIM_FILE:open("non_existing__", - [compressed, read, write]), - - %% Read a corrupted .gz file. - - Corrupted = filename:join(Data, "corrupted.gz"), - {ok, Fd} = ?PRIM_FILE:open(Corrupted, [read, compressed]), - {error, eio} = ?PRIM_FILE:read(Fd, 100), - ?PRIM_FILE:close(Fd), - - ok. - - -%% Test creating a hard link. -make_link_a(Config) when is_list(Config) -> - make_link(Config, [], "_a"). - -%% Test creating a hard link. -make_link_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = make_link(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -make_link(Config, Handle, Suffix) -> +make_link(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE) - ++"_make_link"++Suffix), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), + ++"_make_link"), + ok = ?PRIM_FILE:make_dir(NewDir), Name = filename:join(NewDir, "a_file"), ok = ?PRIM_FILE:write_file(Name, "some contents\n"), Alias = filename:join(NewDir, "an_alias"), Result = - case ?PRIM_FILE_call(make_link, Handle, [Name, Alias]) of + case ?PRIM_FILE:make_link(Name, Alias) of {error, enotsup} -> {skipped, "Links not supported on this platform"}; ok -> @@ -1826,12 +1521,12 @@ make_link(Config, Handle, Suffix) -> %% since they are not used on symbolic links. {ok, Info} = - ?PRIM_FILE_call(read_link_info, Handle, [Name]), + ?PRIM_FILE:read_link_info(Name), {ok, Info} = - ?PRIM_FILE_call(read_link_info, Handle, [Alias]), + ?PRIM_FILE:read_link_info(Alias), #file_info{links = 2, type = regular} = Info, {error, eexist} = - ?PRIM_FILE_call(make_link, Handle, [Name, Alias]), + ?PRIM_FILE:make_link(Name, Alias), ok end, @@ -1843,30 +1538,19 @@ read_link_info_for_non_link(Config) when is_list(Config) -> {ok, #file_info{type=directory}} = ?PRIM_FILE:read_link_info("."), ok. -%% Test operations on symbolic links (for Unix). -symlinks_a(Config) when is_list(Config) -> - symlinks(Config, [], "_a"). - -%% Test operations on symbolic links (for Unix). -symlinks_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = symlinks(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. 
- -symlinks(Config, Handle, Suffix) -> +symlinks(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE) - ++"_make_symlink"++Suffix), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), + ++"_make_symlink"), + ok = ?PRIM_FILE:make_dir(NewDir), Name = filename:join(NewDir, "a_plain_file"), ok = ?PRIM_FILE:write_file(Name, "some stupid content\n"), Alias = filename:join(NewDir, "a_symlink_alias"), Result = - case ?PRIM_FILE_call(make_symlink, Handle, [Name, Alias]) of + case ?PRIM_FILE:make_symlink(Name, Alias) of {error, enotsup} -> {skipped, "Links not supported on this platform"}; {error, eperm} -> @@ -1874,20 +1558,20 @@ symlinks(Config, Handle, Suffix) -> {skipped, "Windows user not privileged to create links"}; ok -> {ok, Info1} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), {ok, Info1} = - ?PRIM_FILE_call(read_file_info, Handle, [Alias]), + ?PRIM_FILE:read_file_info(Alias), {ok, Info1} = - ?PRIM_FILE_call(read_link_info, Handle, [Name]), + ?PRIM_FILE:read_link_info(Name), #file_info{links = 1, type = regular} = Info1, {ok, Info2} = - ?PRIM_FILE_call(read_link_info, Handle, [Alias]), + ?PRIM_FILE:read_link_info(Alias), #file_info{links=1, type=symlink} = Info2, {ok, Name} = - ?PRIM_FILE_call(read_link, Handle, [Alias]), + ?PRIM_FILE:read_link(Alias), {ok, Name} = - ?PRIM_FILE_call(read_link_all, Handle, [Alias]), + ?PRIM_FILE:read_link_all(Alias), %% If all is good, delete dir again (avoid hanging dir on windows) rm_rf(?PRIM_FILE,NewDir), ok @@ -1907,10 +1591,9 @@ list_dir_limit(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE)++"_list_dir_limit"), - {ok, Handle1} = ?PRIM_FILE:start(), - ok = ?PRIM_FILE_call(make_dir, Handle1, [NewDir]), + ok = ?PRIM_FILE:make_dir(NewDir), Ref = erlang:start_timer(MaxTime*1000, self(), []), - Result = list_dir_limit_loop(NewDir, Handle1, Ref, MaxNumber, 0), + Result = list_dir_limit_loop(NewDir, Ref, MaxNumber, 0), Time = case erlang:cancel_timer(Ref) of false -> MaxTime; T -> MaxTime - (T div 1000) @@ -1920,21 +1603,18 @@ list_dir_limit(Config) when is_list(Config) -> {error, _Reason, N} -> N; _ -> 0 end, - {ok, Handle2} = ?PRIM_FILE:start(), - list_dir_limit_cleanup(NewDir, Handle2, Number, 0), - ok = ?PRIM_FILE:stop(Handle1), - ok = ?PRIM_FILE:stop(Handle2), + list_dir_limit_cleanup(NewDir, Number, 0), {ok, Number} = Result, {comment, "Created " ++ integer_to_list(Number) ++ " files in " ++ integer_to_list(Time) ++ " seconds."}. -list_dir_limit_loop(Dir, Handle, _Ref, N, Cnt) when Cnt >= N -> - list_dir_check(Dir, Handle, Cnt); -list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) -> +list_dir_limit_loop(Dir, _Ref, N, Cnt) when Cnt >= N -> + list_dir_check(Dir, Cnt); +list_dir_limit_loop(Dir, Ref, N, Cnt) -> receive {timeout, Ref, []} -> - list_dir_check(Dir, Handle, Cnt) + list_dir_check(Dir, Cnt) after 0 -> Name = integer_to_list(Cnt), case ?PRIM_FILE:write_file(filename:join(Dir, Name), Name) of @@ -1942,23 +1622,23 @@ list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) -> Next = Cnt + 1, case Cnt rem 100 of 0 -> - case list_dir_check(Dir, Handle, Next) of + case list_dir_check(Dir, Next) of {ok, Next} -> list_dir_limit_loop( - Dir, Handle, Ref, N, Next); + Dir, Ref, N, Next); Other -> Other end; _ -> - list_dir_limit_loop(Dir, Handle, Ref, N, Next) + list_dir_limit_loop(Dir, Ref, N, Next) end; {error, Reason} -> {error, Reason, Cnt} end end. 
-list_dir_check(Dir, Handle, Cnt) -> - case ?PRIM_FILE:list_dir(Handle, Dir) of +list_dir_check(Dir, Cnt) -> + case ?PRIM_FILE:list_dir(Dir) of {ok, ListDir} -> case length(ListDir) of Cnt -> @@ -1975,18 +1655,18 @@ list_dir_check(Dir, Handle, Cnt) -> %% Deletes N files while ignoring errors, then continues deleting %% as long as they exist. -list_dir_limit_cleanup(Dir, Handle, N, Cnt) when Cnt >= N -> +list_dir_limit_cleanup(Dir, N, Cnt) when Cnt >= N -> Name = integer_to_list(Cnt), - case ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)) of + case ?PRIM_FILE:delete(filename:join(Dir, Name)) of ok -> - list_dir_limit_cleanup(Dir, Handle, N, Cnt+1); + list_dir_limit_cleanup(Dir, N, Cnt+1); _ -> ok end; -list_dir_limit_cleanup(Dir, Handle, N, Cnt) -> +list_dir_limit_cleanup(Dir, N, Cnt) -> Name = integer_to_list(Cnt), - ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)), - list_dir_limit_cleanup(Dir, Handle, N, Cnt+1). + ?PRIM_FILE:delete(filename:join(Dir, Name)), + list_dir_limit_cleanup(Dir, N, Cnt+1). %%% %%% Test list_dir() on a non-existing pathname. @@ -1995,7 +1675,7 @@ list_dir_limit_cleanup(Dir, Handle, N, Cnt) -> list_dir_error(Config) -> Priv = proplists:get_value(priv_dir, Config), NonExisting = filename:join(Priv, "non-existing-dir"), - {error,enoent} = prim_file:list_dir(NonExisting), + {error,enoent} = ?PRIM_FILE:list_dir(NonExisting), ok. %%% @@ -2063,7 +1743,7 @@ do_run_large_file_test(Config, Run, Name0) -> {'DOWN',Mref,_,_,_} -> ok; {Tester,done} -> ok end, - prim_file:delete(Name) + ?PRIM_FILE:delete(Name) end), %% Run the test case. diff --git a/lib/kernel/test/sendfile_SUITE.erl b/lib/kernel/test/sendfile_SUITE.erl index bfa564c32c..0c0b1cbcb6 100644 --- a/lib/kernel/test/sendfile_SUITE.erl +++ b/lib/kernel/test/sendfile_SUITE.erl @@ -23,30 +23,41 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("kernel/include/file.hrl"). --compile(export_all). - -all() -> [{group,async_threads}, - {group,no_async_threads}]. - -groups() -> - [{async_threads,[],tcs()}, - {no_async_threads,[],tcs()}]. - -tcs() -> - [t_sendfile_small - ,t_sendfile_big_all - ,t_sendfile_big_size - ,t_sendfile_many_small - ,t_sendfile_partial - ,t_sendfile_offset - ,t_sendfile_sendafter - ,t_sendfile_recvafter - ,t_sendfile_recvafter_remoteclose - ,t_sendfile_sendduring - ,t_sendfile_recvduring - ,t_sendfile_closeduring - ,t_sendfile_crashduring - ]. +-export([all/0, init_per_suite/1, end_per_suite/1, init_per_testcase/2]). + +-export([sendfile_server/2, sendfile_do_recv/2, init/1, handle_event/2]). + +-export( + [t_sendfile_small/1, + t_sendfile_big_all/1, + t_sendfile_big_size/1, + t_sendfile_many_small/1, + t_sendfile_partial/1, + t_sendfile_offset/1, + t_sendfile_sendafter/1, + t_sendfile_recvafter/1, + t_sendfile_recvafter_remoteclose/1, + t_sendfile_sendduring/1, + t_sendfile_recvduring/1, + t_sendfile_closeduring/1, + t_sendfile_crashduring/1, + t_sendfile_arguments/1]). + +all() -> + [t_sendfile_small, + t_sendfile_big_all, + t_sendfile_big_size, + t_sendfile_many_small, + t_sendfile_partial, + t_sendfile_offset, + t_sendfile_sendafter, + t_sendfile_recvafter, + t_sendfile_recvafter_remoteclose, + t_sendfile_sendduring, + t_sendfile_recvduring, + t_sendfile_closeduring, + t_sendfile_crashduring, + t_sendfile_arguments]. init_per_suite(Config) -> case {os:type(),os:version()} of @@ -72,28 +83,18 @@ init_per_suite(Config) -> end_per_suite(Config) -> file:delete(proplists:get_value(big_file, Config)). 
-init_per_group(async_threads,Config) -> - case erlang:system_info(thread_pool_size) of - 0 -> - {skip,"No async threads"}; - _ -> - [{sendfile_opts,[{use_threads,true}]}|Config] - end; -init_per_group(no_async_threads,Config) -> - [{sendfile_opts,[{use_threads,false}]}|Config]. - -end_per_group(_,_Config) -> - ok. - init_per_testcase(TC,Config) when TC == t_sendfile_recvduring; TC == t_sendfile_sendduring -> Filename = proplists:get_value(small_file, Config), Send = fun(Sock) -> {_Size, Data} = sendfile_file_info(Filename), - {ok,D} = file:open(Filename, [raw,binary,read]), - prim_file:sendfile(D, Sock, 0, 0, 0, - [],[],[]), + {ok,Fd} = file:open(Filename, [raw,binary,read]), + %% Determine whether the driver has native support by + %% hitting the raw module directly; file:sendfile/5 will + %% land in the fallback if it doesn't. + RawModule = Fd#file_descriptor.module, + {ok, _Ignored} = RawModule:sendfile(Fd,Sock,0,0,0,[],[],[]), Data end, @@ -105,9 +106,8 @@ init_per_testcase(TC,Config) when TC == t_sendfile_recvduring; ct:log("Error: ~p",[Error]), {skip,"Not supported"} end; -init_per_testcase(_Tc,Config) -> - Config ++ [{sendfile_opts,[{use_threads,false}]}]. - +init_per_testcase(_TC,Config) -> + Config. t_sendfile_small(Config) when is_list(Config) -> Filename = proplists:get_value(small_file, Config), @@ -124,7 +124,7 @@ t_sendfile_small(Config) when is_list(Config) -> t_sendfile_many_small(Config) when is_list(Config) -> Filename = proplists:get_value(small_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), error_logger:add_report_handler(?MODULE,[self()]), @@ -151,7 +151,7 @@ t_sendfile_many_small(Config) when is_list(Config) -> t_sendfile_big_all(Config) when is_list(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -165,7 +165,7 @@ t_sendfile_big_all(Config) when is_list(Config) -> t_sendfile_big_size(Config) -> Filename = proplists:get_value(big_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), SendAll = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -180,7 +180,7 @@ t_sendfile_big_size(Config) -> t_sendfile_partial(Config) -> Filename = proplists:get_value(small_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), SendSingle = fun(Sock) -> {_Size, <<Data:5/binary,_/binary>>} = @@ -217,7 +217,7 @@ t_sendfile_partial(Config) -> t_sendfile_offset(Config) -> Filename = proplists:get_value(small_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {_Size, <<_:5/binary,Data:3/binary,_/binary>> = AllData} = @@ -233,7 +233,7 @@ t_sendfile_offset(Config) -> t_sendfile_sendafter(Config) -> Filename = proplists:get_value(small_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {Size, Data} = 
sendfile_file_info(Filename), @@ -246,7 +246,7 @@ t_sendfile_sendafter(Config) -> t_sendfile_recvafter(Config) -> Filename = proplists:get_value(small_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {Size, Data} = sendfile_file_info(Filename), @@ -279,7 +279,7 @@ t_sendfile_recvafter_remoteclose(Config) -> t_sendfile_sendduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -296,7 +296,7 @@ t_sendfile_sendduring(Config) -> t_sendfile_recvduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -315,7 +315,7 @@ t_sendfile_recvduring(Config) -> t_sendfile_closeduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock,SFServPid) -> spawn_link(fun() -> @@ -345,7 +345,7 @@ t_sendfile_closeduring(Config) -> t_sendfile_crashduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), error_logger:add_report_handler(?MODULE,[self()]), @@ -373,6 +373,36 @@ t_sendfile_crashduring(Config) -> end end. +t_sendfile_arguments(Config) -> + Filename = proplists:get_value(small_file, Config), + + {ok, Listener} = gen_tcp:listen(0, + [{packet, 0}, {active, false}, {reuseaddr, true}]), + {ok, Port} = inet:port(Listener), + + ErrorCheck = + fun(Reason, Offset, Length, Opts) -> + {ok, Sender} = gen_tcp:connect({127, 0, 0, 1}, Port, + [{packet, 0}, {active, false}]), + {ok, Receiver} = gen_tcp:accept(Listener), + {ok, Fd} = file:open(Filename, [read, raw]), + {error, Reason} = file:sendfile(Fd, Sender, Offset, Length, Opts), + gen_tcp:close(Receiver), + gen_tcp:close(Sender), + file:close(Fd) + end, + + ErrorCheck(einval, -1, 0, []), + ErrorCheck(einval, 0, -1, []), + ErrorCheck(badarg, gurka, 0, []), + ErrorCheck(badarg, 0, gurka, []), + ErrorCheck(badarg, 0, 0, gurka), + ErrorCheck(badarg, 0, 0, [{chunk_size, gurka}]), + + gen_tcp:close(Listener), + + ok. + %% Generic sendfile server code sendfile_send(Send) -> sendfile_send({127,0,0,1},Send). diff --git a/lib/kernel/test/seq_trace_SUITE.erl b/lib/kernel/test/seq_trace_SUITE.erl index be23a1933f..cf4bf11328 100644 --- a/lib/kernel/test/seq_trace_SUITE.erl +++ b/lib/kernel/test/seq_trace_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -25,7 +25,7 @@ -export([token_set_get/1, tracer_set_get/1, print/1, send/1, distributed_send/1, recv/1, distributed_recv/1, trace_exit/1, distributed_exit/1, call/1, port/1, - match_set_seq_token/1, gc_seq_token/1]). + match_set_seq_token/1, gc_seq_token/1, label_capability_mismatch/1]). 
%% internal exports -export([simple_tracer/2, one_time_receiver/0, one_time_receiver/1, @@ -47,7 +47,7 @@ all() -> [token_set_get, tracer_set_get, print, send, distributed_send, recv, distributed_recv, trace_exit, distributed_exit, call, port, match_set_seq_token, - gc_seq_token]. + gc_seq_token, label_capability_mismatch]. groups() -> []. @@ -90,8 +90,8 @@ do_token_set_get(TsType) -> %% Test that initial seq_trace is disabled [] = seq_trace:get_token(), %% Test setting and reading the different fields - 0 = seq_trace:set_token(label,17), - {label,17} = seq_trace:get_token(label), + 0 = seq_trace:set_token(label,{my_label,1}), + {label,{my_label,1}} = seq_trace:get_token(label), false = seq_trace:set_token(print,true), {print,true} = seq_trace:get_token(print), false = seq_trace:set_token(send,true), @@ -101,12 +101,12 @@ do_token_set_get(TsType) -> false = seq_trace:set_token(TsType,true), {TsType,true} = seq_trace:get_token(TsType), %% Check the whole token - {Flags,17,0,Self,0} = seq_trace:get_token(), % all flags are set + {Flags,{my_label,1},0,Self,0} = seq_trace:get_token(), % all flags are set %% Test setting and reading the 'serial' field {0,0} = seq_trace:set_token(serial,{3,5}), {serial,{3,5}} = seq_trace:get_token(serial), %% Check the whole token, test that a whole token can be set and get - {Flags,17,5,Self,3} = seq_trace:get_token(), + {Flags,{my_label,1},5,Self,3} = seq_trace:get_token(), seq_trace:set_token({Flags,19,7,Self,5}), {Flags,19,7,Self,5} = seq_trace:get_token(), %% Check that receive timeout does not reset token @@ -166,11 +166,13 @@ do_send(TsType) -> seq_trace:reset_trace(), start_tracer(), Receiver = spawn(?MODULE,one_time_receiver,[]), + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags([send, TsType]), Receiver ! send, Self = self(), seq_trace:reset_trace(), - [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), + [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), check_ts(TsType, Ts). distributed_send(Config) when is_list(Config) -> @@ -184,14 +186,19 @@ do_distributed_send(TsType) -> seq_trace:reset_trace(), start_tracer(), Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% Make sure complex labels survive the trip. + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags([send,TsType]), + Receiver ! send, Self = self(), seq_trace:reset_trace(), stop_node(Node), - [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), + [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), check_ts(TsType, Ts). - + recv(Config) when is_list(Config) -> lists:foreach(fun do_recv/1, ?TIMESTAMP_MODES). @@ -220,7 +227,12 @@ do_distributed_recv(TsType) -> seq_trace:reset_trace(), rpc:call(Node,?MODULE,start_tracer,[]), Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% Make sure complex labels survive the trip. + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags(['receive',TsType]), + Receiver ! 'receive', %% let the other process receive the message: receive after 1 -> ok end, @@ -229,7 +241,7 @@ do_distributed_recv(TsType) -> Result = rpc:call(Node,?MODULE,stop_tracer,[1]), stop_node(Node), ok = io:format("~p~n",[Result]), - [{0,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result, + [{Label,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result, check_ts(TsType, Ts). 
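%% Illustrative sketch, not part of the patch: seq_trace labels are no
%% longer limited to small integers, so the cases above tag the token with
%% a reference (or a tuple) and expect that exact term back in the trace.
arbitrary_label_sketch() ->
    Label = {my_label, make_ref()},
    seq_trace:set_token(label, Label),
    {label, Label} = seq_trace:get_token(label),
    seq_trace:reset_trace().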
trace_exit(Config) when is_list(Config) -> @@ -240,7 +252,12 @@ do_trace_exit(TsType) -> start_tracer(), Receiver = spawn_link(?MODULE, one_time_receiver, [exit]), process_flag(trap_exit, true), + + %% Make sure complex labels survive the trip. + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags([send, TsType]), + Receiver ! {before, exit}, %% let the other process receive the message: receive @@ -254,8 +271,8 @@ do_trace_exit(TsType) -> Result = stop_tracer(2), seq_trace:reset_trace(), ok = io:format("~p~n", [Result]), - [{0, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0}, - {0, {send, {1,2}, Receiver, Self, + [{Label, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0}, + {Label, {send, {1,2}, Receiver, Self, {'EXIT', Receiver, {exit, {before, exit}}}}, Ts1}] = Result, check_ts(TsType, Ts0), check_ts(TsType, Ts1). @@ -291,6 +308,74 @@ do_distributed_exit(TsType) -> {'EXIT', Receiver, {exit, {before, exit}}}}, Ts}] = Result, check_ts(TsType, Ts). +label_capability_mismatch(Config) when is_list(Config) -> + Releases = ["20_latest"], + Available = [Rel || Rel <- Releases, test_server:is_release_available(Rel)], + case Available of + [] -> {skipped, "No incompatible releases available"}; + _ -> + lists:foreach(fun do_incompatible_labels/1, Available), + lists:foreach(fun do_compatible_labels/1, Available), + ok + end. + +do_incompatible_labels(Rel) -> + Cookie = atom_to_list(erlang:get_cookie()), + {ok, Node} = test_server:start_node( + list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer, + [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]), + + {_,Dir} = code:is_loaded(?MODULE), + Mdir = filename:dirname(Dir), + true = rpc:call(Node,code,add_patha,[Mdir]), + seq_trace:reset_trace(), + rpc:call(Node,?MODULE,start_tracer,[]), + Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% This node does not support arbitrary labels, so it must fail with a + %% timeout as the token is dropped silently. + seq_trace:set_token(label,make_ref()), + seq_trace:set_token('receive',true), + + Receiver ! 'receive', + %% let the other process receive the message: + receive after 10 -> ok end, + seq_trace:reset_trace(), + + {error,timeout} = rpc:call(Node,?MODULE,stop_tracer,[1]), + stop_node(Node), + ok. + +do_compatible_labels(Rel) -> + Cookie = atom_to_list(erlang:get_cookie()), + {ok, Node} = test_server:start_node( + list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer, + [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]), + + {_,Dir} = code:is_loaded(?MODULE), + Mdir = filename:dirname(Dir), + true = rpc:call(Node,code,add_patha,[Mdir]), + seq_trace:reset_trace(), + rpc:call(Node,?MODULE,start_tracer,[]), + Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% This node does not support arbitrary labels, but small integers should + %% still work. + Label = 1234, + seq_trace:set_token(label,Label), + seq_trace:set_token('receive',true), + + Receiver ! 'receive', + %% let the other process receive the message: + receive after 10 -> ok end, + Self = self(), + seq_trace:reset_trace(), + Result = rpc:call(Node,?MODULE,stop_tracer,[1]), + stop_node(Node), + ok = io:format("~p~n",[Result]), + [{Label,{'receive',_,Self,Receiver,'receive'}, _}] = Result, + ok. + call(doc) -> "Tests special forms {is_seq_trace} and {get_seq_token} " "in trace match specs."; @@ -698,6 +783,24 @@ do_shrink(N) -> erlang:garbage_collect(), do_shrink(N-1). 
+%% Test that messages from a port does not clear the token +port_clean_token(Config) when is_list(Config) -> + seq_trace:reset_trace(), + Label = make_ref(), + seq_trace:set_token(label, Label), + {label,Label} = seq_trace:get_token(label), + + %% Create a port and get messages from it + %% We use os:cmd as a convenience as it does + %% open_port, port_command, port_close and receives replies. + %% Maybe it is not ideal to rely on the internal implementation + %% of os:cmd but it will have to do. + os:cmd("ls"), + + %% Make sure that the seq_trace token is still there + {label,Label} = seq_trace:get_token(label), + + ok. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% diff --git a/lib/kernel/test/wrap_log_reader_SUITE.erl b/lib/kernel/test/wrap_log_reader_SUITE.erl index 40a016aed0..59b088ca73 100644 --- a/lib/kernel/test/wrap_log_reader_SUITE.erl +++ b/lib/kernel/test/wrap_log_reader_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -428,13 +428,14 @@ stop() -> ok = wrap_log_test:stop(), dl_wait(). -%% Give disk logs opened by 'logger' and 'wlt' time to close after +%% Give disk logs opened by 'wlr_logger' and 'wlt' time to close after %% receiving EXIT signals. dl_wait() -> case disk_log:accessible_logs() of {[], []} -> ok; - _ -> + _X -> + erlang:display(_X), timer:sleep(100), dl_wait() end. @@ -507,27 +508,27 @@ add_ext(Name, Ext) -> %% disk_log. open(Log, File, Where) -> - logger ! {open, self(), Log, File}, + wlr_logger ! {open, self(), Log, File}, rec1(ok, Where). open_ext(Log, File, Where) -> - logger ! {open_ext, self(), Log, File}, + wlr_logger ! {open_ext, self(), Log, File}, rec1(ok, Where). close(Log) -> - logger ! {close, self(), Log}, + wlr_logger ! {close, self(), Log}, rec(ok, ?LINE). sync(Log) -> - logger ! {sync, self(), Log}, + wlr_logger ! {sync, self(), Log}, rec(ok, ?LINE). log_terms(File, Terms) -> - logger ! {log_terms, self(), File, Terms}, + wlr_logger ! {log_terms, self(), File, Terms}, rec(ok, ?LINE). blog_terms(File, Terms) -> - logger ! {blog_terms, self(), File, Terms}, + wlr_logger ! {blog_terms, self(), File, Terms}, rec(ok, ?LINE). rec1(M, Where) -> diff --git a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl index 38449b6bb3..d2bac40192 100644 --- a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl +++ b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1998-2016. All Rights Reserved. +%% Copyright Ericsson AB 1998-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -36,9 +36,9 @@ -endif. init() -> - spawn(fun() -> start(logger) end), + spawn(fun() -> start(wlr_logger) end), spawn(fun() -> start2(wlt) end), - wait_registered(logger), + wait_registered(wlr_logger), wait_registered(wlt), ok. @@ -52,9 +52,9 @@ wait_registered(Name) -> end. stop() -> - catch logger ! exit, + catch wlr_logger ! exit, catch wlt ! exit, - wait_unregistered(logger), + wait_unregistered(wlr_logger), wait_unregistered(wlt), ok. 
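%% Illustrative note, not part of the patch: the helper's registered name
%% changes from 'logger' to 'wlr_logger', presumably because the Kernel
%% logger introduced in this change set owns the name 'logger' (assumption),
%% so a test process can no longer register it for itself:
wlr_logger_name_sketch() ->
    true = is_pid(whereis(logger)),      %% assumed to be Kernel's logger
    true = register(wlr_logger, self()).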
@@ -82,47 +82,47 @@ loop() -> {open, Pid, Name, File} -> R = disk_log:open([{name, Name}, {type, wrap}, {file, File}, {size, {?fsize, ?fno}}]), - ?format("logger: open ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: open ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {open_ext, Pid, Name, File} -> R = disk_log:open([{name, Name}, {type, wrap}, {file, File}, {format, external}, {size, {?fsize, ?fno}}]), - ?format("logger: open ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: open ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {close, Pid, Name} -> R = disk_log:close(Name), - ?format("logger: close ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: close ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {sync, Pid, Name} -> R = disk_log:sync(Name), - ?format("logger: sync ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: sync ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {log_terms, Pid, Name, Terms} -> R = disk_log:log_terms(Name, Terms), - ?format("logger: log_terms ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: log_terms ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {blog_terms, Pid, Name, Terms} -> R = disk_log:blog_terms(Name, Terms), - ?format("logger: blog_terms ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: blog_terms ~p -> ~p~n", [Name, R]), Pid ! R, loop(); exit -> - ?format("Stopping logger~n", []), + ?format("Stopping wlr_logger~n", []), exit(normal); _Else -> - ?format("logger: ignored: ~p~n", [_Else]), + ?format("wlr_logger: ignored: ~p~n", [_Else]), loop() end. diff --git a/lib/kernel/test/zlib_SUITE.erl b/lib/kernel/test/zlib_SUITE.erl index 26602bdcda..52ae1b3ae6 100644 --- a/lib/kernel/test/zlib_SUITE.erl +++ b/lib/kernel/test/zlib_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 2005-2016. All Rights Reserved. +%% Copyright Ericsson AB 2005-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -1061,32 +1061,27 @@ sub_heap_binaries(Config) when is_list(Config) -> %% Check concurrent access to zlib driver. smp(Config) -> - case erlang:system_info(smp_support) of - true -> - NumOfProcs = lists:min([8,erlang:system_info(schedulers)]), - io:format("smp starting ~p workers\n",[NumOfProcs]), - - %% Tests to run in parallel. - Funcs = - [zip_usage, gz_usage, compress_usage, dictionary_usage, - crc, adler], - - %% We get all function arguments here to avoid repeated parallel - %% file read access. - UsageArgs = - list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]), - Parent = self(), - - WorkerFun = - fun() -> - worker(rand:uniform(9999), UsageArgs, Parent) - end, - - Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)], - wait_pids(Pids); - false -> - {skipped,"No smp support"} - end. + NumOfProcs = lists:min([8,erlang:system_info(schedulers)]), + io:format("smp starting ~p workers\n",[NumOfProcs]), + + %% Tests to run in parallel. + Funcs = + [zip_usage, gz_usage, compress_usage, dictionary_usage, + crc, adler], + + %% We get all function arguments here to avoid repeated parallel + %% file read access. + UsageArgs = + list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]), + Parent = self(), + + WorkerFun = + fun() -> + worker(rand:uniform(9999), UsageArgs, Parent) + end, + + Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)], + wait_pids(Pids). 
worker(Seed, FnATpl, Parent) -> io:format("smp worker ~p, seed=~p~n",[self(),Seed]), diff --git a/lib/kernel/test/zzz_SUITE.erl b/lib/kernel/test/zzz_SUITE.erl new file mode 100644 index 0000000000..59c7fd7404 --- /dev/null +++ b/lib/kernel/test/zzz_SUITE.erl @@ -0,0 +1,37 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2006-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(zzz_SUITE). + +%% The sole purpose of this test suite is for things we want to run last +%% before the VM terminates. + +-export([all/0]). + +-export([lc_graph/1]). + + +all() -> + [lc_graph]. + +lc_graph(_Config) -> + %% Create "lc_graph" file in current working dir + %% if lock checker is enabled. + erts_debug:lc_graph(), + ok. |
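The rewritten smp/1 case in zlib_SUITE above no longer guards on erlang:system_info(smp_support) (the non-SMP emulator is no longer supported) and simply spawns at most eight workers that run the zlib operations concurrently. A condensed sketch of that spawn-and-wait pattern follows; the module name par_zlib_demo and the payload are illustrative stand-ins for the suite's prepared arguments.

    -module(par_zlib_demo).
    -export([run/0]).

    %% Spawn one worker per scheduler (capped at 8, as in the suite) and let
    %% each one compress and uncompress a payload concurrently, then wait
    %% until every worker has reported back.
    run() ->
        N = lists:min([8, erlang:system_info(schedulers)]),
        Payload = binary:copy(<<"zlib smp smoke test ">>, 1000),
        Parent = self(),
        Pids = [spawn_link(fun() -> worker(Payload, Parent) end)
                || _ <- lists:seq(1, N)],
        wait_pids(Pids).

    worker(Payload, Parent) ->
        Compressed = zlib:compress(Payload),
        Payload = zlib:uncompress(Compressed),   %% round-trip must be lossless
        Parent ! {done, self()}.

    wait_pids([]) ->
        ok;
    wait_pids(Pids) ->
        receive
            {done, Pid} -> wait_pids(Pids -- [Pid])
        end.

Each worker verifies that the compress/uncompress round-trip preserves its payload and then reports back, so wait_pids/1 returns only after all workers have finished, mirroring how smp/1 waits for its worker pids.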