Diffstat (limited to 'lib/kernel/test')
45 files changed, 7195 insertions, 1032 deletions
diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile index efe3a68531..8599a3d814 100644 --- a/lib/kernel/test/Makefile +++ b/lib/kernel/test/Makefile @@ -70,6 +70,15 @@ MODULES= \ interactive_shell_SUITE \ init_SUITE \ kernel_config_SUITE \ + logger_SUITE \ + logger_bench_SUITE \ + logger_disk_log_h_SUITE \ + logger_env_var_SUITE \ + logger_filters_SUITE \ + logger_formatter_SUITE \ + logger_legacy_SUITE \ + logger_simple_SUITE \ + logger_std_h_SUITE \ os_SUITE \ pg2_SUITE \ seq_trace_SUITE \ @@ -80,7 +89,8 @@ MODULES= \ loose_node \ sendfile_SUITE \ standard_error_SUITE \ - multi_load_SUITE + multi_load_SUITE \ + zzz_SUITE APP_FILES = \ appinc.app \ @@ -101,7 +111,7 @@ TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR)) INSTALL_PROGS= $(TARGET_FILES) EMAKEFILE=Emakefile -COVERFILE=kernel.cover +COVERFILE=kernel.cover logger.cover # ---------------------------------------------------- # Release directory specification @@ -148,7 +158,8 @@ release_tests_spec: make_emakefile $(INSTALL_DIR) "$(RELSYSDIR)" $(INSTALL_DATA) $(ERL_FILES) "$(RELSYSDIR)" $(INSTALL_DATA) $(APP_FILES) "$(RELSYSDIR)" - $(INSTALL_DATA) kernel.spec kernel_smoke.spec kernel_bench.spec \ + $(INSTALL_DATA) \ + kernel.spec kernel_smoke.spec kernel_bench.spec logger.spec \ $(EMAKEFILE) $(COVERFILE) "$(RELSYSDIR)" chmod -R u+w "$(RELSYSDIR)" @tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -) diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl index 866043cfb4..988f26280f 100644 --- a/lib/kernel/test/application_SUITE.erl +++ b/lib/kernel/test/application_SUITE.erl @@ -1568,7 +1568,8 @@ loop5606(Pid) -> %% Tests get_env/* functions. get_env(Conf) when is_list(Conf) -> - {ok, _} = application:get_env(kernel, error_logger), + ok = application:set_env(kernel, new_var, new_val), + {ok, new_val} = application:get_env(kernel, new_var), undefined = application:get_env(undefined_app, a), undefined = application:get_env(kernel, error_logger_xyz), default = application:get_env(kernel, error_logger_xyz, default), @@ -1602,8 +1603,7 @@ get_key(Conf) when is_list(Conf) -> {ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} = rpc:call(Cp1, application, get_key, [appinc, start_phases]), {ok, Env} = rpc:call(Cp1, application, get_key, [appinc ,env]), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, []} = rpc:call(Cp1, application, get_key, [appinc, modules]), {ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} = rpc:call(Cp1, application, get_key, [appinc, mod]), @@ -1624,8 +1624,7 @@ get_key(Conf) when is_list(Conf) -> {mod, {application_starter, [ch_sup, {appinc, 41, 43}] }}, {start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} = rpc:call(Cp1, application, get_all_key, [appinc]), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, "Test of new app file, including appnew"} = gen_server:call({global, {ch,41}}, {get_pid_key, description}), @@ -1642,8 +1641,7 @@ get_key(Conf) when is_list(Conf) -> {ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} = gen_server:call({global, {ch,41}}, {get_pid_key, start_phases}), {ok, Env} = gen_server:call({global, {ch,41}}, {get_pid_key, env}), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), {ok, []} = gen_server:call({global, 
{ch,41}}, {get_pid_key, modules}), {ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} = @@ -1670,8 +1668,7 @@ get_key(Conf) when is_list(Conf) -> {mod, {application_starter, [ch_sup, {appinc, 41, 43}] }}, {start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} = gen_server:call({global, {ch,41}}, get_pid_all_key), - [{included_applications,[appinc1,appinc2]}, - {own2,val2},{own_env1,value1}] = lists:sort(Env), + [{own2,val2},{own_env1,value1}] = lists:sort(Env), stop_node_nice(Cp1), ok. diff --git a/lib/kernel/test/code_SUITE.erl b/lib/kernel/test/code_SUITE.erl index 569753155f..902196def2 100644 --- a/lib/kernel/test/code_SUITE.erl +++ b/lib/kernel/test/code_SUITE.erl @@ -931,37 +931,34 @@ purge_stacktrace(Config) when is_list(Config) -> code:purge(code_b_test), try code_b_test:call(fun(b) -> ok end, a) catch - error:function_clause -> + error:function_clause:Stacktrace -> code:load_file(code_b_test), - case erlang:get_stacktrace() of + case Stacktrace of [{?MODULE,_,[a],_}, {code_b_test,call,2,_}, {?MODULE,purge_stacktrace,1,_}|_] -> - false = code:purge(code_b_test), - [] = erlang:get_stacktrace() + false = code:purge(code_b_test) end end, try code_b_test:call(nofun, 2) catch - error:function_clause -> + error:function_clause:Stacktrace2 -> code:load_file(code_b_test), - case erlang:get_stacktrace() of + case Stacktrace2 of [{code_b_test,call,[nofun,2],_}, {?MODULE,purge_stacktrace,1,_}|_] -> - false = code:purge(code_b_test), - [] = erlang:get_stacktrace() + false = code:purge(code_b_test) end end, Args = [erlang,error,[badarg]], try code_b_test:call(erlang, error, [badarg,Args]) catch - error:badarg -> + error:badarg:Stacktrace3 -> code:load_file(code_b_test), - case erlang:get_stacktrace() of + case Stacktrace3 of [{code_b_test,call,Args,_}, {?MODULE,purge_stacktrace,1,_}|_] -> - false = code:purge(code_b_test), - [] = erlang:get_stacktrace() + false = code:purge(code_b_test) end end, ok. diff --git a/lib/kernel/test/disk_log_SUITE.erl b/lib/kernel/test/disk_log_SUITE.erl index fe2fc778f2..0709a6e766 100644 --- a/lib/kernel/test/disk_log_SUITE.erl +++ b/lib/kernel/test/disk_log_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -89,8 +89,6 @@ dist_terminate/1, dist_accessible/1, dist_deadlock/1, dist_open2/1, other_groups/1, - evil/1, - otp_6278/1, otp_10131/1]). -export([head_fun/1, hf/0, lserv/1, @@ -123,7 +121,7 @@ [halt_int, wrap_int, halt_ext, wrap_ext, read_mode, head, notif, new_idx_vsn, reopen, block, unblock, open, close, error, chunk, truncate, many_users, info, change_size, - change_attribute, distribution, evil, otp_6278, otp_10131]). + change_attribute, distribution, otp_6278, otp_10131]). %% These test cases should be skipped if the VxWorks card is %% configured without NFS cache. @@ -149,7 +147,7 @@ all() -> {group, open}, {group, close}, {group, error}, chunk, truncate, many_users, {group, info}, {group, change_size}, change_attribute, - {group, distribution}, evil, otp_6278, otp_10131]. + {group, distribution}, otp_6278, otp_10131]. groups() -> [{halt_int, [], [halt_int_inf, {group, halt_int_sz}]}, @@ -4676,119 +4674,6 @@ other_groups(Conf) when is_list(Conf) -> ok. --define(MAX, ?MAX_FWRITE_CACHE). % as in disk_log_1.erl -%% Evil cases such as closed file descriptor port. 
-evil(Conf) when is_list(Conf) -> - Dir = ?privdir(Conf), - File = filename:join(Dir, "n.LOG"), - Log = n, - - %% Not a very thorough test. - - ok = setup_evil_filled_cache_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:log(Log, apa), - ok = disk_log:close(Log), - - ok = setup_evil_filled_cache_halt(Log, Dir), - {error, {file_error,_,einval}} = disk_log:truncate(Log, apa), - ok = stop_evil(Log), - - %% White box test. - file:delete(File), - Ports0 = erlang:ports(), - {ok, Log} = disk_log:open([{name,Log},{file,File},{type,halt}, - {size,?MAX+50},{format,external}]), - [Fd] = erlang:ports() -- Ports0, - {B,_} = x_mk_bytes(30), - ok = disk_log:blog(Log, <<0:(?MAX-1)/unit:8>>), - exit(Fd, kill), - {error, {file_error,_,einval}} = disk_log:blog_terms(Log, [B,B]), - ok= disk_log:close(Log), - file:delete(File), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:close(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:log(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_halt(Log, Dir), - {error, {file_error,_,einval}} = disk_log:log(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:reopen(Log, apa), - {error, {file_error,_,einval}} = disk_log:reopen(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:reopen(Log, apa), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:inc_wrap_file(Log), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:chunk(Log, start), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:truncate(Log), - ok = stop_evil(Log), - - ok = setup_evil_wrap(Log, Dir), - {error, {file_error,_,einval}} = disk_log:chunk_step(Log, start, 1), - ok = stop_evil(Log), - - io:format("messages: ~p~n", [erlang:process_info(self(), messages)]), - del(File, 2), - file:delete(File), - ok. - -setup_evil_wrap(Log, Dir) -> - setup_evil(Log, [{type,wrap},{size,{100,2}}], Dir). - -setup_evil_halt(Log, Dir) -> - setup_evil(Log, [{type,halt},{size,10000}], Dir). - -setup_evil(Log, Args, Dir) -> - File = filename:join(Dir, lists:concat([Log, ".LOG"])), - file:delete(File), - del(File, 2), - ok = disk_log:start(), - Ports0 = erlang:ports(), - {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]), - [Fd] = erlang:ports() -- Ports0, - exit(Fd, kill), - ok = disk_log:log_terms(n, [<<0:10/unit:8>>]), - timer:sleep(2500), % TIMEOUT in disk_log_1.erl is 2000 - ok. - -stop_evil(Log) -> - {error, _} = disk_log:close(Log), - ok. - -setup_evil_filled_cache_wrap(Log, Dir) -> - setup_evil_filled_cache(Log, [{type,wrap},{size,{?MAX,2}}], Dir). - -setup_evil_filled_cache_halt(Log, Dir) -> - setup_evil_filled_cache(Log, [{type,halt},{size,infinity}], Dir). - -%% The cache is filled, and the file descriptor port gone. -setup_evil_filled_cache(Log, Args, Dir) -> - File = filename:join(Dir, lists:concat([Log, ".LOG"])), - file:delete(File), - del(File, 2), - ok = disk_log:start(), - Ports0 = erlang:ports(), - {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]), - [Fd] = erlang:ports() -- Ports0, - ok = disk_log:log_terms(n, [<<0:?MAX/unit:8>>]), - exit(Fd, kill), - ok. - %% OTP-6278. open/1 creates no status or crash report. 
otp_6278(Conf) when is_list(Conf) -> Dir = ?privdir(Conf), diff --git a/lib/kernel/test/erl_distribution_SUITE.erl b/lib/kernel/test/erl_distribution_SUITE.erl index bbfaa9d147..9c6712ad74 100644 --- a/lib/kernel/test/erl_distribution_SUITE.erl +++ b/lib/kernel/test/erl_distribution_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -95,7 +95,11 @@ init_per_group(_GroupName, Config) -> end_per_group(_GroupName, Config) -> Config. - +init_per_testcase(TC, Config) when TC == hostnames; + TC == nodenames -> + file:make_dir("hostnames_nodedir"), + file:write_file("hostnames_nodedir/ignore_core_files",""), + Config; init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) -> Config. @@ -240,7 +244,7 @@ illegal(Name) -> test_node(Name) -> test_node(Name, false). test_node(Name, Illigal) -> - ProgName = atom_to_list(lib:progname()), + ProgName = ct:get_progname(), Command = ProgName ++ " -noinput " ++ long_or_short() ++ Name ++ " -eval \"net_adm:ping('" ++ atom_to_list(node()) ++ "')\"" ++ case Illigal of @@ -251,7 +255,7 @@ test_node(Name, Illigal) -> end, net_kernel:monitor_nodes(true), BinCommand = unicode:characters_to_binary(Command, utf8), - Prt = open_port({spawn, BinCommand}, [stream]), + Prt = open_port({spawn, BinCommand}, [stream,{cd,"hostnames_nodedir"}]), Node = list_to_atom(Name), receive {nodeup, Node} -> @@ -459,9 +463,9 @@ run_remote_test([FuncStr, TestNodeStr | Args]) -> 1 end catch - C:E -> + C:E:S -> io:format("Node ~p got EXCEPTION ~p:~p\nat ~p\n", - [node(), C, E, erlang:get_stacktrace()]), + [node(), C, E, S]), 2 end, io:format("Node ~p doing halt(~p).\n",[node(), Status]), @@ -1140,17 +1144,16 @@ monitor_nodes_otp_6481_test(Config, TestType) when is_list(Config) -> TestMonNodeState = monitor_node_state(), %% io:format("~p~n", [TestMonNodeState]), TestMonNodeState = - MonNodeState + case TestType of + nodedown -> []; + nodeup -> [{self(), []}] + end + ++ lists:map(fun (_) -> {MN, []} end, Seq) ++ case TestType of nodedown -> [{self(), []}]; nodeup -> [] end - ++ lists:map(fun (_) -> {MN, []} end, Seq) - ++ case TestType of - nodedown -> []; - nodeup -> [{self(), []}] - end, - + ++ MonNodeState, {ok, Node} = start_node(Name, "", this), receive {nodeup, Node} -> ok end, diff --git a/lib/kernel/test/erl_distribution_wb_SUITE.erl b/lib/kernel/test/erl_distribution_wb_SUITE.erl index 03aaee56b7..8256444bdc 100644 --- a/lib/kernel/test/erl_distribution_wb_SUITE.erl +++ b/lib/kernel/test/erl_distribution_wb_SUITE.erl @@ -61,10 +61,13 @@ %% From R9 and forward extended references is compulsory %% From R10 and forward extended pids and ports are compulsory %% From R20 and forward UTF8 atoms are compulsory +%% From R21 and forward NEW_FUN_TAGS is compulsory (no more tuple fallback {fun, ...}) -define(COMPULSORY_DFLAGS, (?DFLAG_EXTENDED_REFERENCES bor ?DFLAG_EXTENDED_PIDS_PORTS bor - ?DFLAG_UTF8_ATOMS)). + ?DFLAG_UTF8_ATOMS bor + ?DFLAG_NEW_FUN_TAGS)). +-define(PASS_THROUGH, $p). -define(shutdown(X), exit(X)). -define(int16(X), [((X) bsr 8) band 16#ff, (X) band 16#ff]). 
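%% [Editor's sketch] Several suites in this diff (code_SUITE, erl_distribution_SUITE,
%% file_SUITE, gen_sctp_SUITE, ...) migrate from the removed erlang:get_stacktrace/0
%% to the OTP 21 try/catch syntax that binds the stacktrace directly in the catch
%% clause. A minimal, hypothetical illustration of that pattern (run_logged/1 is
%% not part of the diff):
run_logged(Fun) ->
    try
        Fun()
    catch
        Class:Reason:Stacktrace ->
            io:format("~p got EXCEPTION ~p:~p~nat ~p~n",
                      [node(), Class, Reason, Stacktrace]),
            error
    end.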
@@ -674,15 +677,16 @@ build_rex_message(Cookie,OurName) -> %% Receive a distribution message recv_message(Socket) -> case gen_tcp:recv(Socket, 0) of + {ok,[]} -> + recv_message(Socket); %% a tick, ignore {ok,Data} -> B0 = list_to_binary(Data), - {_,B1} = erlang:split_binary(B0,1), - Header = binary_to_term(B1), - Siz = byte_size(term_to_binary(Header)), - {_,B2} = erlang:split_binary(B1,Siz), + <<?PASS_THROUGH, B1/binary>> = B0, + {Header,Siz} = binary_to_term(B1,[used]), + <<_:Siz/binary,B2/binary>> = B1, Message = case (catch binary_to_term(B2)) of {'EXIT', _} -> - could_not_digest_message; + {could_not_digest_message,B2}; Other -> Other end, diff --git a/lib/kernel/test/erl_prim_loader_SUITE.erl b/lib/kernel/test/erl_prim_loader_SUITE.erl index b6417210b9..3502a4ad08 100644 --- a/lib/kernel/test/erl_prim_loader_SUITE.erl +++ b/lib/kernel/test/erl_prim_loader_SUITE.erl @@ -33,6 +33,7 @@ primary_archive/1, virtual_dir_in_archive/1, get_modules/1]). +-define(PRIM_FILE, prim_file). %%----------------------------------------------------------------- %% Test suite for erl_prim_loader. (Most code is run during system start/stop.) @@ -461,7 +462,7 @@ primary_archive(Config) when is_list(Config) -> %% Set primary archive ExpectedEbins = [Archive, DictDir ++ "/ebin", DummyDir ++ "/ebin"], io:format("ExpectedEbins: ~p\n", [ExpectedEbins]), - {ok, FileInfo} = prim_file:read_file_info(Archive), + {ok, FileInfo} = ?PRIM_FILE:read_file_info(Archive), {ok, Ebins} = rpc:call(Node, erl_prim_loader, set_primary_archive, [Archive, ArchiveBin, FileInfo, fun escript:parse_file/1]), diff --git a/lib/kernel/test/error_logger_SUITE.erl b/lib/kernel/test/error_logger_SUITE.erl index 2d26a7246c..6c4526d0cf 100644 --- a/lib/kernel/test/error_logger_SUITE.erl +++ b/lib/kernel/test/error_logger_SUITE.erl @@ -32,7 +32,8 @@ init_per_group/2,end_per_group/2, off_heap/1, error_report/1, info_report/1, error/1, info/1, - emulator/1, tty/1, logfile/1, add/1, delete/1]). + emulator/1, via_logger_process/1, other_node/1, + tty/1, logfile/1, add/1, delete/1]). -export([generate_error/2]). @@ -46,16 +47,19 @@ suite() -> {timetrap,{minutes,1}}]. all() -> - [off_heap, error_report, info_report, error, info, emulator, tty, - logfile, add, delete]. + [off_heap, error_report, info_report, error, info, emulator, + via_logger_process, other_node, tty, logfile, add, delete]. groups() -> []. init_per_suite(Config) -> + logger:add_handler(error_logger,error_logger, + #{level=>info,filter_default=>log}), Config. end_per_suite(_Config) -> + logger:remove_handler(error_logger), ok. init_per_group(_GroupName, Config) -> @@ -226,6 +230,40 @@ generate_error(Error, Stack) -> erlang:raise(error, Error, Stack). %%----------------------------------------------------------------- + +via_logger_process(Config) -> + case os:type() of + {win32,_} -> + {skip,"Skip on windows - cant change file mode"}; + _ -> + error_logger:add_report_handler(?MODULE, self()), + Dir = filename:join(?config(priv_dir,Config),"dummydir"), + Msg = "File operation error: eacces. Target: " ++ + Dir ++ ". Function: list_dir. ", + ok = file:make_dir(Dir), + ok = file:change_mode(Dir,8#0222), + error = erl_prim_loader:list_dir(Dir), + ok = file:change_mode(Dir,8#0664), + _ = file:del_dir(Dir), + reported(error_report, std_error, Msg), + my_yes = error_logger:delete_report_handler(?MODULE), + ok + end. 
+ +%%----------------------------------------------------------------- + +other_node(_Config) -> + error_logger:add_report_handler(?MODULE, self()), + {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]), + ok = rpc:call(Node,logger,add_handler,[error_logger,error_logger, + #{level=>info,filter_default=>log}]), + rpc:call(Node,error_logger,error_report,[hi_from_remote]), + reported(error_report,std_error,hi_from_remote), + test_server:stop_node(Node), + ok. + + +%%----------------------------------------------------------------- %% We don't enables or disables tty error logging here. We do not %% want to interact with the test run. %%----------------------------------------------------------------- @@ -279,7 +317,7 @@ reported(Tag, Type, Report) -> test_server:messages_get(), ok after 1000 -> - ct:fail(no_report_received) + ct:fail({no_report_received,test_server:messages_get()}) end. %%----------------------------------------------------------------- diff --git a/lib/kernel/test/file_SUITE.erl b/lib/kernel/test/file_SUITE.erl index 119e1f24bb..ff93f25e25 100644 --- a/lib/kernel/test/file_SUITE.erl +++ b/lib/kernel/test/file_SUITE.erl @@ -39,6 +39,8 @@ -define(FILE_FIN_PER_TESTCASE(Config), Config). -endif. +-define(PRIM_FILE, prim_file). + -module(?FILE_SUITE). -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, @@ -54,7 +56,8 @@ open1/1, old_modes/1, new_modes/1, path_open/1, open_errors/1]). -export([ file_info_basic_file/1, file_info_basic_directory/1, - file_info_bad/1, file_info_times/1, file_write_file_info/1]). + file_info_bad/1, file_info_times/1, file_write_file_info/1, + file_wfi_helpers/1]). -export([rename/1, access/1, truncate/1, datasync/1, sync/1, read_write/1, pread_write/1, append/1, exclusive/1]). -export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]). @@ -97,6 +100,12 @@ -export([unicode_mode/1]). +-export([volume_relative_paths/1]). + +-export([tiny_writes/1, tiny_writes_delayed/1, + large_writes/1, large_writes_delayed/1, + tiny_reads/1, tiny_reads_ahead/1]). + %% Debug exports -export([create_file_slow/2, create_file/2, create_bin/2]). -export([verify_file/2, verify_bin/3]). @@ -107,6 +116,8 @@ -export([disc_free/1, memsize/0]). -include_lib("common_test/include/ct.hrl"). +-include_lib("common_test/include/ct_event.hrl"). + -include_lib("kernel/include/file.hrl"). -define(THROW_ERROR(RES), throw({fail, ?LINE, RES})). @@ -118,13 +129,13 @@ suite() -> all() -> [unicode, altname, read_write_file, {group, dirs}, - {group, files}, delete, rename, names, {group, errors}, - {group, compression}, {group, links}, copy, + {group, files}, delete, rename, names, volume_relative_paths, + {group, errors}, {group, compression}, {group, links}, copy, delayed_write, read_ahead, segment_read, segment_write, ipread, pid2name, interleaved_read_write, otp_5814, otp_10852, large_file, large_write, read_line_1, read_line_2, read_line_3, read_line_4, standard_io, old_io_protocol, - unicode_mode + unicode_mode, {group, bench} ]. 
groups() -> @@ -142,7 +153,8 @@ groups() -> {pos, [], [pos1, pos2, pos3]}, {file_info, [], [file_info_basic_file, file_info_basic_directory, - file_info_bad, file_info_times, file_write_file_info]}, + file_info_bad, file_info_times, file_write_file_info, + file_wfi_helpers]}, {consult, [], [consult1, path_consult]}, {eval, [], [eval1, path_eval]}, {script, [], [script1, path_script]}, @@ -154,11 +166,19 @@ groups() -> write_compressed, compress_errors, catenated_gzips, compress_async_crash]}, {links, [], - [make_link, read_link_info_for_non_link, symlinks]}]. + [make_link, read_link_info_for_non_link, symlinks]}, + {bench, [], + [tiny_writes, tiny_writes_delayed, + large_writes, large_writes_delayed, + tiny_reads, tiny_reads_ahead]}]. init_per_group(_GroupName, Config) -> Config. +end_per_group(bench, Config) -> + ScratchDir = proplists:get_value(priv_dir, Config), + file:delete(filename:join(ScratchDir, "benchmark_scratch_file")), + Config; end_per_group(_GroupName, Config) -> Config. @@ -381,11 +401,11 @@ read_write_0(Str, {Func, ReadFun}, Options) -> io:format("~p:~p: ~p ERROR: ~ts vs~n ~w~n - ~p~n", [?MODULE, Line, Func, Str, ReadBytes, Options]), exit({error, ?LINE}); - error:What -> + error:What:Stacktrace -> io:format("~p:??: ~p ERROR: ~p from~n ~w~n ~p~n", [?MODULE, Func, What, Str, Options]), - io:format("\t~p~n", [erlang:get_stacktrace()]), + io:format("\t~p~n", [Stacktrace]), exit({error, ?LINE}) end. @@ -473,7 +493,7 @@ um_check_unicode(_Utf8Bin, {ok, _ListOrBin}, _, _UTF8_) -> um_filename(Bin, Dir, Options) when is_binary(Bin) -> um_filename(binary_to_list(Bin), Dir, Options); um_filename(Str = [_|_], Dir, Options) -> - Name = hd(string:tokens(Str, ":")), + Name = hd(string:lexemes(Str, ":")), Enc = atom_to_list(proplists:get_value(encoding, Options, latin1)), File = case lists:member(binary, Options) of true -> @@ -638,6 +658,10 @@ cur_dir_0(Config) when is_list(Config) -> {ok,NewDirFiles} = ?FILE_MODULE:list_dir("."), true = lists:member(UncommonName,NewDirFiles), + %% Ensure that we get the same result with a trailing slash; the + %% APIs used on Windows will choke on them if passed directly. + {ok,NewDirFiles} = ?FILE_MODULE:list_dir("./"), + %% Delete the directory and return to the old current directory %% and check that the created file isn't there (too!) expect({error, einval}, {error, eacces}, @@ -690,10 +714,15 @@ win_cur_dir_1(_Config) -> %% Get the drive letter from the current directory, %% and try to get current directory for that drive. - [Drive,$:|_] = BaseDir, - {ok,BaseDir} = ?FILE_MODULE:get_cwd([Drive,$:]), + [CurDrive,$:|_] = BaseDir, + {ok,BaseDir} = ?FILE_MODULE:get_cwd([CurDrive,$:]), io:format("BaseDir = ~s\n", [BaseDir]), + %% We should error out on non-existent drives. Any reasonable system will + %% have at least one. + CurDirs = [?FILE_MODULE:get_cwd([Drive,$:]) || Drive <- lists:seq($A, $Z)], + lists:member({error,eaccess}, CurDirs), + %% Unfortunately, there is no way to move away from the %% current drive as we can't use the "subst" command from %% a SSH connection. We can't test any more. @@ -831,7 +860,7 @@ no_untranslatable_names() -> end. 
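%% [Editor's sketch] string:tokens/2 is replaced by string:lexemes/2 in the hunks
%% above (and again further down in start_node/2). A hypothetical one-off example
%% of the replacement call, splitting a node name on "@":
host_of(Node) when is_atom(Node) ->
    [_Name, Host] = string:lexemes(atom_to_list(Node), "@"),
    Host.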
start_node(Name, Args) -> - [_,Host] = string:tokens(atom_to_list(node()), "@"), + [_,Host] = string:lexemes(atom_to_list(node()), "@"), ct:log("Trying to start ~w@~s~n", [Name,Host]), case test_server:start_node(Name, peer, [{args,Args}]) of {error,Reason} -> @@ -1019,6 +1048,23 @@ close(Config) when is_list(Config) -> Val = ?FILE_MODULE:close(Fd1), io:format("Second close gave: ~p",[Val]), + %% All operations on a closed raw file should EINVAL, even if they're not + %% supported on the current platform. + {ok,Fd2} = ?FILE_MODULE:open(Name, [read, write, raw]), + ok = ?FILE_MODULE:close(Fd2), + + {error, einval} = ?FILE_MODULE:advise(Fd2, 5, 5, normal), + {error, einval} = ?FILE_MODULE:allocate(Fd2, 5, 5), + {error, einval} = ?FILE_MODULE:close(Fd2), + {error, einval} = ?FILE_MODULE:datasync(Fd2), + {error, einval} = ?FILE_MODULE:position(Fd2, 5), + {error, einval} = ?FILE_MODULE:pread(Fd2, 5, 1), + {error, einval} = ?FILE_MODULE:pwrite(Fd2, 5, "einval please"), + {error, einval} = ?FILE_MODULE:read(Fd2, 1), + {error, einval} = ?FILE_MODULE:sync(Fd2), + {error, einval} = ?FILE_MODULE:truncate(Fd2), + {error, einval} = ?FILE_MODULE:write(Fd2, "einval please"), + [] = flush(), ok. @@ -1132,8 +1178,8 @@ pread_write_test(File, Data) -> end, I = Size + 17, ok = ?FILE_MODULE:pwrite(File, 0, Data), - Res = ?FILE_MODULE:pread(File, 0, I), - {ok, Data} = Res, + {ok, Data} = ?FILE_MODULE:pread(File, 0, I), + {ok, [Data]} = ?FILE_MODULE:pread(File, [{0, I}]), eof = ?FILE_MODULE:pread(File, I, 1), ok = ?FILE_MODULE:pwrite(File, [{0, Data}, {I, Data}]), {ok, [Data, eof, Data]} = @@ -1564,6 +1610,39 @@ file_write_file_info(Config) when is_list(Config) -> [] = flush(), ok. +file_wfi_helpers(Config) when is_list(Config) -> + RootDir = get_good_directory(Config), + io:format("RootDir = ~p", [RootDir]), + + Name = filename:join(RootDir, + atom_to_list(?MODULE) ++ "_wfi_helpers"), + + ok = ?FILE_MODULE:write_file(Name, "hello again"), + NewTime = {{1997, 02, 15}, {13, 18, 20}}, + ok = ?FILE_MODULE:change_time(Name, NewTime, NewTime), + + {ok, #file_info{atime=NewActAtime, mtime=NewTime}} = + ?FILE_MODULE:read_file_info(Name), + + NewFilteredAtime = filter_atime(NewTime, Config), + NewFilteredAtime = filter_atime(NewActAtime, Config), + + %% Make the file unwritable + ok = ?FILE_MODULE:change_mode(Name, 8#400), + {error, eacces} = ?FILE_MODULE:write_file(Name, "hello again"), + + %% ... and writable again + ok = ?FILE_MODULE:change_mode(Name, 8#600), + ok = ?FILE_MODULE:write_file(Name, "hello again"), + + %% We have no idea which users will work, so all we can do is to check + %% that it returns enoent instead of crashing. + {error, enoent} = ?FILE_MODULE:change_group("bogus file name", 0), + {error, enoent} = ?FILE_MODULE:change_owner("bogus file name", 0), + + [] = flush(), + ok. + %% Returns a directory on a file system that has correct file times. get_good_directory(Config) -> @@ -2044,13 +2123,22 @@ names(Config) when is_list(Config) -> ok = ?FILE_MODULE:close(Fd2), {ok,Fd3} = ?FILE_MODULE:open(Name3,read), ok = ?FILE_MODULE:close(Fd3), + + %% Now try the same on raw files. 
+ {ok,Fd4} = ?FILE_MODULE:open(Name2, [read, raw]), + ok = ?FILE_MODULE:close(Fd4), + {ok,Fd4f} = ?FILE_MODULE:open(lists:flatten(Name2), [read, raw]), + ok = ?FILE_MODULE:close(Fd4f), + {ok,Fd5} = ?FILE_MODULE:open(Name3, [read, raw]), + ok = ?FILE_MODULE:close(Fd5), + case length(Name1) > 255 of true -> io:format("Path too long for an atom:\n\n~p\n", [Name1]); false -> Name4 = list_to_atom(Name1), - {ok,Fd4} = ?FILE_MODULE:open(Name4,read), - ok = ?FILE_MODULE:close(Fd4) + {ok,Fd6} = ?FILE_MODULE:open(Name4,read), + ok = ?FILE_MODULE:close(Fd6) end, %% Try some path names @@ -2074,6 +2162,22 @@ names(Config) when is_list(Config) -> [] = flush(), ok. +volume_relative_paths(Config) when is_list(Config) -> + case os:type() of + {win32, _} -> + {ok, [Drive, $: | _]} = file:get_cwd(), + %% Relative to current device root. + {ok, RootInfo} = file:read_file_info([Drive, $:, $/]), + {ok, RootInfo} = file:read_file_info("/"), + %% Relative to current device directory. + {ok, DirContents} = file:list_dir([Drive, $:]), + {ok, DirContents} = file:list_dir("."), + [] = flush(), + ok; + _ -> + {skip, "This test is Windows-specific."} + end. + %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -2108,7 +2212,7 @@ e_delete(Config) when is_list(Config) -> Base, #file_info {mode=0}), {error, eacces} = ?FILE_MODULE:delete(Afile), ?FILE_MODULE:write_file_info( - Base, #file_info {mode=8#600}) + Base, #file_info {mode=8#700}) end, [] = flush(), @@ -2239,7 +2343,7 @@ e_make_dir(Config) when is_list(Config) -> ?FILE_MODULE:write_file_info(Base, #file_info {mode=0}), {error, eacces} = ?FILE_MODULE:make_dir(filename:join(Base, "xxxx")), ?FILE_MODULE:write_file_info( - Base, #file_info {mode=8#600}) + Base, #file_info {mode=8#700}) end, ok. @@ -2285,7 +2389,7 @@ e_del_dir(Config) when is_list(Config) -> ok = ?FILE_MODULE:make_dir(ADirectory), ?FILE_MODULE:write_file_info( Base, #file_info {mode=0}), {error, eacces} = ?FILE_MODULE:del_dir(ADirectory), - ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#600}) + ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#700}) end, [] = flush(), ok. @@ -2641,8 +2745,8 @@ altname(Config) when is_list(Config) -> {skipped, "Altname not supported on this platform"}; {ok, "LONGAL~1"} -> {ok, "A_FILE~1"} = ?FILE_MODULE:altname(Name), - {ok, "C:/"} = ?FILE_MODULE:altname("C:/"), - {ok, "C:\\"} = ?FILE_MODULE:altname("C:\\"), + {ok, "c:/"} = ?FILE_MODULE:altname("C:/"), + {ok, "c:/"} = ?FILE_MODULE:altname("C:\\"), {error,enoent} = ?FILE_MODULE:altname(NonexName), {ok, "short"} = ?FILE_MODULE:altname(ShortName), ok @@ -2923,20 +3027,22 @@ delayed_write(Config) when is_list(Config) -> %% %% Test caching and normal close of non-raw file {ok, Fd1} = - ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 2000}]), + ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 400}]), ok = ?FILE_MODULE:write(Fd1, Data1), - timer:sleep(1000), % Just in case the file system is slow + %% Wait for a reasonable amount of time to check whether the write was + %% practically instantaneous or actually delayed. 
+ timer:sleep(100), {ok, Fd2} = ?FILE_MODULE:open(File, [read]), eof = ?FILE_MODULE:read(Fd2, 1), ok = ?FILE_MODULE:write(Fd1, Data1), % Data flush on size - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), {ok, Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 2*Size+1), ok = ?FILE_MODULE:write(Fd1, Data1), - timer:sleep(3000), % Wait until data flush on timeout + timer:sleep(500), % Wait until data flush on timeout {ok, Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 3*Size+1), ok = ?FILE_MODULE:write(Fd1, Data1), ok = ?FILE_MODULE:close(Fd1), % Data flush on close - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), {ok, Data1Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 4*Size+1), ok = ?FILE_MODULE:close(Fd2), %% @@ -2970,7 +3076,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref1, _, _, _} = Down1a -> ct:fail(Down1a) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow {ok, Fd3} = ?FILE_MODULE:open(File, [read]), eof = ?FILE_MODULE:read(Fd3, 1), Child1 ! {Parent, continue, normal}, @@ -2980,7 +3086,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref1, _, _, _} = Down1b -> ct:fail(Down1b) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow {ok, Data1} = ?FILE_MODULE:pread(Fd3, bof, Size+1), ok = ?FILE_MODULE:close(Fd3), %% @@ -2993,7 +3099,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref2, _, _, _} = Down2a -> ct:fail(Down2a) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow {ok, Fd4} = ?FILE_MODULE:open(File, [read]), eof = ?FILE_MODULE:read(Fd4, 1), Child2 ! {Parent, continue, kill}, @@ -3003,7 +3109,7 @@ delayed_write(Config) when is_list(Config) -> {'DOWN', Mref2, _, _, _} = Down2b -> ct:fail(Down2b) end, - timer:sleep(1000), % Just in case the file system is slow + timer:sleep(100), % Just in case the file system is slow eof = ?FILE_MODULE:pread(Fd4, bof, 1), ok = ?FILE_MODULE:close(Fd4), %% @@ -3095,6 +3201,16 @@ read_ahead(Config) when is_list(Config) -> Data1Data2Data3 = Data1++Data2++Data3, {ok, Data1Data2Data3} = ?FILE_MODULE:read(Fd5, 3*Size+1), ok = ?FILE_MODULE:close(Fd5), + + %% Ensure that a read that draws from both the buffer and the file won't + %% return anything wonky. + SplitData = << <<(I rem 256)>> || I <- lists:seq(1, 1024) >>, + file:write_file(File, SplitData), + {ok, Fd6} = ?FILE_MODULE:open(File, [raw, read, binary, {read_ahead, 256}]), + {ok, <<1>>} = file:read(Fd6, 1), + <<1, Shifted:512/binary, _Rest/binary>> = SplitData, + {ok, Shifted} = file:read(Fd6, 512), + %% [] = flush(), ok. @@ -3699,6 +3815,83 @@ do_large_write(Name) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%% Benchmarks +%% +%% Note that we only measure the time it takes to run the isolated file +%% operations and that the actual test runtime can differ significantly, +%% especially on the write side as the files need to be truncated before +%% writing. + +large_writes(Config) when is_list(Config) -> + Modes = [raw, binary], + OpCount = 4096, + Data = <<0:(64 bsl 10)/unit:8>>, + run_write_benchmark(Config, Modes, OpCount, Data). 
+ +large_writes_delayed(Config) when is_list(Config) -> + %% Each write is exactly as large as the delay buffer, causing the writes + %% to pass through each time, giving us a decent idea of how much overhead + %% delayed_write adds. + Modes = [raw, binary, {delayed_write, 64 bsl 10, 2000}], + OpCount = 4096, + Data = <<0:(64 bsl 10)/unit:8>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +tiny_writes(Config) when is_list(Config) -> + Modes = [raw, binary], + OpCount = 512 bsl 10, + Data = <<0>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +tiny_writes_delayed(Config) when is_list(Config) -> + Modes = [raw, binary, {delayed_write, 512 bsl 10, 2000}], + OpCount = 512 bsl 10, + Data = <<0>>, + run_write_benchmark(Config, Modes, OpCount, Data). + +%% The read benchmarks assume that "benchmark_scratch_file" has been filled by +%% the write benchmarks. + +tiny_reads(Config) when is_list(Config) -> + Modes = [raw, binary], + OpCount = 512 bsl 10, + run_read_benchmark(Config, Modes, OpCount, 1). + +tiny_reads_ahead(Config) when is_list(Config) -> + Modes = [raw, binary, {read_ahead, 512 bsl 10}], + OpCount = 512 bsl 10, + run_read_benchmark(Config, Modes, OpCount, 1). + +run_write_benchmark(Config, Modes, OpCount, Data) -> + run_benchmark(Config, [write | Modes], OpCount, fun file:write/2, Data). + +run_read_benchmark(Config, Modes, OpCount, OpSize) -> + run_benchmark(Config, [read | Modes], OpCount, fun file:read/2, OpSize). + +run_benchmark(Config, Modes, OpCount, Fun, Arg) -> + ScratchDir = proplists:get_value(priv_dir, Config), + Path = filename:join(ScratchDir, "benchmark_scratch_file"), + {ok, Fd} = file:open(Path, Modes), + submit_throughput_results(Fun, [Fd, Arg], OpCount). + +submit_throughput_results(Fun, Args, Times) -> + MSecs = measure_repeated_file_op(Fun, Args, Times, millisecond), + IOPS = trunc(Times * (1000 / MSecs)), + ct_event:notify(#event{ name = benchmark_data, data = [{value,IOPS}] }), + {comment, io_lib:format("~p IOPS, ~p ms", [IOPS, trunc(MSecs)])}. + +measure_repeated_file_op(Fun, Args, Times, Unit) -> + Start = os:perf_counter(Unit), + repeated_apply(Fun, Args, Times), + os:perf_counter(Unit) - Start. + +repeated_apply(_F, _Args, Times) when Times =< 0 -> + ok; +repeated_apply(F, Args, Times) -> + erlang:apply(F, Args), + repeated_apply(F, Args, Times - 1). + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% response_analysis(Module, Function, Arguments) -> @@ -3934,7 +4127,7 @@ read_line_create_files(TestData) -> read_line_remove_files(TestData) -> [ file:delete(File) || {_Function,File,_,_} <- TestData ]. -%% read_line with prim_file. +%% read_line with ?PRIM_FILE. read_line_1(Config) when is_list(Config) -> PrivDir = proplists:get_value(priv_dir, Config), All = read_line_testdata(PrivDir), @@ -4103,9 +4296,9 @@ read_line_create7(Filename) -> file:close(F). read_line_all(Filename) -> - {ok,F} = prim_file:open(Filename,[read,binary]), + {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]), X=read_rl_lines(F), - prim_file:close(F), + ?PRIM_FILE:close(F), Bin = list_to_binary([B || {ok,B} <- X]), Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]), "\r\n","\n",[global,{return,binary}]), @@ -4138,7 +4331,7 @@ read_line_all4(Filename) -> {length(X),Bin}. read_rl_lines(F) -> - case prim_file:read_line(F) of + case ?PRIM_FILE:read_line(F) of eof -> []; {error,X} -> @@ -4158,9 +4351,9 @@ read_rl_lines2(F) -> end. 
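%% [Editor's sketch] The new bench group above times raw file operations with
%% os:perf_counter/1 and reports IOPS via ct_event. A hypothetical, stripped-down
%% version of that measurement loop (ops_per_second/2 and repeat/2 are not part
%% of the diff):
ops_per_second(Fun, Times) ->
    Start = os:perf_counter(millisecond),
    ok = repeat(Fun, Times),
    Elapsed = max(os:perf_counter(millisecond) - Start, 1),
    trunc(Times * (1000 / Elapsed)).

repeat(_Fun, 0) -> ok;
repeat(Fun, N) -> Fun(), repeat(Fun, N - 1).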
read_line_all_alternating(Filename) -> - {ok,F} = prim_file:open(Filename,[read,binary]), + {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]), X=read_rl_lines(F,true), - prim_file:close(F), + ?PRIM_FILE:close(F), Bin = list_to_binary([B || {ok,B} <- X]), Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]), "\r\n","\n",[global,{return,binary}]), @@ -4194,8 +4387,8 @@ read_line_all_alternating4(Filename) -> read_rl_lines(F,Alternate) -> case begin case Alternate of - true -> prim_file:read(F,1); - false -> prim_file:read_line(F) + true -> ?PRIM_FILE:read(F,1); + false -> ?PRIM_FILE:read_line(F) end end of eof -> diff --git a/lib/kernel/test/file_name_SUITE.erl b/lib/kernel/test/file_name_SUITE.erl index 899102c908..3afc647081 100644 --- a/lib/kernel/test/file_name_SUITE.erl +++ b/lib/kernel/test/file_name_SUITE.erl @@ -77,6 +77,7 @@ init_per_testcase/2, end_per_testcase/2]). -export([normal/1,icky/1,very_icky/1,normalize/1,home_dir/1]). +-define(PRIM_FILE, prim_file). init_per_testcase(_Func, Config) -> Config. @@ -131,7 +132,7 @@ home_dir(Config) when is_list(Config) -> os:putenv("HOME",NewHome), {"HOME",Save}; _ -> - rm_rf(prim_file,NewHome), + rm_rf(?PRIM_FILE,NewHome), throw(unsupported_os) end, try @@ -145,7 +146,7 @@ home_dir(Config) when is_list(Config) -> _ -> os:putenv(SaveOldName,SaveOldValue) end, - rm_rf(prim_file,NewHome) + rm_rf(?PRIM_FILE,NewHome) end catch throw:need_unicode_mode -> @@ -190,7 +191,7 @@ normal(Config) when is_list(Config) -> try Priv = proplists:get_value(priv_dir, Config), file:set_cwd(Priv), - ok = check_normal(prim_file), + ok = check_normal(?PRIM_FILE), ok = check_normal(file), %% If all is good, delete dir again (avoid hanging dir on windows) rm_rf(file,"normal_dir"), @@ -210,7 +211,7 @@ icky(Config) when is_list(Config) -> try Priv = proplists:get_value(priv_dir, Config), file:set_cwd(Priv), - ok = check_icky(prim_file), + ok = check_icky(?PRIM_FILE), ok = check_icky(file), %% If all is good, delete dir again (avoid hanging dir on windows) rm_rf(file,"icky_dir"), @@ -229,7 +230,7 @@ very_icky(Config) when is_list(Config) -> try Priv = proplists:get_value(priv_dir, Config), file:set_cwd(Priv), - case check_very_icky(prim_file) of + case check_very_icky(?PRIM_FILE) of need_unicode_mode -> {skipped,"VM needs to be started in Unicode filename mode"}; ok -> @@ -292,17 +293,14 @@ check_normal(Mod) -> ok end, [ begin - {ok, FD} = Mod:open(Name,[read]), - {ok, Content} = Mod:read(FD,1024), - ok = file:close(FD) - end || {regular,Name,Content} <- NormalDir ], - [ begin {ok, FD} = Mod:open(Name,[read,binary]), BC = list_to_binary(Content), {ok, BC} = Mod:read(FD,1024), ok = file:close(FD) end || {regular,Name,Content} <- NormalDir ], + {error, badarg} = Mod:rename("fil1\0tmp_fil2","tmp_fil1"), Mod:rename("fil1","tmp_fil1"), + {error, badarg} = Mod:read_file("tmp_fil1\0.txt"), {ok, <<"fil1">>} = Mod:read_file("tmp_fil1"), {error,enoent} = Mod:read_file("fil1"), Mod:rename("tmp_fil1","fil1"), @@ -410,11 +408,6 @@ check_icky(Mod) -> ok end, [ begin - {ok, FD} = Mod:open(Name,[read]), - {ok, Content} = Mod:read(FD,1024), - ok = file:close(FD) - end || {regular,Name,Content} <- IckyDir ], - [ begin {ok, FD} = Mod:open(Name,[read,binary]), BC = list_to_binary([Content]), {ok, BC} = Mod:read(FD,1024), @@ -519,11 +512,6 @@ check_very_icky(Mod) -> ok end, [ begin - {ok, FD} = Mod:open(Name,[read]), - {ok, Content} = Mod:read(FD,1024), - ok = file:close(FD) - end || {regular,Name,Content} <- VeryIckyDir ], - [ begin {ok, FD} = Mod:open(Name,[read,binary]), 
BC = list_to_binary([Content]), {ok, BC} = Mod:read(FD,1024), diff --git a/lib/kernel/test/gen_sctp_SUITE.erl b/lib/kernel/test/gen_sctp_SUITE.erl index 620ab235a0..9dde00652c 100644 --- a/lib/kernel/test/gen_sctp_SUITE.erl +++ b/lib/kernel/test/gen_sctp_SUITE.erl @@ -1038,8 +1038,7 @@ do_from_other_process(Fun) -> Result -> Parent ! {Ref,Result} catch - Class:Reason -> - Stacktrace = erlang:get_stacktrace(), + Class:Reason:Stacktrace -> Parent ! {Ref,Class,Reason,Stacktrace} end end), @@ -1617,8 +1616,7 @@ s_start(Socket, Timeout, Parent) -> try s_loop(Socket, Timeout, Parent, Handler, gb_trees:empty()) catch - Class:Reason -> - Stacktrace = erlang:get_stacktrace(), + Class:Reason:Stacktrace -> io:format(?MODULE_STRING":socket exception ~w:~w at~n" "~p.~n", [Class,Reason,Stacktrace]), erlang:raise(Class, Reason, Stacktrace) diff --git a/lib/kernel/test/gen_tcp_api_SUITE.erl b/lib/kernel/test/gen_tcp_api_SUITE.erl index 12d22519ce..0fe44e8a88 100644 --- a/lib/kernel/test/gen_tcp_api_SUITE.erl +++ b/lib/kernel/test/gen_tcp_api_SUITE.erl @@ -605,9 +605,9 @@ ok({ok,V}) -> V; ok(NotOk) -> try throw(not_ok) catch - Thrown -> + throw:Thrown:Stacktrace -> erlang:raise( - error, {Thrown, NotOk}, tl(erlang:get_stacktrace())) + error, {Thrown, NotOk}, tl(Stacktrace)) end. get_localaddr() -> diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl index 331864b5de..e47023d201 100644 --- a/lib/kernel/test/gen_tcp_misc_SUITE.erl +++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl @@ -1572,52 +1572,56 @@ fill_sendq(Config) when is_list(Config) -> Master = self(), Server = spawn_link(fun () -> - {ok,L} = gen_tcp:listen - (0, [{active,false},binary, - {reuseaddr,true},{packet,0}]), + {ok,L} = gen_tcp:listen(0, [{active,false},binary, + {reuseaddr,true},{packet,0}]), {ok,Port} = inet:port(L), Master ! {self(),client, fill_sendq_client(Port, Master)}, fill_sendq_srv(L, Master) end), io:format("~p Server~n", [Server]), - receive {Server,client,Client} -> - io:format("~p Client~n", [Client]), - receive {Server,reader,Reader} -> - io:format("~p Reader~n", [Reader]), - fill_sendq_loop(Server, Client, Reader) + receive + {Server,client,Client} -> + io:format("~p Client~n", [Client]), + receive + {Server,reader,Reader} -> + io:format("~p Reader~n", [Reader]), + fill_sendq_loop(Server, Client, Reader) end end. fill_sendq_loop(Server, Client, Reader) -> %% Master %% - receive {Server,send} -> + receive + {Server,send} -> fill_sendq_loop(Server, Client, Reader) after 2000 -> %% Send queue full, sender blocked -> close client. io:format("Send timeout, closing Client...~n", []), Client ! 
{self(),close}, - receive {Server,[{error,closed}]} -> - io:format("Got server closed.~n"), - receive {Reader,[{error,closed}]} -> - io:format - ("Got reader closed.~n"), - ok - after 3000 -> - ct:fail({timeout,{closed,reader}}) - end; - {Reader,[{error,closed}]} -> - io:format("Got reader closed.~n"), - receive {Server,[{error,closed}]} -> - io:format("Got server closed~n"), - ok - after 3000 -> - ct:fail({timeout,{closed,server}}) - end - after 3000 -> - ct:fail({timeout,{closed,[server,reader]}}) - end + receive + {Server,[{error,closed}]} -> + io:format("Got server closed.~n"), + receive + {Reader,[{error,closed}]} -> + io:format("Got reader closed.~n"), + ok + after 3000 -> + ct:fail({timeout,{closed,reader}}) + end; + {Reader,[{error,closed}]} -> + io:format("Got reader closed.~n"), + receive + {Server,[{error,closed}]} -> + io:format("Got server closed~n"), + ok + after 3000 -> + ct:fail({timeout,{closed,server}}) + end + after 3000 -> + ct:fail({timeout,{closed,[server,reader]}}) + end end. fill_sendq_srv(L, Master) -> diff --git a/lib/kernel/test/gen_udp_SUITE.erl b/lib/kernel/test/gen_udp_SUITE.erl index aa616d43d6..6a50239c2a 100644 --- a/lib/kernel/test/gen_udp_SUITE.erl +++ b/lib/kernel/test/gen_udp_SUITE.erl @@ -288,58 +288,56 @@ bad_address(Config) when is_list(Config) -> %% %% Starts a slave node that on command sends a bunch of messages %% to our UDP port. The receiving process just receives and -%% ignores the incoming messages, but counts them. -%% A tracing process traces the receiving process for -%% 'receive' and scheduling events. From the trace, -%% message contents is verified; and, how many messages -%% are received per in/out scheduling, which should be -%% the same as the read_packets parameter. -%% -%% What happens on the SMP emulator remains to be seen... -%% +%% ignores the incoming messages. +%% A tracing process traces the receiving port for +%% 'send' and scheduling events. From the trace, +%% how many messages are received per in/out scheduling, +%% which should never be more than the read_packet parameter. %% OTP-6249 UDP option for number of packet reads. read_packets(Config) when is_list(Config) -> - case erlang:system_info(smp_support) of - false -> - read_packets_1(); - true -> - %% We would need some new sort of tracing to test this - %% option reliably in an SMP emulator. - {skip,"SMP emulator"} - end. - -read_packets_1() -> N1 = 5, - N2 = 7, + N2 = 1, + Msgs = 30000, {ok,R} = gen_udp:open(0, [{read_packets,N1}]), {ok,RP} = inet:port(R), {ok,Node} = start_node(gen_udp_SUITE_read_packets), Die = make_ref(), - Loop = erlang:spawn_link(fun () -> infinite_loop(Die) end), %% - Msgs1 = [erlang:integer_to_list(M) || M <- lists:seq(1, N1*3)], - [V1|_] = read_packets_test(R, RP, Msgs1, Node), + {V1, Trace1} = read_packets_test(R, RP, Msgs, Node), {ok,[{read_packets,N1}]} = inet:getopts(R, [read_packets]), %% ok = inet:setopts(R, [{read_packets,N2}]), - Msgs2 = [erlang:integer_to_list(M) || M <- lists:seq(1, N2*3)], - [V2|_] = read_packets_test(R, RP, Msgs2, Node), + {V2, Trace2} = read_packets_test(R, RP, Msgs, Node), {ok,[{read_packets,N2}]} = inet:getopts(R, [read_packets]), %% stop_node(Node), - Mref = erlang:monitor(process, Loop), - Loop ! 
Die, - receive - {'DOWN',Mref,_,_, normal} -> - case {V1,V2} of - {N1,N2} -> - ok; - _ when V1 =/= N1, V2 =/= N2 -> - ok - end + ct:log("N1=~p, V1=~p vs N2=~p, V2=~p",[N1,V1,N2,V2]), + + dump_terms(Config, "trace1.terms", Trace2), + dump_terms(Config, "trace2.terms", Trace2), + + %% Because of the inherit racy-ness of the feature it is + %% hard to test that it behaves correctly. + %% Right now (OTP 21) a port task takes 5% of the + %% allotted port task reductions to execute, so + %% the max number of executions a port is allowed to + %% do before being re-scheduled is N * 20 + + if + V1 > (N1 * 20) -> + ct:fail("Got ~p msgs, max was ~p", [V1, N1]); + V2 > (N2 * 20) -> + ct:fail("Got ~p msgs, max was ~p", [V2, N2]); + true -> + ok end. +dump_terms(Config, Name, Terms) -> + FName = filename:join(proplists:get_value(priv_dir, Config),Name), + file:write_file(FName, term_to_binary(Terms)), + ct:log("Logged terms to ~s",[FName]). + infinite_loop(Die) -> receive Die -> @@ -350,7 +348,6 @@ infinite_loop(Die) -> end. read_packets_test(R, RP, Msgs, Node) -> - Len = length(Msgs), Receiver = self(), Tracer = spawn_link( @@ -375,24 +372,24 @@ read_packets_test(R, RP, Msgs, Node) -> [link,{priority,high}]), receive {Sender,{port,SP}} -> - erlang:trace(self(), true, - [running,'receive',{tracer,Tracer}]), + erlang:trace(R, true, + [running_ports,'send',{tracer,Tracer}]), erlang:yield(), Sender ! {Receiver,go}, - read_packets_recv(Len), - erlang:trace(self(), false, [all]), + read_packets_recv(Msgs), + erlang:trace(R, false, [all]), Tracer ! {Receiver,get_trace}, receive {Tracer,{trace,Trace}} -> - read_packets_verify(R, SP, Msgs, Trace) + {read_packets_verify(R, SP, Trace), Trace} end end. -read_packets_send(S, RP, [Msg|Msgs]) -> - ok = gen_udp:send(S, localhost, RP, Msg), - read_packets_send(S, RP, Msgs); -read_packets_send(_S, _RP, []) -> - ok. +read_packets_send(_S, _RP, 0) -> + ok; +read_packets_send(S, RP, Msgs) -> + ok = gen_udp:send(S, localhost, RP, "UDP FLOOOOOOD"), + read_packets_send(S, RP, Msgs - 1). read_packets_recv(0) -> ok; @@ -404,23 +401,24 @@ read_packets_recv(N) -> timeout end. -read_packets_verify(R, SP, Msg, Trace) -> - lists:reverse( - lists:sort(read_packets_verify(R, SP, Msg, Trace, 0))). - -read_packets_verify(R, SP, Msgs, [{trace,Self,OutIn,_}|Trace], M) - when Self =:= self(), OutIn =:= out; - Self =:= self(), OutIn =:= in -> - push(M, read_packets_verify(R, SP, Msgs, Trace, 0)); -read_packets_verify(R, SP, [Msg|Msgs], - [{trace,Self,'receive',{udp,R,{127,0,0,1},SP,Msg}} - |Trace], M) +read_packets_verify(R, SP, Trace) -> + [Max | _] = Pkts = lists:reverse(lists:sort(read_packets_verify(R, SP, Trace, 0))), + ct:pal("~p",[lists:sublist(Pkts,10)]), + Max. + +read_packets_verify(R, SP, [{trace,R,OutIn,_}|Trace], M) + when OutIn =:= out; OutIn =:= in -> + push(M, read_packets_verify(R, SP, Trace, 0)); +read_packets_verify(R, SP, [{trace, R,'receive',timeout}|Trace], M) -> + push(M, read_packets_verify(R, SP, Trace, 0)); +read_packets_verify(R, SP, + [{trace,R,'send',{udp,R,{127,0,0,1},SP,_Msg}, Self} | Trace], M) when Self =:= self() -> - read_packets_verify(R, SP, Msgs, Trace, M+1); -read_packets_verify(_R, _SP, [], [], M) -> + read_packets_verify(R, SP, Trace, M+1); +read_packets_verify(_R, _SP, [], M) -> push(M, []); -read_packets_verify(_R, _SP, Msgs, Trace, M) -> - ct:fail({read_packets_verify,mismatch,Msgs,Trace,M}). +read_packets_verify(_R, _SP, Trace, M) -> + ct:fail({read_packets_verify,mismatch,Trace,M}). 
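%% [Editor's sketch] read_packets now traces the UDP port itself with the
%% running_ports and 'send' trace flags instead of tracing the receiving process.
%% A hypothetical, self-contained helper using the same flags and collecting the
%% resulting trace messages in the calling process:
trace_port_sends(Port, Fun) ->
    erlang:trace(Port, true, [running_ports, 'send', {tracer, self()}]),
    Fun(),
    erlang:trace(Port, false, [all]),
    collect_trace([]).

collect_trace(Acc) ->
    receive
        Msg when element(1, Msg) =:= trace -> collect_trace([Msg | Acc])
    after 100 ->
        lists:reverse(Acc)
    end.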
push(0, Vs) -> Vs; @@ -757,9 +755,9 @@ ok({ok,V}) -> V; ok(NotOk) -> try throw(not_ok) catch - Thrown -> + throw:Thrown:Stacktrace -> erlang:raise( - error, {Thrown, NotOk}, tl(erlang:get_stacktrace())) + error, {Thrown, NotOk}, tl(Stacktrace)) end. diff --git a/lib/kernel/test/global_SUITE.erl b/lib/kernel/test/global_SUITE.erl index 0a7f73c344..0e7b7adc47 100644 --- a/lib/kernel/test/global_SUITE.erl +++ b/lib/kernel/test/global_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2016. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -3470,8 +3470,8 @@ start_procs(Parent, N1, N2, N3, Config) -> Pid6 = rpc:call(N3, ?MODULE, start_proc3, [test4]), assert_pid(Pid6), yes = global:register_name(test1, Pid3), - yes = global:register_name(test2, Pid4, {global, notify_all_name}), - yes = global:register_name(test3, Pid5, {global, random_notify_name}), + yes = global:register_name(test2, Pid4, fun global:notify_all_name/3), + yes = global:register_name(test3, Pid5, fun global:random_notify_name/3), Resolve = fun(Name, Pid1, Pid2) -> Parent ! {resolve_called, Name, node()}, {Min, Max} = minmax(Pid1, Pid2), @@ -3546,7 +3546,7 @@ start_proc_basic(Name) -> end. init_proc_basic(Parent, Name) -> - X = global:register_name(Name, self(), {?MODULE, fix_basic_name}), + X = global:register_name(Name, self(), fun ?MODULE:fix_basic_name/3), Parent ! {self(),X}, loop(). @@ -3791,15 +3791,6 @@ stop() -> test_server:stop_node(Node) end, nodes()). -dbg_logs(Name) -> dbg_logs(Name, ?NODES). - -dbg_logs(Name, Nodes) -> - lists:foreach(fun(N) -> - F = lists:concat([Name, ".log.", N, ".txt"]), - ok = sys:log_to_file({global_name_server, N}, F) - end, Nodes). - - %% Tests that locally loaded nodes do not loose contact with other nodes. 
global_lost_nodes(Config) when is_list(Config) -> Timeout = 60, diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl index 45032faf6d..e95635b800 100644 --- a/lib/kernel/test/heart_SUITE.erl +++ b/lib/kernel/test/heart_SUITE.erl @@ -168,7 +168,7 @@ reboot(Config) when is_list(Config) -> {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), rpc:call(Node, init, reboot, []), receive @@ -203,7 +203,7 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) -> [{"ERL_CRASH_DUMP_SECONDS", "0"}]), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), Mod = exhaust_atoms, @@ -254,7 +254,7 @@ node_start_soon_after_crash_test(Config) when is_list(Config) -> [{"ERL_CRASH_DUMP_SECONDS", "10"}]), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), Mod = exhaust_atoms, @@ -309,7 +309,7 @@ set_cmd(Config) when is_list(Config) -> clear_cmd(Config) when is_list(Config) -> {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME), ok = rpc:call(Node, heart, set_cmd, - [atom_to_list(lib:progname()) ++ + [ct:get_progname() ++ " -noshell -heart " ++ name(Node) ++ "&"]), rpc:call(Node, init, reboot, []), receive @@ -346,9 +346,16 @@ clear_cmd(Config) when is_list(Config) -> get_cmd(Config) when is_list(Config) -> {ok, Node} = start_check(slave, ?UNIQ_NODE_NAME), - Cmd = "test", - ok = rpc:call(Node, heart, set_cmd, [Cmd]), - {ok, Cmd} = rpc:call(Node, heart, get_cmd, []), + + ShortCmd = "test", + ok = rpc:call(Node, heart, set_cmd, [ShortCmd]), + {ok, ShortCmd} = rpc:call(Node, heart, get_cmd, []), + + %% This would hang prior to OTP-15024 being fixed. + LongCmd = [$a || _ <- lists:seq(1, 160)], + ok = rpc:call(Node, heart, set_cmd, [LongCmd]), + {ok, LongCmd} = rpc:call(Node, heart, get_cmd, []), + stop_node(Node), ok. diff --git a/lib/kernel/test/inet_SUITE.erl b/lib/kernel/test/inet_SUITE.erl index 3b502be8b8..2e5f8c7d2c 100644 --- a/lib/kernel/test/inet_SUITE.erl +++ b/lib/kernel/test/inet_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1997-2017. All Rights Reserved. +%% Copyright Ericsson AB 1997-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -40,7 +40,8 @@ lookup_bad_search_option/1, getif/1, getif_ifr_name_overflow/1,getservbyname_overflow/1, getifaddrs/1, - parse_strict_address/1, simple_netns/1, simple_netns_open/1, + parse_strict_address/1, ipv4_mapped_ipv6_address/1, + simple_netns/1, simple_netns_open/1, simple_bind_to_device/1, simple_bind_to_device_open/1]). -export([get_hosts/1, get_ipv6_hosts/1, parse_hosts/1, parse_address/1, @@ -667,6 +668,26 @@ parse_strict_address(Config) when is_list(Config) -> {ok, {3089,3106,23603,50240,0,0,119,136}} = inet:parse_strict_address("c11:0c22:5c33:c440::077:0088"). 
+ipv4_mapped_ipv6_address(Config) when is_list(Config) -> + {D1,D2,D3,D4} = IPv4Address = + {rand:uniform(256) - 1, + rand:uniform(256) - 1, + rand:uniform(256) - 1, + rand:uniform(256) - 1}, + E7 = (D1 bsl 8) bor D2, + E8 = (D3 bsl 8) bor D4, + io:format("IPv4Address: ~p.~n", [IPv4Address]), + {0,0,0,0,0,65535,E7,E8} = inet:ipv4_mapped_ipv6_address(IPv4Address), + IPv6Address = + {rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, + rand:uniform(65536) - 1, E7, E8}, + IPv4Address = inet:ipv4_mapped_ipv6_address(IPv6Address), + ok. + t_gethostnative(Config) when is_list(Config) -> %% this will result in 26 bytes sent which causes problem in Windows %% if the port-program has not assured stdin to be read in BINARY mode @@ -1083,11 +1104,9 @@ ifaddrs([{If,Opts}|IOs]) -> #ifopts{flags=F} = Ifopts = check_ifopts(Opts, #ifopts{name=If}), case F of {flags,Flags} -> - case lists:member(up, Flags) of - true -> - Ifopts#ifopts.addrs; - false -> - [] + case lists:member(running, Flags) of + true -> Ifopts#ifopts.addrs; + false -> [] end ++ ifaddrs(IOs); undefined -> ifaddrs(IOs) diff --git a/lib/kernel/test/inet_res_SUITE.erl b/lib/kernel/test/inet_res_SUITE.erl index 6691ad9c06..2a5b8d0044 100644 --- a/lib/kernel/test/inet_res_SUITE.erl +++ b/lib/kernel/test/inet_res_SUITE.erl @@ -217,10 +217,10 @@ proxy_start(TC, {NS,P}) -> spawn_link( fun () -> try proxy_start(TC, NS, P, Parent, Tag) - catch C:X -> + catch C:X:Stacktrace -> io:format( "~w: ~w:~p ~p~n", - [self(),C,X,erlang:get_stacktrace()]) + [self(),C,X,Stacktrace]) end end), receive {started,Tag,Port} -> diff --git a/lib/kernel/test/init_SUITE.erl b/lib/kernel/test/init_SUITE.erl index 2b59eb2bfe..6a006cdc01 100644 --- a/lib/kernel/test/init_SUITE.erl +++ b/lib/kernel/test/init_SUITE.erl @@ -1,7 +1,7 @@ %% %% %CopyrightBegin% %% -%% Copyright Ericsson AB 1996-2016. All Rights Reserved. +%% Copyright Ericsson AB 1996-2018. All Rights Reserved. %% %% Licensed under the Apache License, Version 2.0 (the "License"); %% you may not use this file except in compliance with the License. @@ -299,7 +299,7 @@ many_restarts() -> many_restarts(Config) when is_list(Config) -> {ok, Node} = loose_node:start(init_test, "", ?DEFAULT_TIMEOUT_SEC), - loop_restart(50,Node,rpc:call(Node,erlang,whereis,[error_logger])), + loop_restart(50,Node,rpc:call(Node,erlang,whereis,[logger])), loose_node:stop(Node), ok. @@ -316,13 +316,13 @@ loop_restart(N,Node,EHPid) -> ct:fail(not_stopping) end, ok = wait_for(30, Node, EHPid), - loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[error_logger])). + loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[logger])). wait_for(0,Node,_) -> loose_node:stop(Node), error; wait_for(N,Node,EHPid) -> - case rpc:call(Node, erlang, whereis, [error_logger]) of + case rpc:call(Node, erlang, whereis, [logger]) of Pid when is_pid(Pid), Pid =/= EHPid -> %% erlang:display(ok), ok; @@ -365,7 +365,9 @@ restart(Config) when is_list(Config) -> %% Ok, the node is up, now the real test test begins. 
erlang:monitor_node(Node, true), SysProcs0 = rpc:call(Node, ?MODULE, find_system_processes, []), - [InitPid, PurgerPid, LitCollectorPid, DirtyCodePid] = SysProcs0, + io:format("SysProcs0=~p~n", [SysProcs0]), + [InitPid, PurgerPid, LitCollectorPid, + DirtySigNPid, DirtySigHPid, DirtySigMPid] = SysProcs0, InitPid = rpc:call(Node, erlang, whereis, [init]), PurgerPid = rpc:call(Node, erlang, whereis, [erts_code_purger]), Procs = rpc:call(Node, erlang, processes, []), @@ -381,7 +383,9 @@ restart(Config) when is_list(Config) -> ok = wait_restart(30, Node), SysProcs1 = rpc:call(Node, ?MODULE, find_system_processes, []), - [InitPid1, PurgerPid1, LitCollectorPid1, DirtyCodePid1] = SysProcs1, + io:format("SysProcs1=~p~n", [SysProcs1]), + [InitPid1, PurgerPid1, LitCollectorPid1, + DirtySigNPid1, DirtySigHPid1, DirtySigMPid1] = SysProcs1, %% Still the same init process! InitPid1 = rpc:call(Node, erlang, whereis, [init]), @@ -394,20 +398,18 @@ restart(Config) when is_list(Config) -> PurgerP = pid_to_list(PurgerPid1), %% and same literal area collector process! - case LitCollectorPid of - undefined -> undefined = LitCollectorPid1; - _ -> - LitCollectorP = pid_to_list(LitCollectorPid), - LitCollectorP = pid_to_list(LitCollectorPid1) - end, - - %% and same dirty process code checker process! - case DirtyCodePid of - undefined -> undefined = DirtyCodePid1; - _ -> - DirtyCodeP = pid_to_list(DirtyCodePid), - DirtyCodeP = pid_to_list(DirtyCodePid1) - end, + LitCollectorP = pid_to_list(LitCollectorPid), + LitCollectorP = pid_to_list(LitCollectorPid1), + + %% and same normal dirty signal handler process! + DirtySigNP = pid_to_list(DirtySigNPid), + DirtySigNP = pid_to_list(DirtySigNPid1), + %% and same high dirty signal handler process! + DirtySigHP = pid_to_list(DirtySigHPid), + DirtySigHP = pid_to_list(DirtySigHPid1), + %% and same max dirty signal handler process! + DirtySigMP = pid_to_list(DirtySigMPid), + DirtySigMP = pid_to_list(DirtySigMPid1), NewProcs0 = rpc:call(Node, erlang, processes, []), NewProcs = NewProcs0 -- SysProcs1, @@ -433,7 +435,9 @@ restart(Config) when is_list(Config) -> -record(sys_procs, {init, code_purger, literal_collector, - dirty_proc_checker}). + dirty_sig_handler_normal, + dirty_sig_handler_high, + dirty_sig_handler_max}). find_system_processes() -> find_system_procs(processes(), #sys_procs{}). 
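The updated find_system_procs/2 above tells the three dirty signal handler processes apart by asking process_info/2 for both initial_call and priority in one call. A standalone sketch of the same criterion (not part of the suite):

    %% Lists the three erts_dirty_process_signal_handler processes with their
    %% priorities; a non-matching or dead process simply falls out of the
    %% comprehension.
    DirtySigHandlers =
        [{P, Prio} ||
            P <- erlang:processes(),
            [{initial_call, {erts_dirty_process_signal_handler, start, 0}},
             {priority, Prio}] <- [erlang:process_info(P, [initial_call, priority])]].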
@@ -442,21 +446,32 @@ find_system_procs([], SysProcs) -> [SysProcs#sys_procs.init, SysProcs#sys_procs.code_purger, SysProcs#sys_procs.literal_collector, - SysProcs#sys_procs.dirty_proc_checker]; + SysProcs#sys_procs.dirty_sig_handler_normal, + SysProcs#sys_procs.dirty_sig_handler_high, + SysProcs#sys_procs.dirty_sig_handler_max]; find_system_procs([P|Ps], SysProcs) -> - case process_info(P, initial_call) of - {initial_call,{otp_ring0,start,2}} -> + case process_info(P, [initial_call, priority]) of + [{initial_call,{otp_ring0,start,2}},_] -> undefined = SysProcs#sys_procs.init, find_system_procs(Ps, SysProcs#sys_procs{init = P}); - {initial_call,{erts_code_purger,start,0}} -> + [{initial_call,{erts_code_purger,start,0}},_] -> undefined = SysProcs#sys_procs.code_purger, find_system_procs(Ps, SysProcs#sys_procs{code_purger = P}); - {initial_call,{erts_literal_area_collector,start,0}} -> + [{initial_call,{erts_literal_area_collector,start,0}},_] -> undefined = SysProcs#sys_procs.literal_collector, find_system_procs(Ps, SysProcs#sys_procs{literal_collector = P}); - {initial_call,{erts_dirty_process_code_checker,start,0}} -> - undefined = SysProcs#sys_procs.dirty_proc_checker, - find_system_procs(Ps, SysProcs#sys_procs{dirty_proc_checker = P}); + [{initial_call,{erts_dirty_process_signal_handler,start,0}}, + {priority,normal}] -> + undefined = SysProcs#sys_procs.dirty_sig_handler_normal, + find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_normal = P}); + [{initial_call,{erts_dirty_process_signal_handler,start,0}}, + {priority,high}] -> + undefined = SysProcs#sys_procs.dirty_sig_handler_high, + find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_high = P}); + [{initial_call,{erts_dirty_process_signal_handler,start,0}}, + {priority,max}] -> + undefined = SysProcs#sys_procs.dirty_sig_handler_max, + find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_max = P}); _ -> find_system_procs(Ps, SysProcs) end. diff --git a/lib/kernel/test/kernel.spec b/lib/kernel/test/kernel.spec index 62afc9f97b..86d2155828 100644 --- a/lib/kernel/test/kernel.spec +++ b/lib/kernel/test/kernel.spec @@ -2,3 +2,4 @@ {config, "../test_server/ts.unix.config"}. {suites,"../kernel_test", all}. +{skip_suites,"../kernel_test",[logger_bench_SUITE],"Not ready"}. diff --git a/lib/kernel/test/kernel_SUITE.erl b/lib/kernel/test/kernel_SUITE.erl index da56359294..7898988dbe 100644 --- a/lib/kernel/test/kernel_SUITE.erl +++ b/lib/kernel/test/kernel_SUITE.erl @@ -30,14 +30,14 @@ -export([init_per_testcase/2, end_per_testcase/2]). %% Test cases must be exported. --export([app_test/1, appup_test/1]). +-export([app_test/1, appup_test/1, refc/1]). suite() -> [{ct_hooks,[ts_install_cth]}, {timetrap,{minutes,2}}]. all() -> - [app_test, appup_test]. + [app_test, appup_test, refc]. groups() -> []. @@ -163,3 +163,68 @@ check_appup([Vsn|Vsns],Instrs,Expected) -> end; check_appup([],_,_) -> ok. + +%%% Check that refc module handles the counters as expected +refc(_Config) -> + Enable = fun(Enable) -> erlang:system_flag(scheduler_wall_time, Enable) end, + IsOn = fun() -> undefined /= erlang:statistics(scheduler_wall_time) end, + Tester = self(), + Loop = fun Loop() -> + receive + die -> normal; + {apply, Bool} -> + Res = Enable(Bool), + Tester ! {self(), Res}, + Loop() + end + end, + + %% Counter should be 0 + false = Enable(false), + + false = Enable(true), + true = Enable(true), + true = Enable(false), + true = Enable(false), + + %% Counter should be 0 + false = IsOn(), + + P1 = spawn_link(Loop), + P1 ! 
{apply, true}, + receive {P1, R1} -> false = R1 end, + + %% P1 has turned it on counter should be one + true = IsOn(), + true = Enable(true), + true = Enable(false), + true = IsOn(), + + P1 ! {apply, false}, + receive {P1, R2} -> true = R2 end, + false = IsOn(), + + P1 ! {apply, true}, + receive {P1, R3} -> false = R3 end, + true = IsOn(), + true = Enable(false), + + + P1 ! die, + timer:sleep(100), + false = IsOn(), + false = Enable(false), + + P2 = spawn_link(Loop), + P2 ! {apply, true}, + receive {P2, R4} -> false = R4 end, + true = IsOn(), + P2 ! {apply, true}, + receive {P2, R5} -> true = R5 end, + true = IsOn(), + + P2 ! die, + timer:sleep(100), + false = IsOn(), + + ok. diff --git a/lib/kernel/test/kernel_bench.spec b/lib/kernel/test/kernel_bench.spec index 8de60dae31..4de133f21b 100644 --- a/lib/kernel/test/kernel_bench.spec +++ b/lib/kernel/test/kernel_bench.spec @@ -1 +1,2 @@ {groups,"../kernel_test",zlib_SUITE,[bench]}. +{groups,"../kernel_test",file_SUITE,[bench]}. diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl index 9a4578917d..a21020ff97 100644 --- a/lib/kernel/test/kernel_config_SUITE.erl +++ b/lib/kernel/test/kernel_config_SUITE.erl @@ -76,7 +76,7 @@ sync(Conf) when is_list(Conf) -> %% Reset wall_clock {T1,_} = erlang:statistics(wall_clock), io:format("~p~n", [{t1, T1}]), - Command = lists:concat([lib:progname(), + Command = lists:append([ct:get_progname(), " -detached -sname cp1 ", "-config ", Config, " -env ERL_CRASH_DUMP erl_crash_dump.cp1"]), diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover new file mode 100644 index 0000000000..b30bcfe920 --- /dev/null +++ b/lib/kernel/test/logger.cover @@ -0,0 +1,14 @@ +%% -*- erlang -*- +{incl_mods,[error_logger, + logger, + logger_backend, + logger_config, + logger_disk_log_h, + logger_h_common, + logger_filters, + logger_formatter, + logger_server, + logger_simple, + logger_std_h, + logger_sup]}. + diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec new file mode 100644 index 0000000000..cd76a754a4 --- /dev/null +++ b/lib/kernel/test/logger.spec @@ -0,0 +1,11 @@ +%% -*-erlang-*- +{suites,"../kernel_test", [error_logger_SUITE, + error_logger_warn_SUITE, + logger_SUITE, + logger_disk_log_h_SUITE, + logger_env_var_SUITE, + logger_filters_SUITE, + logger_formatter_SUITE, + logger_legacy_SUITE, + logger_simple_SUITE, + logger_std_h_SUITE]}. diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl new file mode 100644 index 0000000000..f311a9c7ed --- /dev/null +++ b/lib/kernel/test/logger_SUITE.erl @@ -0,0 +1,831 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). 
+ +-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). +-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). + +-define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, + file=>?FILE, line=>?LINE-N}). + +-define(TRY(X), my_try(fun() -> X end)). + + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + case logger:get_handler_config(logger_std_h) of + {ok,StdH} -> + ok = logger:remove_handler(logger_std_h), + [{logger_std_h,StdH}|Config]; + _ -> + Config + end. + +end_per_suite(Config) -> + case ?config(logger_std_h,Config) of + {HMod,HConfig} -> + ok = logger:add_handler(logger_std_h,HMod,HConfig); + _ -> + ok + end. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + {ok,LC} = logger:get_logger_config(), + [{logger_config,LC}|Config]. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop, + add_remove_handler, + multiple_handlers, + add_remove_filter, + change_config, + set_formatter, + log_all_levels_api, + macros, + set_level, + set_level_module, + cache_level_module, + format_report, + filter_failed, + handler_failed, + config_sanity_check, + log_failed, + emulator, + via_logger_process, + other_node, + compare_levels, + process_metadata]. + +start_stop(_Config) -> + S = whereis(logger), + true = is_pid(S), + ok. + +add_remove_handler(_Config) -> + register(callback_receiver,self()), + {ok,#{handlers:=Hs0}} = logger:get_logger_config(), + {error,{not_found,h1}} = logger:get_handler_config(h1), + ok = logger:add_handler(h1,?MODULE,#{}), + [add] = test_server:messages_get(), + {ok,#{handlers:=Hs}} = logger:get_logger_config(), + [h1|Hs0] = Hs, + {ok,{?MODULE,#{level:=info,filters:=[],filter_default:=log}}} = % defaults + logger:get_handler_config(h1), + ok = logger:set_handler_config(h1,filter_default,stop), + [changing_config] = test_server:messages_get(), + ?LOG_INFO("hello",[]), + ok = check_no_log(), + ok = logger:set_handler_config(h1,filter_default,log), + [changing_config] = test_server:messages_get(), + {ok,{?MODULE,#{filter_default:=log}}} = logger:get_handler_config(h1), + ?LOG_INFO("hello",[]), + ok = check_logged(info,"hello",[],?MY_LOC(1)), + ok = logger:remove_handler(h1), + [remove] = test_server:messages_get(), + {ok,#{handlers:=Hs0}} = logger:get_logger_config(), + {error,{not_found,h1}} = logger:get_handler_config(h1), + {error,{not_found,h1}} = logger:remove_handler(h1), + logger:info("hello",[]), + ok = check_no_log(), + ok. + +add_remove_handler(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +multiple_handlers(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + ok = logger:add_handler(h2,?MODULE,#{level=>error,filter_default=>log}), + ?LOG_ERROR("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + ?LOG_INFO("hello",[]), + ok = check_logged(info,"hello",[],?MY_LOC(1)), + ok = check_no_log(), + ok. + +multiple_handlers(cleanup,_Config) -> + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. 
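add_remove_handler/1 and multiple_handlers/1 register this suite itself as a handler callback module. For a standalone illustration of what the logger API above expects, the smallest callback module only needs log/2; the module name my_console_h and its output format are made up:

    -module(my_console_h).
    -export([log/2]).

    %% Called for every log event that passes the handler's level and filters.
    log(#{level := Level, msg := Msg}, _Config) ->
        io:format("~p: ~p~n", [Level, Msg]).

    %% Attached with the same call the tests use, picking up the defaults the
    %% suite checks (level info, no filters, filter_default log):
    %% ok = logger:add_handler(my_h, my_console_h, #{}).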
+ +add_remove_filter(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + LF = {fun(Log,_) -> Log#{level=>error} end, []}, + ok = logger:add_logger_filter(lf,LF), + {error,{already_exist,lf}} = logger:add_logger_filter(lf,LF), + {error,{already_exist,lf}} = logger:add_logger_filter(lf,{fun(Log,_) -> + Log + end, []}), + ?LOG_INFO("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_no_log(), + + ok = logger:add_handler(h2,?MODULE,#{level=>info,filter_default=>log}), + HF = {fun(#{level:=error}=Log,_) -> + Log#{level=>mylevel}; + (_,_) -> + ignore + end, + []}, + ok = logger:add_handler_filter(h1,hf,HF), + {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,HF), + {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,{fun(Log,_) -> + Log + end, []}), + ?LOG_INFO("hello",[]), + ok = check_logged(mylevel,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + + ok = logger:remove_logger_filter(lf), + {error,{not_found,lf}} = logger:remove_logger_filter(lf), + + ?LOG_INFO("hello",[]), + ok = check_logged(info,"hello",[],?MY_LOC(1)), + ok = check_logged(info,"hello",[],?MY_LOC(2)), + + ?LOG_ERROR("hello",[]), + ok = check_logged(mylevel,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + + ok = logger:remove_handler_filter(h1,hf), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + ?LOG_INFO("hello",[]), + ok = check_logged(info,"hello",[],?MY_LOC(1)), + ok = check_logged(info,"hello",[],?MY_LOC(2)), + + ?LOG_ERROR("hello",[]), + ok = check_logged(error,"hello",[],?MY_LOC(1)), + ok = check_logged(error,"hello",[],?MY_LOC(2)), + ok. + +add_remove_filter(cleanup,_Config) -> + logger:remove_logger_filter(lf), + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. + +change_config(_Config) -> + %% Overwrite handler config - check that defaults are added + ok = logger:add_handler(h1,?MODULE,#{level=>debug,custom=>custom}), + {ok,{?MODULE,#{level:=debug,filter_default:=log,custom:=custom}}} = + logger:get_handler_config(h1), + register(callback_receiver,self()), + ok = logger:set_handler_config(h1,#{filter_default=>stop}), + [changing_config] = test_server:messages_get(), + {ok,{?MODULE,#{level:=info,filter_default:=stop}=C2}} = + logger:get_handler_config(h1), + false = maps:is_key(custom,C2), + {error,fail} = logger:set_handler_config(h1,#{fail=>true}), + {error,{attempting_syncronous_call_to_self,_}} = + logger:set_handler_config( + h1,#{call=>fun() -> logger:set_module_level(?MODULE,debug) end}), + {ok,{?MODULE,C2}} = logger:get_handler_config(h1), + + %% Change one key only + {error,fail} = logger:set_handler_config(h1,fail,true), + ok = logger:set_handler_config(h1,custom,custom), + [changing_config] = test_server:messages_get(), + {ok,{?MODULE,#{custom:=custom}=C3}} = logger:get_handler_config(h1), + C2 = maps:remove(custom,C3), + + %% Overwrite logger config - check that defaults are added + {ok,LConfig} = logger:get_logger_config(), + ok = logger:set_logger_config(#{filter_default=>stop}), + {ok,#{level:=info,filters:=[],handlers:=[],filter_default:=stop}=LC1} = + logger:get_logger_config(), + 4 = maps:size(LC1), + + %% Change one key only + ok = logger:set_logger_config(handlers,[h1]), + {ok,#{level:=info,filters:=[],handlers:=[h1],filter_default:=stop}} = + logger:get_logger_config(), + + %% Cleanup + ok = logger:set_logger_config(LConfig), + [] = test_server:messages_get(), + + ok. 
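change_config/1 above relies on the difference between the one-key and the whole-map forms of set_handler_config. Condensed to the contract the test verifies, using the same handler id h1:

    ok = logger:set_handler_config(h1, level, debug),      %% updates one key, keeps the rest
    ok = logger:set_handler_config(h1, #{level => debug}). %% replaces the config; omitted keys
                                                           %% are reset to their defaults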
+ +change_config(cleanup,Config) -> + logger:remove_handler(h1), + LC = ?config(logger_config,Config), + logger:set_logger_config(LC), + ok. + +set_formatter(_Config) -> + {error,{not_found,h1}}=logger:set_handler_config(h1,formatter,{?MODULE,[]}), + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + ok = logger:set_handler_config(h1,formatter,{?MODULE,[]}), + logger:info("hello",[]), + receive + {_Log,#{formatter:={?MODULE,[]}}} -> + ok + after 500 -> + ct:fail({timeout,no_log,process_info(self(),messages)}) + end, + ok. + +set_formatter(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +log_all_levels_api(_Config) -> + ok = logger:set_logger_config(level,debug), + ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}), + test_api(emergency), + test_api(alert), + test_api(critical), + test_api(error), + test_api(warning), + test_api(notice), + test_api(info), + test_api(debug), + test_log_function(emergency), + ok. + +log_all_levels_api(cleanup,_Config) -> + logger:remove_handler(h1), + logger:set_logger_config(level,info), + ok. + +macros(_Config) -> + ok = logger:set_module_level(?MODULE,debug), + ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}), + test_macros(emergency), + ok. + +macros(cleanup,_Config) -> + logger:remove_handler(h1), + logger:reset_module_level(?MODULE), + ok. + +set_level(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}), + logger:debug(?map_rep), + ok = check_no_log(), + logger:info(M1=?map_rep), + ok = check_logged(info,M1,#{}), + ok = logger:set_logger_config(level,debug), + logger:debug(M2=?map_rep), + ok = check_logged(debug,M2,#{}), + ok. + +set_level(cleanup,_Config) -> + logger:remove_handler(h1), + logger:set_logger_config(level,info), + ok. + +set_level_module(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + {error,{invalid_level,bad}} = logger:set_module_level(?MODULE,bad), + {error,{not_a_module,{bad}}} = logger:set_module_level({bad},warning), + ok = logger:set_module_level(?MODULE,warning), + logger:info(?map_rep,?MY_LOC(0)), + ok = check_no_log(), + logger:warning(M1=?map_rep,?MY_LOC(0)), + ok = check_logged(warning,M1,?MY_LOC(1)), + ok = logger:set_module_level(?MODULE,info), + logger:info(M2=?map_rep,?MY_LOC(0)), + ok = check_logged(info,M2,?MY_LOC(1)), + + {error,{not_a_module,{bad}}} = logger:reset_module_level({bad}), + ok = logger:reset_module_level(?MODULE), + + ok. + +set_level_module(cleanup,_Config) -> + logger:remove_handler(h1), + logger:reset_module_level(?MODULE), + ok. + +cache_level_module(_Config) -> + ok = logger:reset_module_level(?MODULE), + [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? + ?LOG_INFO(?map_rep), + %% Caching is done asynchronously, so wait a bit for the update + timer:sleep(100), + [_] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? + ok = logger:reset_module_level(?MODULE), + [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config? + ok. + +cache_level_module(cleanup,_Config) -> + logger:reset_module_level(?MODULE), + ok. 
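set_level_module/1 and cache_level_module/1 exercise the per-module level override. In application code the same calls look like this; my_noisy_mod is a placeholder module name:

    %% Silence info/debug events originating from this module only.
    ok = logger:set_module_level(my_noisy_mod, error),
    %% Later, fall back to the global logger level again.
    ok = logger:reset_module_level(my_noisy_mod).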
+ +format_report(_Config) -> + {"~ts",["string"]} = logger:format_report("string"), + {"~tp",[term]} = logger:format_report(term), + {"~tp",[[]]} = logger:format_report([]), + {" ~tp: ~tp",[key,value]} = logger:format_report([{key,value}]), + KeyVals = [{key1,value1},{key2,"value2"},{key3,[]}], + KeyValRes = + {" ~tp: ~tp\n ~tp: ~ts\n ~tp: ~tp", + [key1,value1,key2,"value2",key3,[]]} = + logger:format_report(KeyVals), + KeyValRes = logger:format_report(maps:from_list(KeyVals)), + KeyValRes = logger:format_otp_report(#{label=>{?MODULE,test},report=>KeyVals}), + {" ~tp: ~tp\n ~tp: ~tp", + [label,{?MODULE,test},report,KeyVals]} = + logger:format_report(#{label=>{?MODULE,test},report=>KeyVals}), + + {" ~tp: ~tp\n ~tp",[key1,value1,term]} = + logger:format_report([{key1,value1},term]), + + {" ~tp: ~tp\n ~tp",[key1,value1,[]]} = + logger:format_report([{key1,value1},[]]), + + {"~tp",[[]]} = logger:format_report([[],[],[]]), + + ok. + +filter_failed(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + + %% Logger filters + {error,{invalid_filter,_}} = + logger:add_logger_filter(lf,{fun(_) -> ok end,args}), + ok = logger:add_logger_filter(lf,{fun(_,_) -> a=b end,args}), + {ok,#{filters:=[_]}} = logger:get_logger_config(), + ok = logger:info(M1=?map_rep), + ok = check_logged(info,M1,#{}), + {error,{not_found,lf}} = logger:remove_logger_filter(lf), + + ok = logger:add_logger_filter(lf,{fun(_,_) -> faulty_return end,args}), + {ok,#{filters:=[_]}} = logger:get_logger_config(), + ok = logger:info(M2=?map_rep), + ok = check_logged(info,M2,#{}), + {error,{not_found,lf}} = logger:remove_logger_filter(lf), + + %% Handler filters + {error,{not_found,h0}} = + logger:add_handler_filter(h0,hf,{fun(_,_) -> ignore end,args}), + {error,{not_found,h0}} = logger:remove_handler_filter(h0,hf), + {error,{invalid_filter,_}} = + logger:add_handler_filter(h1,hf,{fun(_) -> ok end,args}), + ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> a=b end,args}), + {ok,{?MODULE,#{filters:=[_]}}} = logger:get_handler_config(h1), + ok = logger:info(M3=?map_rep), + ok = check_logged(info,M3,#{}), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + + ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> faulty_return end,args}), + {ok,{?MODULE,#{filters:=[_]}}} = logger:get_handler_config(h1), + ok = logger:info(M4=?map_rep), + ok = check_logged(info,M4,#{}), + {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf), + + ok. + +filter_failed(cleanup,_Config) -> + logger:remove_handler(h1), + ok. 
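filter_failed/1 checks how badly formed or crashing filters are handled. For contrast, a well-formed filter as accepted by add_logger_filter/add_handler_filter above is a {Fun/2, Args} pair whose fun returns a (possibly modified) log event, stop, or ignore; the filter id stop_info is illustrative:

    StopInfo = {fun(#{level := info}, _Args) -> stop;    %% drop info events
                   (_LogEvent, _Args)        -> ignore   %% let other filters decide
                end,
                []},
    ok = logger:add_logger_filter(stop_info, StopInfo).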
+ +handler_failed(_Config) -> + {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}), + {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}), + {error,{invalid_handler_config,bad}} = logger:add_handler(h1,?MODULE,bad), + {error,{invalid_filters,false}} = + logger:add_handler(h1,?MODULE,#{filters=>false}), + {error,{invalid_filter_default,true}} = + logger:add_handler(h1,?MODULE,#{filter_default=>true}), + {error,{invalid_formatter,[]}} = + logger:add_handler(h1,?MODULE,#{formatter=>[]}), + ok = logger:add_handler(h1,nomodule,#{filter_default=>log}), + logger:info(?map_rep), + check_no_log(), + #{logger:=#{handlers:=Ids1}, + handlers:=H1} = logger:i(), + false = lists:member(h1,Ids1), + false = lists:keymember(h1,1,H1), + {error,{not_found,h1}} = logger:remove_handler(h1), + + ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,crash=>true}), + {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}), + + logger:info(?map_rep), + check_no_log(), + #{logger:=#{handlers:=Ids2}, + handlers:=H2} = logger:i(), + false = lists:member(h2,Ids2), + false = lists:keymember(h2,1,H2), + {error,{not_found,h2}} = logger:remove_handler(h2), + + ok. + +handler_failed(cleanup,_Config) -> + logger:remove_handler(h1), + logger:remove_handler(h2), + ok. + +config_sanity_check(_Config) -> + %% Logger config + {error,{invalid_filter_default,bad}} = + logger:set_logger_config(filter_default,bad), + {error,{invalid_level,bad}} = logger:set_logger_config(level,bad), + {error,{invalid_handlers,bad}} = logger:set_logger_config(handlers,bad), + {error,{invalid_id,{bad,bad}}} = + logger:set_logger_config(handlers,[{bad,bad}]), + {error,{invalid_id,"bad"}} = logger:set_logger_config(handlers,["bad"]), + {error,{invalid_filters,bad}} = logger:set_logger_config(filters,bad), + {error,{invalid_filter,bad}} = logger:set_logger_config(filters,[bad]), + {error,{invalid_filter,{_,_}}} = + logger:set_logger_config(filters,[{id,bad}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_logger_config(filters,[{id,{bad,args}}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_logger_config(filters,[{id,{fun() -> ok end,args}}]), + {error,{invalid_logger_config,{bad,bad}}} = + logger:set_logger_config(bad,bad), + + %% Handler config + {error,{not_found,h1}} = logger:set_handler_config(h1,a,b), + ok = logger:add_handler(h1,?MODULE,#{}), + {error,{invalid_filter_default,bad}} = + logger:set_handler_config(h1,filter_default,bad), + {error,{invalid_level,bad}} = logger:set_handler_config(h1,level,bad), + {error,{invalid_filters,bad}} = logger:set_handler_config(h1,filters,bad), + {error,{invalid_filter,bad}} = logger:set_handler_config(h1,filters,[bad]), + {error,{invalid_filter,{_,_}}} = + logger:set_handler_config(h1,filters,[{id,bad}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_handler_config(h1,filters,[{id,{bad,args}}]), + {error,{invalid_filter,{_,{_,_}}}} = + logger:set_handler_config(h1,filters,[{id,{fun() -> ok end,args}}]), + {error,{invalid_formatter,bad}} = + logger:set_handler_config(h1,formatter,bad), + {error,{invalid_module,{bad}}} = + logger:set_handler_config(h1,formatter,{{bad},cfg}), + {error,{invalid_formatter_config,bad}} = + logger:set_handler_config(h1,formatter,{logger_formatter,bad}), + {error,{invalid_formatter_config,{bad,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}), + {error,{invalid_formatter_config,{template,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>bad}}), + 
{error,{invalid_formatter_template,[1]}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>[1]}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{template=>[]}}), + {error,{invalid_formatter_config,{single_line,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{single_line=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{single_line=>true}}), + {error,{invalid_formatter_config,{legacy_header,bad}}} = + logger:set_handler_config(h1,formatter,{logger_formatter, + #{legacy_header=>bad}}), + ok = logger:set_handler_config(h1,formatter,{logger_formatter, + #{legacy_header=>true}}), + ok = logger:set_handler_config(h1,custom,custom), + ok. + +config_sanity_check(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +log_failed(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + {error,function_clause} = ?TRY(logger:log(bad,?map_rep)), + {error,function_clause} = ?TRY(logger:log(info,?map_rep,bad)), + {error,function_clause} = ?TRY(logger:log(info,fun() -> ?map_rep end,bad)), + {error,function_clause} = ?TRY(logger:log(info,fun() -> ?map_rep end,bad,#{})), + {error,function_clause} = ?TRY(logger:log(info,bad,bad,bad)), + {error,function_clause} = ?TRY(logger:log(info,bad,bad,#{})), + check_no_log(), + ok = logger:log(info,M1=?str,#{}), + check_logged(info,M1,#{}), + ok = logger:log(info,M2=?map_rep,#{}), + check_logged(info,M2,#{}), + ok = logger:log(info,M3=?keyval_rep,#{}), + check_logged(info,M3,#{}), + + %% Should we check report input more thoroughly? + ok = logger:log(info,M4=?keyval_rep++[other,stuff,in,list],#{}), + check_logged(info,M4,#{}), + + %% This might break a handler since it is assumed to be a format + %% string and args, so it depends how the handler protects itself + %% against something like io_lib:format("ok","ok") + ok = logger:log(info,"ok","ok",#{}), + check_logged(info,"ok","ok",#{}), + + ok. + +log_failed(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +emulator(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log, + tc_proc=>self()}), + Msg = "Error in process ~p on node ~p with exit value:~n~p~n", + Error = {badmatch,4}, + Stack = [{module, function, 2, []}], + Pid = spawn(?MODULE, generate_error, [Error, Stack]), + check_logged(error, Msg, [Pid, node(), {Error, Stack}], + #{gl=>group_leader(), + error_logger=>#{tag=>error,emulator=>true}}), + ok. + +emulator(cleanup, _Config) -> + logger:remove_handler(h1), + ok. + +generate_error(Error, Stack) -> + erlang:raise(error, Error, Stack). + +via_logger_process(Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log, + tc_proc=>self()}), + + %% Explicitly send a message to the logger process + %% This is used by code_server, erl_prim_loader, init, prim_file, ... + Msg = ?str, + logger ! {log,error,Msg,[],#{}}, + check_logged(error, Msg, [], #{}), + + case os:type() of + {win32,_} -> + %% Skip this part on windows - cant change file mode" + ok; + _ -> + %% This should trigger the same thing from erl_prim_loader + Dir = filename:join(?config(priv_dir,Config),"dummydir"), + ok = file:make_dir(Dir), + ok = file:change_mode(Dir,8#0222), + error = erl_prim_loader:list_dir(Dir), + check_logged(error, + #{report=>"File operation error: eacces. Target: " ++ + Dir ++". Function: list_dir. "}, + #{pid=>self(), + gl=>group_leader(), + error_logger=>#{tag=>error_report, + type=>std_error}}), + ok + end. 
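set_formatter/1 and config_sanity_check/1 above validate the {Module, Config} formatter tuple and the logger_formatter options single_line, legacy_header and template. A plausible accepted configuration, with the template contents chosen only for illustration:

    ok = logger:set_handler_config(h1, formatter,
             {logger_formatter, #{single_line   => true,
                                  legacy_header => false,
                                  template      => [time, " ", level, ": ", msg, "\n"]}}).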
+ +via_logger_process(cleanup, Config) -> + Dir = filename:join(?config(priv_dir,Config),"dummydir"), + _ = file:change_mode(Dir,8#0664), + _ = file:del_dir(Dir), + logger:remove_handler(h1), + ok. + +other_node(_Config) -> + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log, + tc_proc=>self()}), + {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]), + rpc:call(Node,logger,error,[Msg=?str,#{}]), + check_logged(error,Msg,#{}), + ok. + +other_node(cleanup,_Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes], + logger:remove_handler(h1), + ok. + +compare_levels(_Config) -> + Levels = [emergency,alert,critical,error,warning,notice,info,debug], + ok = compare(Levels), + {error,badarg} = ?TRY(logger:compare_levels(bad,bad)), + {error,badarg} = ?TRY(logger:compare_levels({bad},info)), + {error,badarg} = ?TRY(logger:compare_levels(info,"bad")), + ok. + +compare([L|Rest]) -> + eq = logger:compare_levels(L,L), + [gt = logger:compare_levels(L,L1) || L1 <- Rest], + [lt = logger:compare_levels(L1,L) || L1 <- Rest], + compare(Rest); +compare([]) -> + ok. + +process_metadata(_Config) -> + undefined = logger:get_process_metadata(), + {error,badarg} = ?TRY(logger:set_process_metadata(bad)), + ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}), + Time = erlang:monotonic_time(microsecond), + ProcMeta = #{time=>Time,line=>0,custom=>proc}, + ok = logger:set_process_metadata(ProcMeta), + S1 = ?str, + ?LOG_INFO(S1,#{custom=>macro}), + check_logged(info,S1,#{time=>Time,line=>0,custom=>macro}), + + Time2 = erlang:monotonic_time(microsecond), + S2 = ?str, + ?LOG_INFO(S2,#{time=>Time2,line=>1,custom=>macro}), + check_logged(info,S2,#{time=>Time2,line=>1,custom=>macro}), + + logger:info(S3=?str,#{custom=>func}), + check_logged(info,S3,#{time=>Time,line=>0,custom=>func}), + + ProcMeta = logger:get_process_metadata(), + ok = logger:update_process_metadata(#{custom=>changed,custom2=>added}), + Expected = ProcMeta#{custom:=changed,custom2=>added}, + Expected = logger:get_process_metadata(), + ok = logger:unset_process_metadata(), + undefined = logger:get_process_metadata(), + + ok. + +process_metadata(cleanup,_Config) -> + logger:remove_handler(h1), + ok. + +%%%----------------------------------------------------------------- +%%% Internal +check_logged(Level,Format,Args,Meta) -> + do_check_logged(Level,{Format,Args},Meta). + +check_logged(Level,Msg,Meta) when ?IS_REPORT(Msg) -> + do_check_logged(Level,{report,Msg},Meta); +check_logged(Level,Msg,Meta) when ?IS_STRING(Msg) -> + do_check_logged(Level,{string,Msg},Meta). + +do_check_logged(Level,Msg0,Meta0) -> + receive + {#{level:=Level,msg:=Msg,meta:=Meta},_} -> + check_msg(Msg0,Msg), + check_maps(Meta0,Meta,meta) + after 500 -> + ct:fail({timeout,no_log,process_info(self(),messages)}) + end. + +check_no_log() -> + receive + X -> ct:fail({got_unexpected_log,X}) + after 500 -> + ok + end. + +check_msg(Msg,Msg) -> + ok; +check_msg({report,Expected},{report,Got}) when is_map(Expected), is_map(Got) -> + check_maps(Expected,Got,msg); +check_msg(Expected,Got) -> + ct:fail({unexpected,msg,Expected,Got}). + +check_maps(Expected,Got,What) -> + case maps:merge(Got,Expected) of + Got -> + ok; + _ -> + ct:fail({unexpected,What,Expected,Got}) + end. + +%% Handler +adding_handler(_Id,Config) -> + maybe_send(add), + {ok,Config}. +removing_handler(_Id,_Config) -> + maybe_send(remove), + ok. 
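process_metadata/1 above covers the per-process metadata API. A typical use outside the suite, with request_id and step as made-up keys:

    ok = logger:set_process_metadata(#{request_id => 42}),
    logger:info("handling request"),                       %% event metadata now carries request_id
    ok = logger:update_process_metadata(#{step => done}),  %% merged into the existing map
    ok = logger:unset_process_metadata().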
+changing_config(_Id,_Old,#{call:=Fun}) -> + Fun(); +changing_config(_Id,_Old,#{fail:=true}) -> + {error,fail}; +changing_config(_Id,_Old,Config) -> + maybe_send(changing_config), + {ok,Config}. + +maybe_send(Msg) -> + case whereis(callback_receiver) of + undefined -> ok; + Pid -> Pid ! Msg + end. + +log(_Log,#{crash:=true}) -> + a=b; +log(Log,Config) -> + TcProc = maps:get(tc_proc,Config,self()), + TcProc ! {Log,Config}, + ok. + +test_api(Level) -> + logger:Level(#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},#{}), + logger:Level(#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},#{my=>meta}), + logger:Level("~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],#{}), + logger:Level("~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + logger:Level(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,x, + #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}), + logger:Level(fun(x) -> #{Level=>fun_to_r,meta=>true} end,x, + #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}), + logger:Level(fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,#{}), + logger:Level(F1=fun(x) -> {fun_to_bad} end,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + logger:Level(F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. + +test_log_function(Level) -> + logger:log(Level,#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},#{}), + logger:log(Level,#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},#{my=>meta}), + logger:log(Level,"~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],#{}), + logger:log(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + logger:log(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end, + x, #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}), + logger:log(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end, + x, #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}), + logger:log(Level,fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,#{}), + logger:log(Level,F1=fun(x) -> {fun_to_bad} end,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + logger:log(Level,F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. 
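test_api/1 and test_log_function/1 above exercise the fun-valued message forms. The point of that form is to defer building an expensive message until a handler actually wants it; a sketch where expensive_report/1 is a hypothetical helper:

    logger:debug(fun(Args) -> {"state dump: ~p", [expensive_report(Args)]} end,
                 #{limit => 100},   %% passed to the fun as its single argument
                 #{}).              %% metadata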
+ +test_macros(emergency=Level) -> + ?LOG_EMERGENCY(#{Level=>rep}), + ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)), + ?LOG_EMERGENCY(#{Level=>rep},#{my=>meta}), + ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}), + ?LOG_EMERGENCY("~w: ~w",[Level,fa]), + ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)), + ?LOG_EMERGENCY("~w: ~w ~w",[Level,fa,meta],#{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end, + x, #{my=>meta}), + ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta], + (?MY_LOC(3))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}), + ok = check_logged(Level,#{Level=>fun_to_r,meta=>true}, + (?MY_LOC(2))#{my=>meta}), + ?LOG_EMERGENCY(fun(x) -> <<"fun_to_s">> end,x,#{}), + ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)), + F1=fun(x) -> {fun_to_bad} end, + ?LOG_EMERGENCY(F1,x,#{}), + ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp", + [{F1,x},{fun_to_bad}],#{}), + F2=fun(x) -> erlang:error(fun_that_crashes) end, + ?LOG_EMERGENCY(F2,x,#{}), + ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp", + [{F2,x},{error,fun_that_crashes}],#{}), + ok. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R -> {C,R} end. diff --git a/lib/kernel/test/logger_bench_SUITE.erl b/lib/kernel/test/logger_bench_SUITE.erl new file mode 100644 index 0000000000..d47122ea9d --- /dev/null +++ b/lib/kernel/test/logger_bench_SUITE.erl @@ -0,0 +1,500 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_bench_SUITE). + +-compile(export_all). + +%%%----------------------------------------------------------------- +%%% To include lager tests, add paths to lager and goldrush +%%% (goldrush is a dependency inside the lager repo) +%%% +%%% To print data to .csv files, add the following to a config file: +%%% {print_csv,[{console_handler,[{path,"/some/dir/"}]}]}. +%%% +%%%----------------------------------------------------------------- + +-include_lib("common_test/include/ct.hrl"). +-include_lib("common_test/include/ct_event.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-define(msg,lists:flatten(string:pad("Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE), + 80,trailing,$*))). +-define(meta,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY}, + pid=>self()}). + +-define(NO_COMPARE,[profile]). + +-define(TIMES,100000). + +suite() -> + [{timetrap,{seconds,120}}]. + +init_per_suite(Config) -> + DataDir = ?config(data_dir,Config), + have_lager() andalso make(DataDir), + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(Group, Config) -> + H = remove_all_handlers(), + do_init_per_group(Group), + [{handlers,H}|Config]. 
+ +do_init_per_group(minimal_handler) -> + ok = logger:add_handler(?MODULE,?MODULE,#{level=>error,filter_default=>log}); +do_init_per_group(console_handler) -> + ok = logger:add_handler(?MODULE,logger_std_h, + #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS, + logger_std_h=>#{type=>standard_io, + toggle_sync_qlen => ?TIMES+1, + drop_new_reqs_qlen => ?TIMES+2, + flush_reqs_qlen => ?TIMES+3, + enable_burst_limit => false}}), + have_lager() andalso lager_helper:start(), + ok. + +end_per_group(Group, Config) -> + case ?config(saved_config,Config) of + {_,[{bench,Bench}]} -> + print_compare_chart(Group,Bench); + _ -> + ok + end, + add_all_handlers(?config(handlers,Config)), + do_end_per_group(Group). + +do_end_per_group(minimal_handler) -> + ok = logger:remove_handler(?MODULE); +do_end_per_group(console_handler) -> + ok = logger:remove_handler(?MODULE), + have_lager() andalso lager_helper:stop(), + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + wait_for_handlers(), + ok. + +wait_for_handlers() -> + wait_for_handler(?MODULE), + wait_for_handler(lager_event). + +wait_for_handler(Handler) -> + case whereis(Handler) of + undefined -> + io:format("~p: noproc1",[Handler]), + ok; + Pid -> + case process_info(Pid,message_queue_len) of + {_,0} -> + io:format("~p: queue=~p",[Handler,0]), + ok; + {_,Q} -> + io:format("~p: queue=~p",[Handler,Q]), + timer:sleep(2000), + wait_for_handler(Handler); + undefined -> + io:format("~p: noproc2",[Handler]), + ok + end + end. + +groups() -> + [{minimal_handler,[],[log, + log_drop, + log_drop_by_handler, + macro, + macro_drop, + macro_drop_by_handler, + error_logger, + error_logger_drop, + error_logger_drop_by_handler + ]}, + {console_handler,[],[%profile, + log, + log_drop, + log_drop_by_handler, + %% log_handler_complete, + macro, + macro_drop, + macro_drop_by_handler, + %% macro_handler_complete, + error_logger, + error_logger_drop, + error_logger_drop_by_handler%% , + %% error_logger_handler_complete + ] ++ lager_cases()} + ]. + +lager_cases() -> + case have_lager() of + true -> + [lager_log, + lager_log_drop, + lager_log_drop_by_handler, + %% lager_log_handler_complete, + lager_parsetrans, + lager_parsetrans_drop, + lager_parsetrans_drop_by_handler%% , + %% lager_parsetrans_handler_complete + ]; + false -> + [] + end. + + +all() -> + [{group,minimal_handler}, + {group,console_handler} + ]. + +log(Config) -> + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [error,?msg], Times). + +log_drop(Config) -> + Times = ?TIMES*100, + ok = logger:set_logger_config(level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times). + +log_drop(cleanup,_Config) -> + ok = logger:set_logger_config(level,info). + +log_drop_by_handler(Config) -> + Times = ?TIMES, + %% just ensure correct levels + ok = logger:set_logger_config(level,info), + ok = logger:set_handler_config(?MODULE,level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times). + +log_handler_complete(Config) -> + ok = logger:set_handler_config(?MODULE,formatter, + {?MODULE,?DEFAULT_FORMAT_CONFIG}), + handler_complete(Config, ?FUNCTION_NAME, fun do_log_func/2, [error,?msg]). + +log_handler_complete(cleanup,_Config) -> + ok=logger:set_handler_config(?MODULE,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}). 
+ +macro(Config) -> + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2,[error,?msg], Times). + +macro_drop(Config) -> + Times = ?TIMES*100, + ok = logger:set_logger_config(level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2,[info,?msg], Times). + +macro_drop(cleanup,_Config) -> + ok = logger:set_logger_config(level,info). + +macro_drop_by_handler(Config) -> + Times = ?TIMES, + %% just ensure correct levels + ok = logger:set_logger_config(level,info), + ok = logger:set_handler_config(?MODULE,level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2, [info,?msg], Times). + +macro_handler_complete(Config) -> + ok = logger:set_handler_config(?MODULE,formatter, + {?MODULE,?DEFAULT_FORMAT_CONFIG}), + handler_complete(Config, ?FUNCTION_NAME, fun do_log_macro/2, [error,?msg]). + +macro_handler_complete(cleanup,_Config) -> + ok=logger:set_handler_config(?MODULE,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}). + +error_logger(Config) -> + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun do_error_logger/2, [error,?msg], Times). + +error_logger_drop(Config) -> + Times = ?TIMES*100, + ok = logger:set_logger_config(level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_error_logger/2, [info,?msg], Times). + +error_logger_drop(cleanup,_Config) -> + ok = logger:set_logger_config(level,info). + +error_logger_drop_by_handler(Config) -> + Times = ?TIMES, + %% just ensure correct levels + ok = logger:set_logger_config(level,info), + ok = logger:set_handler_config(?MODULE,level,error), + run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times). + +error_logger_handler_complete(Config) -> + ok = logger:set_handler_config(?MODULE,formatter, + {?MODULE,?DEFAULT_FORMAT_CONFIG}), + handler_complete(Config, ?FUNCTION_NAME, fun do_error_logger/2, [error,?msg]). + +error_logger_handler_complete(cleanup,_Config) -> + ok=logger:set_handler_config(?MODULE,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}). + +lager_log(Config) -> + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [error,?msg], Times). + +lager_log_drop(Config) -> + Times = ?TIMES*100, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [info,?msg], Times). + +lager_log_drop_by_handler(Config) -> + %% This concept does not exist, so doing same as lager_log_drop/1 + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [info,?msg], Times). + +lager_log_handler_complete(Config) -> + handler_complete(Config, ?FUNCTION_NAME, fun lager_helper:do_func/2, [error,?msg]). + +lager_parsetrans(Config) -> + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [error,?msg], Times). + +lager_parsetrans_drop(Config) -> + Times = ?TIMES*100, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [info,?msg], Times). + +lager_parsetrans_drop_by_handler(Config) -> + %% This concept does not exist, so doing same as lager_parsetrans_drop/1 + Times = ?TIMES, + run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [info,?msg], Times). + +lager_parsetrans_handler_complete(Config) -> + handler_complete(Config, ?FUNCTION_NAME, fun lager_helper:do_parsetrans/2, [error,?msg]). 
+ + +profile(Config) -> + Times = ?TIMES, + %% fprof:apply(fun repeated_apply/3,[fun lager_helper:do_func/2,[error,?msg],Times]), + fprof:apply(fun repeated_apply/3,[fun do_log_func/2,[error,?msg],Times]), + ok = fprof:profile(), + ok = fprof:analyse(dest,"../fprof.analyse"), + ok. + +%%%----------------------------------------------------------------- +%%% Internal +%% Handler +log(_Log,_Config) -> + ok. + +format(Log=#{meta:=#{pid:=Pid}},Config) when is_pid(Pid) -> + String = ?DEFAULT_FORMATTER:format(Log,Config), + Pid ! done, + String; +format(Log=#{meta:=#{pid:=PidStr}},Config) when is_list(PidStr) -> + String = ?DEFAULT_FORMATTER:format(Log,Config), + list_to_pid(PidStr) ! done, + String. + +handler_complete(Config, TC, Fun, Args) -> + Times = ?TIMES, + Start = os:perf_counter(microsecond), + repeated_apply(Fun, Args, Times), + MSecs = wait_for_done(Start,Times), + calc_and_report(Config,TC,MSecs,Times). + +wait_for_done(Start,0) -> + os:perf_counter(microsecond) - Start; +wait_for_done(Start,N) -> + receive + done -> + wait_for_done(Start,N-1) + after 20000 -> + ct:fail("missing " ++ integer_to_list(N) ++ " replys") + end. + +%%%----------------------------------------------------------------- +%%% Benchmark stuff +run_benchmark(Config,Tag,Fun,Args,Times) -> + _ = erlang:apply(Fun, Args), % apply once to ensure level is cached + MSecs = measure_repeated_op(Fun, Args, Times), + %% fprof:profile(), + %% fprof:analyse(dest,"../"++atom_to_list(Tag)++".prof"), + calc_and_report(Config,Tag,MSecs,Times). + +measure_repeated_op(Fun, Args, Times) -> + Start = os:perf_counter(microsecond), + %% fprof:apply(fun repeated_apply/3, [Fun, Args, Times]), + repeated_apply(Fun, Args, Times), + os:perf_counter(microsecond) - Start. + +repeated_apply(_F, _Args, Times) when Times =< 0 -> + ok; +repeated_apply(F, Args, Times) -> + erlang:apply(F, Args), + repeated_apply(F, Args, Times - 1). + +calc_and_report(Config,Tag,MSecs,Times) -> + IOPS = trunc(Times * (1000000 / MSecs)), + ct_event:notify(#event{ name = benchmark_data, data = [{value,IOPS}] }), + ct:print("~p:~n~p IOPS, ~p us", [Tag, IOPS, MSecs]), + ct:comment("~p IOPS, ~p us", [IOPS, MSecs]), + Bench = case ?config(saved_config,Config) of + {_,[{bench,B}]} -> B; + undefined -> [] + end, + {save_config,[{bench,[{Tag,IOPS,MSecs}|Bench]}]}. + +remove_all_handlers() -> + #{handlers:=Hs} = logger:i(), + [logger:remove_handler(Id) || {Id,_,_} <- Hs], + Hs. + +add_all_handlers(Hs) -> + [logger:add_handler(Id,Mod,Config) || {Id,Mod,Config} <- Hs], + ok. + +%%%----------------------------------------------------------------- +%%% Call logger in different ways +do_log_func(Level,Msg) -> + logger:Level(Msg,[],?meta). + +do_log_macro(error,Msg) -> + ?LOG_ERROR(Msg,[]); +do_log_macro(info,Msg) -> + ?LOG_INFO(Msg,[]); +do_log_macro(debug,Msg) -> + ?LOG_DEBUG(Msg,[]). + +do_error_logger(error,Msg) -> + error_logger:error_msg(Msg,[]); +do_error_logger(info,Msg) -> + error_logger:info_msg(Msg,[]). + +%%%----------------------------------------------------------------- +%%% +print_compare_chart(Group,Bench) -> + io:format("~-20s~12s~12s~12s~12s", + ["Microseconds:","Log","Drop","HDrop","Complete"]), + io:format(user,"~-20s~12s~12s~12s~12s~n", + ["Microseconds:","Log","Drop","HDrop","Complete"]), + {Log,Drop,HDrop,Comp} = sort_bench(Bench,[],[],[],[]), + print_compare_chart(Log,Drop,HDrop,Comp), + io:format(user,"~n",[]), + maybe_print_csv_files(Group, + [{log,Log},{drop,Drop},{hdrop,HDrop},{comp,Comp}]). 
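measure_repeated_op/3 and calc_and_report/4 above carry the benchmark arithmetic. Reduced to its essentials, with Fun, Args and Times standing for any repeated operation:

    Start  = os:perf_counter(microsecond),
    repeated_apply(Fun, Args, Times),
    MicroS = os:perf_counter(microsecond) - Start,
    IOPS   = trunc(Times * (1000000 / MicroS)).   %% operations per second, as reported via ct_event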
+ +print_compare_chart([{What,LIOPS,LMSecs}|Log], + [{What,DIOPS,DMSecs}|Drop], + [{What,HIOPS,HMSecs}|HDrop], + [{What,CIOPS,CMSecs}|Comp]) -> + io:format("~-20w~12w~12w~12w~12w",[What,LMSecs,DMSecs,HMSecs,CMSecs]), + io:format(user,"~-20w~12w~12w~12w~12w~n",[What,LMSecs,DMSecs,HMSecs,CMSecs]), + print_compare_chart(Log,Drop,HDrop,Comp); +print_compare_chart([{What,LIOPS,LMSecs}|Log], + [{What,DIOPS,DMSecs}|Drop], + [{What,HIOPS,HMSecs}|HDrop], + []=Comp) -> + io:format("~-20w~12w~12w~12w",[What,LMSecs,DMSecs,HMSecs]), + io:format(user,"~-20w~12w~12w~12w~n",[What,LMSecs,DMSecs,HMSecs]), + print_compare_chart(Log,Drop,HDrop,Comp); +print_compare_chart([],[],[],[]) -> + ok; +print_compare_chart(Log,Drop,HDrop,Comp) -> + ct:fail({Log,Drop,HDrop,Comp}). + +sort_bench([{TC,IOPS,MSecs}|Bench],Log,Drop,HDrop,Comp) -> + case lists:member(TC,?NO_COMPARE) of + true -> + sort_bench(Bench,Log,Drop,HDrop,Comp); + false -> + TCStr = atom_to_list(TC), + {What,Type} = + case re:run(TCStr,"(.*)_(drop.*)", + [{capture,all_but_first,list}]) of + {match,[WhatStr,TypeStr]} -> + {list_to_atom(WhatStr),list_to_atom(TypeStr)}; + nomatch -> + case re:run(TCStr,"(.*)_(handler_complete.*)", + [{capture,all_but_first,list}]) of + {match,[WhatStr,TypeStr]} -> + {list_to_atom(WhatStr),list_to_atom(TypeStr)}; + nomatch -> + {TC,log} + end + end, + case Type of + log -> + sort_bench(Bench,[{What,IOPS,MSecs}|Log],Drop,HDrop,Comp); + drop -> + sort_bench(Bench,Log,[{What,IOPS,MSecs}|Drop],HDrop,Comp); + drop_by_handler -> + sort_bench(Bench,Log,Drop,[{What,IOPS,MSecs}|HDrop],Comp); + handler_complete -> + sort_bench(Bench,Log,Drop,HDrop,[{What,IOPS,MSecs}|Comp]) + end + end; +sort_bench([],Log,Drop,HDrop,Comp) -> + {lists:keysort(1,Log), + lists:keysort(1,Drop), + lists:keysort(1,HDrop), + lists:keysort(1,Comp)}. + +maybe_print_csv_files(Group,Data) -> + case ct:get_config({print_csv,Group}) of + undefined -> + ok; + Cfg -> + Path = proplists:get_value(path,Cfg,".."), + Files = [begin + File = filename:join(Path,F)++".csv", + case filelib:is_regular(File) of + true -> + {ok,Fd} = file:open(File,[append]), + Fd; + false -> + {ok,Fd} = file:open(File,[write]), + ok = file:write(Fd, + "error_logger,lager_log," + "lager_parsetrans,logger_log," + "logger_macro\n"), + Fd + end + end || {F,_} <- Data], + [print_csv_file(F,D) || {F,D} <- lists:zip(Files,Data)], + [file:close(Fd) || Fd <- Files], + ok + end. + +print_csv_file(Fd,{_,Data}) -> + AllIOPS = [integer_to_list(IOPS) || {_,IOPS,_} <- Data], + ok = file:write(Fd,lists:join(",",AllIOPS)++"\n"). + +have_lager() -> + code:ensure_loaded(lager) == {module,lager}. + +make(Dir) -> + {ok,Cwd} = file:get_cwd(), + ok = file:set_cwd(Dir), + up_to_date = make:all([load]), + ok = file:set_cwd(Cwd), + code:add_path(Dir). diff --git a/lib/kernel/test/logger_bench_SUITE_data/Emakefile b/lib/kernel/test/logger_bench_SUITE_data/Emakefile new file mode 100644 index 0000000000..85c82bdaab --- /dev/null +++ b/lib/kernel/test/logger_bench_SUITE_data/Emakefile @@ -0,0 +1 @@ +{['lager_helper'],[{outdir,"."},debug_info,{i,"/home/uabshan/Work/git/otp/lib/kernel/src"},{i,"/home/uabshan/Work/git/otp/lib/kernel/include"}]}. diff --git a/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl b/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl new file mode 100644 index 0000000000..296ced4276 --- /dev/null +++ b/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl @@ -0,0 +1,73 @@ +-module(lager_helper). + +-compile(export_all). +-compile({parse_transform,lager_transform}). 
+ +-include_lib("kernel/src/logger_internal.hrl"). + +start() -> + application:load(lager), + application:set_env(lager, error_logger_redirect, false), + application:set_env(lager, async_threshold, 100010), + application:set_env(lager, async_threshold_window, 100), + application:set_env(lager,handlers,[{?MODULE,[{level,error}]}]), + lager:start(). + +stop() -> + application:stop(lager). + +do_func(Level,Msg) -> + lager:log(Level,[{pid,self()}],Msg,[]). + +do_parsetrans(error,Msg) -> + lager:error(Msg,[]); +do_parsetrans(info,Msg) -> + lager:info(Msg,[]). + +%%%----------------------------------------------------------------- +%%% Dummy handler for lager +-record(state, {level :: {'mask', integer()}, + formatter :: atom(), + format_config :: any()}). +init(Opts) -> + Level = proplists:get_value(level,Opts,info), + Formatter = proplists:get_value(formatter,Opts,logger_bench_SUITE), + FormatConfig = proplists:get_value(format_config,Opts,?DEFAULT_FORMAT_CONFIG), + {ok,#state{level=lager_util:config_to_mask(Level), + formatter=Formatter, + format_config=FormatConfig}}. + +handle_call(get_loglevel, #state{level=Level} = State) -> + {ok, Level, State}; +handle_call({set_loglevel, Level}, State) -> + try lager_util:config_to_mask(Level) of + Levels -> + {ok, ok, State#state{level=Levels}} + catch + _:_ -> + {ok, {error, bad_log_level}, State} + end; +handle_call(_Request, State) -> + {ok, ok, State}. + +handle_event({log, Message}, + #state{level=L,formatter=Formatter,format_config=FormatConfig} = State) -> + case lager_util:is_loggable(Message, L, ?MODULE) of + true -> + Metadata = + case maps:from_list(lager_msg:metadata(Message)) of + Meta = #{pid:=Pid} when is_pid(Pid) -> + Meta; + Meta = #{pid:=PidStr} when is_list(PidStr) -> + Meta + end, + Log = #{level=>lager_msg:severity(Message), + msg=>{report,lager_msg:message(Message)}, + meta=>Metadata}, + io:put_chars(user, Formatter:format(Log,FormatConfig)), + {ok, State}; + false -> + {ok, State} + end; +handle_event(_Event, State) -> + {ok, State}. diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl new file mode 100644 index 0000000000..63e5b56021 --- /dev/null +++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl @@ -0,0 +1,1414 @@ +-module(logger_disk_log_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). +-include_lib("kernel/src/logger_h_common.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("kernel/include/file.hrl"). + +-define(check_no_log, [] = test_server:messages_get()). + +-define(check(Expected), + receive {log,Expected} -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). + +-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(bin(Msg), list_to_binary(Msg++"\n")). +-define(log_no(File,N), lists:concat([File,".",N])). +-define(domain,#{domain=>[?MODULE]}). + +-define(SYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500; + true -> ?FILESYNC_REPEAT_INTERVAL + 500 + end). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + timer:start(), % to avoid progress report + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. 
+ +init_per_testcase(TestHooksCase, Config) when + TestHooksCase == write_failure; + TestHooksCase == sync_failure -> + if ?TEST_HOOKS_TAB == undefined -> + {skip,"Define the TEST_HOOKS macro to run this test"}; + true -> + ct:print("********** ~w **********", [TestHooksCase]), + Config + end; +init_per_testcase(TestCase, Config) -> + ct:print("********** ~w **********", [TestCase]), + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop_handler, + create_log, + open_existing_log, + disk_log_opts, + default_formatter, + logging, + errors, + formatter_fail, + config_fail, + bad_input, + info_and_reset, + reconfig, + disk_log_sync, + disk_log_full, + disk_log_wrap, + disk_log_events, + write_failure, + sync_failure, + op_switch_to_sync, + op_switch_to_drop, + op_switch_to_flush, + limit_burst_disabled, + limit_burst_enabled_one, + limit_burst_enabled_period, + kill_disabled, + qlen_kill_new, + %% qlen_kill_std, + mem_kill_new, + %% mem_kill_std, + restart_after, + handler_requests_under_load + ]. + +start_stop_handler(_Config) -> + ok = logger:add_handler(?MODULE, logger_disk_log_h, #{}), + {error,{already_exist,?MODULE}} = + logger:add_handler(?MODULE, logger_disk_log_h, #{}), + true = is_pid(whereis(?MODULE)), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(?MODULE). +start_stop_handler(cleanup, _Config) -> + logger:remove_handler(?MODULE). + +create_log(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_A"])), + LogFile1 = filename:join(PrivDir, Name1), + ok = start_and_add(Name1, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:info("hello", ?domain), + logger_disk_log_h:disk_log_sync(Name1), + ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]), + try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000), + + %% test second handler + Name2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_B"])), + DLName = lists:concat([?FUNCTION_NAME,"_B_log"]), + LogFile2 = filename:join(PrivDir, DLName), + ok = start_and_add(Name2, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile2}), + logger:info("dummy", ?domain), + logger_disk_log_h:disk_log_sync(Name2), + ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]), + try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000), + + remove_and_stop(Name1), + remove_and_stop(Name2), + try_read_file(?log_no(LogFile1,1), {ok,<<"hello\ndummy\n">>}, 1), + try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000), + ok. 
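create_log/1 and the neighbouring cases drive logger_disk_log_h through the suite's start_and_add/3 helper. Wired up directly through the logger API instead (as default_formatter/1 further down does), the same handler configuration looks roughly like this; the handler id, file name and size limits are placeholders:

    ok = logger:add_handler(my_disk_log, logger_disk_log_h,
             #{filter_default => log,
               disk_log_opts  => #{file         => "./my_app.log",
                                   type         => wrap,
                                   max_no_bytes => 10000,
                                   max_no_files => 5}}).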
+ +open_existing_log(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + HName = ?FUNCTION_NAME, + DLName = lists:concat([?FUNCTION_NAME,"_log"]), + LogFile1 = filename:join(PrivDir, DLName), + ok = start_and_add(HName, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:info("one", ?domain), + logger_disk_log_h:disk_log_sync(HName), + ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000), + logger:info("two", ?domain), + ok = remove_and_stop(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\n">>}, 5000), + + logger:info("two and a half", ?domain), + + ok = start_and_add(HName, #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}, + #{file=>LogFile1}), + logger:info("three", ?domain), + logger_disk_log_h:disk_log_sync(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000), + remove_and_stop(HName), + try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000). + +disk_log_opts(Config) -> + Get = fun(Key, PL) -> proplists:get_value(Key, PL) end, + PrivDir = ?config(priv_dir,Config), + WName = list_to_atom(lists:concat([?FUNCTION_NAME,"_W"])), + WFile = lists:concat([?FUNCTION_NAME,"_W_log"]), + Size = length("12345"), + ConfigW = #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter => {?MODULE,no_nl}}, + WFileFull = filename:join(PrivDir, WFile), + DLOptsW = #{file => WFileFull, + type => wrap, + max_no_bytes => Size, + max_no_files => 2}, + ok = start_and_add(WName, ConfigW, DLOptsW), + WInfo1 = disk_log:info(WName), + ct:log("Fullname = ~s", [WFileFull]), + {WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1), + Get(size,WInfo1),Get(current_file,WInfo1)}, + logger:info("123", ?domain), + logger_disk_log_h:disk_log_sync(WName), + timer:sleep(500), + 1 = Get(current_file, disk_log:info(WName)), + + logger:info("45", ?domain), + logger_disk_log_h:disk_log_sync(WName), + timer:sleep(500), + 1 = Get(current_file, disk_log:info(WName)), + + logger:info("6", ?domain), + logger_disk_log_h:disk_log_sync(WName), + timer:sleep(500), + 2 = Get(current_file, disk_log:info(WName)), + + logger:info("7890", ?domain), + logger_disk_log_h:disk_log_sync(WName), + timer:sleep(500), + 2 = Get(current_file, disk_log:info(WName)), + + HName1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H1"])), + HFile1 = lists:concat([?FUNCTION_NAME,"_H1_log"]), + ConfigH = #{filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter => {?MODULE,no_nl}}, + HFile1Full = filename:join(PrivDir, HFile1), + DLOptsH1 = #{file => HFile1Full, + type => halt}, + ok = start_and_add(HName1, ConfigH, DLOptsH1), + HInfo1 = disk_log:info(HName1), + ct:log("Fullname = ~s", [HFile1Full]), + {HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1), + Get(size,HInfo1)}, + logger:info("12345", ?domain), + logger_disk_log_h:disk_log_sync(HName1), + timer:sleep(500), + 1 = Get(no_written_items, disk_log:info(HName1)), + + HName2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H2"])), + HFile2 = lists:concat([?FUNCTION_NAME,"_H2_log"]), + HFile2Full = filename:join(PrivDir, HFile2), + DLOptsH2 = DLOptsH1#{file => HFile2Full, + max_no_bytes => 1000}, + ok = start_and_add(HName2, ConfigH, DLOptsH2), + HInfo3 = disk_log:info(HName2), + ct:log("Fullname = ~s", [HFile2Full]), + {HFile2Full,halt,1000} = 
{Get(file,HInfo3),Get(type,HInfo3), + Get(size,HInfo3)}, + + remove_and_stop(WName), + remove_and_stop(HName1), + remove_and_stop(HName2), + ok. + +default_formatter(Config) -> + PrivDir = ?config(priv_dir,Config), + LogFile = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)), + HConfig = #{disk_log_opts => #{file=>LogFile}, + filter_default=>log}, + ct:pal("Log: ~p", [LogFile]), + ok = logger:add_handler(?MODULE, logger_disk_log_h, HConfig), + ok = logger:set_handler_config(?MODULE,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}), + LogName = lists:concat([LogFile, ".1"]), + logger:info("dummy"), + wait_until_written(LogName), + {ok,Bin} = file:read_file(LogName), + match = re:run(Bin, "=INFO REPORT====.*\ndummy", [{capture,none}]), + ok. +default_formatter(cleanup, _Config) -> + logger:remove_handler(?MODULE). + +logging(Config) -> + PrivDir = ?config(priv_dir,Config), + %% test new handler + Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + LogFile = filename:join(PrivDir, Name), + ok = start_and_add(Name, #{filter_default=>log, + formatter=>{?MODULE,self()}}, + #{file => LogFile}), + MsgFormatter = fun(Term) -> {io_lib:format("Term:~p",[Term]),[]} end, + logger:info([{x,y}], #{report_cb => MsgFormatter}), + logger:info([{x,y}], #{}), + ct:pal("Checking contents of ~p", [?log_no(LogFile,1)]), + try_read_file(?log_no(LogFile,1), {ok,<<"Term:[{x,y}]\n x: y\n">>}, 5000). + +logging(cleanup, _Config) -> + Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + remove_and_stop(Name). + +errors(Config) -> + PrivDir = ?config(priv_dir,Config), + Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + LogFile1 = filename:join(PrivDir,Name1), + HConfig = #{disk_log_opts=>#{file=>LogFile1}, + filter_default=>log, + formatter=>{?MODULE,self()}}, + ok = logger:add_handler(Name1, logger_disk_log_h, HConfig), + {error,{already_exist,Name1}} = + logger:add_handler(Name1, logger_disk_log_h, #{}), + + %%! TODO: + %%! Check how bad log_opts are handled! + + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(Name1, + disk_log_opts, + #{file=>LogFile1, + type=>halt}), + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(Name1,id,new), + + ok = logger:remove_handler(Name1), + {error,{not_found,Name1}} = logger:remove_handler(Name1), + ok. + +errors(cleanup, _Config) -> + Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])), + _ = logger:remove_handler(Name1). + +formatter_fail(Config) -> + PrivDir = ?config(priv_dir,Config), + Name = ?FUNCTION_NAME, + LogFile = filename:join(PrivDir,Name), + ct:pal("Log = ~p", [LogFile]), + HConfig = #{disk_log_opts => #{file=>LogFile}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}, + %% no formatter! 
+ logger:add_handler(Name, logger_disk_log_h, HConfig), + Pid = whereis(Name), + true = is_pid(Pid), + {ok,#{handlers:=H}} = logger:get_logger_config(), + true = lists:member(Name,H), + + %% Formatter is added automatically + {ok,{_,#{formatter:={logger_formatter,_}}}} = + logger:get_handler_config(Name), + logger:info(M1=?msg,?domain), + Got1 = try_match_file(?log_no(LogFile,1),"=INFO REPORT====.*\n"++M1,5000), + + ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}), + logger:info(M2=?msg,?domain), + Got2 = try_match_file(?log_no(LogFile,1), + Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2, + 5000), + + ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}), + logger:info(M3=?msg,?domain), + Got3 = try_match_file(?log_no(LogFile,1), + Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3, + 5000), + + ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}), + logger:info(?msg,?domain), + try_match_file(?log_no(LogFile,1), + Got3++"FORMATTER ERROR: bad_return_value", + 5000), + + %% Check that handler is still alive and was never dead + Pid = whereis(Name), + {ok,#{handlers:=H}} = logger:get_logger_config(), + ok. + +formatter_fail(cleanup,_Config) -> + _ = logger:remove_handler(?FUNCTION_NAME), + ok. + +config_fail(_Config) -> + {error,{handler_not_added,{invalid_config,logger_disk_log_h,{bad,bad}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{logger_disk_log_h => #{bad => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{handler_not_added,{invalid_levels,{42,42,_}}}} = + logger:add_handler(?MODULE,logger_disk_log_h, + #{logger_disk_log_h => #{toggle_sync_qlen=>42, + drop_new_reqs_qlen=>42}}), + + ok = logger:add_handler(?MODULE,logger_disk_log_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + %% can't change the disk log options for a log already in use + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,disk_log_opts, + #{max_no_files=>2}), + %% can't change name of an existing handler + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,id,bad), + %% incorrect values of OP params + {error,{invalid_levels,_}} = + logger:set_handler_config(?MODULE,logger_disk_log_h, + #{toggle_sync_qlen=>100, + flush_reqs_qlen=>99}), + %% invalid name of config parameter + {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} = + logger:set_handler_config(?MODULE, logger_disk_log_h, + #{filesync_rep_int => 2000}), + ok. +config_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +bad_input(_Config) -> + {error,{badarg,{disk_log_sync,["BadType"]}}} = + logger_disk_log_h:disk_log_sync("BadType"), + {error,{badarg,{info,["BadType"]}}} = logger_disk_log_h:info("BadType"), + {error,{badarg,{reset,["BadType"]}}} = logger_disk_log_h:reset("BadType"). + +info_and_reset(_Config) -> + ok = logger:add_handler(?MODULE,logger_disk_log_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + #{id := ?MODULE} = logger_disk_log_h:info(?MODULE), + ok = logger_disk_log_h:reset(?MODULE). +info_and_reset(cleanup,_Config) -> + logger:remove_handler(?MODULE). 
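The invalid_levels errors provoked in config_fail/1 suggest that the three queue-length thresholds must be strictly increasing: toggle_sync_qlen < drop_new_reqs_qlen < flush_reqs_qlen. A valid reconfiguration respecting that ordering is sketched below; the handler id is a placeholder.

    set_thresholds(HandlerId) ->
        %% strictly increasing values, otherwise set_handler_config/3 is
        %% expected to return {error,{invalid_levels,_}}
        logger:set_handler_config(HandlerId, logger_disk_log_h,
                                  #{toggle_sync_qlen   => 100,
                                    drop_new_reqs_qlen => 500,
                                    flush_reqs_qlen    => 1000}).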
+ +reconfig(Config) -> + Dir = ?config(priv_dir,Config), + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + #{id := ?MODULE, + toggle_sync_qlen := ?TOGGLE_SYNC_QLEN, + drop_new_reqs_qlen := ?DROP_NEW_REQS_QLEN, + flush_reqs_qlen := ?FLUSH_REQS_QLEN, + enable_burst_limit := ?ENABLE_BURST_LIMIT, + burst_limit_size := ?BURST_LIMIT_SIZE, + burst_window_time := ?BURST_WINDOW_TIME, + enable_kill_overloaded := ?ENABLE_KILL_OVERLOADED, + handler_overloaded_qlen := ?HANDLER_OVERLOADED_QLEN, + handler_overloaded_mem := ?HANDLER_OVERLOADED_MEM, + handler_restart_after := ?HANDLER_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL, + log_opts := #{type := ?DISK_LOG_TYPE, + max_no_files := ?DISK_LOG_MAX_NO_FILES, + max_no_bytes := ?DISK_LOG_MAX_NO_BYTES, + file := _DiskLogFile}} = + logger_disk_log_h:info(?MODULE), + + ok = logger:set_handler_config(?MODULE, logger_disk_log_h, + #{toggle_sync_qlen => 1, + drop_new_reqs_qlen => 2, + flush_reqs_qlen => 3, + enable_burst_limit => false, + burst_limit_size => 10, + burst_window_time => 10, + enable_kill_overloaded => true, + handler_overloaded_qlen => 100000, + handler_overloaded_mem => 10000000, + handler_restart_after => never, + filesync_repeat_interval => no_repeat}), + #{id := ?MODULE, + toggle_sync_qlen := 1, + drop_new_reqs_qlen := 2, + flush_reqs_qlen := 3, + enable_burst_limit := false, + burst_limit_size := 10, + burst_window_time := 10, + enable_kill_overloaded := true, + handler_overloaded_qlen := 100000, + handler_overloaded_mem := 10000000, + handler_restart_after := never, + filesync_repeat_interval := no_repeat} = + logger_disk_log_h:info(?MODULE), + + ok = logger:remove_handler(?MODULE), + + File = filename:join(Dir, "logfile"), + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}, + disk_log_opts=> + #{type => halt, + max_no_files => 1, + max_no_bytes => 1024, + file => File}}), + #{log_opts := #{type := halt, + max_no_files := 1, + max_no_bytes := 1024, + file := File}} = + logger_disk_log_h:info(?MODULE), + ok. + +reconfig(cleanup, _Config) -> + logger:remove_handler(?MODULE). 
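Besides checking the defaults, reconfig/1 documents the shape of the map returned by logger_disk_log_h:info/1: the overload-protection settings at the top level and the disk_log configuration under log_opts. Reading it back from a running handler could look like this sketch (handler id is a placeholder, and only keys matched by reconfig/1 are used):

    inspect(HandlerId) ->
        #{toggle_sync_qlen   := Toggle,
          drop_new_reqs_qlen := Drop,
          flush_reqs_qlen    := Flush,
          log_opts := #{type         := Type,
                        max_no_files := MaxFiles,
                        max_no_bytes := MaxBytes,
                        file         := File}} =
            logger_disk_log_h:info(HandlerId),
        io:format("queue thresholds: ~w/~w/~w~n", [Toggle, Drop, Flush]),
        io:format("disk_log: ~w, ~w file(s), ~w bytes each, at ~ts~n",
                  [Type, MaxFiles, MaxBytes, File]).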
+ +disk_log_sync(Config) -> + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, ?FUNCTION_NAME), + Log = lists:concat([File,".1"]), + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{disk_log_opts => #{file => File}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,nl}}), + + start_tracer([{disk_log,blog,2}, + {disk_log,sync,1}], + [{disk_log,blog,<<"first\n">>}, + {disk_log,sync}]), + + logger:info("first", ?domain), + %% wait for automatic disk_log_sync + check_tracer(?FILESYNC_REPEAT_INTERVAL*2), + + start_tracer([{disk_log,blog,2}, + {disk_log,sync,1}], + [{disk_log,blog,<<"second\n">>}, + {disk_log,blog,<<"third\n">>}, + {disk_log,sync}]), + %% two log requests in fast succession will make the handler skip + %% an automatic disk log sync + logger:info("second", ?domain), + logger:info("third", ?domain), + %% do explicit disk_log_sync + logger_disk_log_h:disk_log_sync(?MODULE), + check_tracer(100), + + %% check that if there's no repeated disk_log_sync active, + %% a disk_log_sync is still performed when handler goes idle + logger:set_handler_config(?MODULE, logger_disk_log_h, + #{filesync_repeat_interval => no_repeat}), + no_repeat = maps:get(filesync_repeat_interval, + logger_disk_log_h:info(?MODULE)), + + start_tracer([{disk_log,blog,2}, + {disk_log,sync,1}], + [{disk_log,blog,<<"fourth\n">>}, + {disk_log,blog,<<"fifth\n">>}, + {disk_log,sync}]), + + logger:info("fourth", ?domain), + timer:sleep(?IDLE_DETECT_TIME_MSEC*2), + logger:info("fifth", ?domain), + %% wait for automatic disk_log_sync + check_tracer(?IDLE_DETECT_TIME_MSEC*2), + + try_read_file(Log, {ok,<<"first\nsecond\nthird\nfourth\nfifth\n">>}, 1000), + + %% switch repeated disk_log_sync on and verify that the looping works + SyncInt = 1000, + WaitT = 4500, + OneSync = {logger_disk_log_h,handle_cast,repeated_disk_log_sync}, + %% receive 1 initial repeated_disk_log_sync, then 1 per sec + start_tracer([{logger_disk_log_h,handle_cast,2}], + [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]), + + logger:set_handler_config(?MODULE, logger_disk_log_h, + #{filesync_repeat_interval => SyncInt}), + SyncInt = maps:get(filesync_repeat_interval, + logger_disk_log_h:info(?MODULE)), + timer:sleep(WaitT), + logger:set_handler_config(?MODULE, logger_disk_log_h, + #{filesync_repeat_interval => no_repeat}), + check_tracer(100), + ok. +disk_log_sync(cleanup,_Config) -> + dbg:stop_clear(), + logger:remove_handler(?MODULE). + +disk_log_wrap(Config) -> + Get = fun(Key, PL) -> proplists:get_value(Key, PL) end, + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, ?FUNCTION_NAME), + ct:pal("Log = ~p", [File]), + MaxFiles = 3, + MaxBytes = 5, + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}, + disk_log_opts=> + #{type => wrap, + max_no_files => MaxFiles, + max_no_bytes => MaxBytes, + file => File}}), + Info = disk_log:info(?MODULE), + {File,wrap,{MaxBytes,MaxFiles},1} = + {Get(file,Info),Get(type,Info),Get(size,Info),Get(current_file,Info)}, + Tester = self(), + TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) -> + Pid ! 
{trace,Mod,Func,Details}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(?MODULE), [c]), + {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []), + + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)], + ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]), + %% fill first file + lists:foreach(fun(N) -> + Log = lists:concat([File,".",N]), + logger:info(Text, ?domain), + wait_until_written(Log), + ct:pal("N = ~w", + [N = Get(current_file, + disk_log:info(?MODULE))]) + end, lists:seq(1,MaxFiles)), + + %% wait for trace messages + timer:sleep(1000), + dbg:stop_clear(), + Received = lists:flatmap(fun({trace,_M,handle_info, + [{disk_log,_Node,_Name,What},_]}) -> + [{trace,What}]; + ({log,_}) -> + [] + end, test_server:messages_get()), + ct:pal("Trace =~n~p", [Received]), + Received = [{trace,{wrap,0}} || _ <- lists:seq(1,MaxFiles-1)], + ok. + +disk_log_wrap(cleanup,_Config) -> + dbg:stop_clear(), + logger:remove_handler(?MODULE). + +disk_log_full(Config) -> + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, ?FUNCTION_NAME), + ct:pal("Log = ~p", [File]), + MaxBytes = 50, + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}, + disk_log_opts=> + #{type => halt, + max_no_files => 1, + max_no_bytes => MaxBytes, + file => File}}), + + Tester = self(), + TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) -> + Pid ! {trace,Mod,Func,Details}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(?MODULE), [c]), + {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []), + + NoOfChars = 5, + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)], + [logger:info(Text, ?domain) || _ <- lists:seq(1,trunc(MaxBytes/NoOfChars)+1)], + + %% wait for trace messages + timer:sleep(2000), + dbg:stop_clear(), + Received = lists:flatmap(fun({trace,_M,handle_info, + [{disk_log,_Node,_Name,What},_]}) -> + [{trace,What}]; + ({log,_}) -> + [] + end, test_server:messages_get()), + ct:pal("Trace =~n~p", [Received]), + [{trace,full}, + {trace,{error_status,{error,{full,_}}}}] = Received, + ok. +disk_log_full(cleanup, _Config) -> + dbg:stop_clear(), + logger:remove_handler(?MODULE). + +disk_log_events(Config) -> + Node = node(), + Log = ?MODULE, + ok = logger:add_handler(?MODULE, + logger_disk_log_h, + #{filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + + %% Events copied from disk_log API + Events = + [{disk_log, Node, Log, {wrap, 0}}, + {disk_log, Node, Log, {truncated, 0}}, + {disk_log, Node, Log, {read_only, 42}}, + {disk_log, Node, Log, {blocked_log, 42}}, + {disk_log, Node, Log, {format_external, 42}}, + {disk_log, Node, Log, full}, + {disk_log, Node, Log, {error_status, ok}}], + + Tester = self(), + TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) -> + Pid ! {trace,Mod,Func,Details}, + Pid + end, + {ok,_} = dbg:tracer(process, {TraceFun, Tester}), + {ok,_} = dbg:p(whereis(?MODULE), [c]), + {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []), + + [whereis(?MODULE) ! 
E || E <- Events], + %% wait for trace messages + timer:sleep(2000), + dbg:stop_clear(), + Received = lists:map(fun({trace,_M,handle_info, + [Got,_]}) -> Got + end, test_server:messages_get()), + ct:pal("Trace =~n~p", [Received]), + NoOfEvents = length(Events), + NoOfEvents = length(Received), + lists:foreach(fun(Event) -> + true = lists:member(Event, Received) + end, Received), + ok. +disk_log_events(cleanup, _Config) -> + dbg:stop_clear(), + logger:remove_handler(?MODULE). + +write_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = filename:join(Dir, ?FUNCTION_NAME), + Log = lists:concat([File,".1"]), + ct:pal("Log = ~p", [Log]), + + Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]), + HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]), + + ok = log_on_remote_node(Node, "Logged1"), + rpc:call(Node, logger_disk_log_h, disk_log_sync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\n">>}, ?SYNC_REP_INT), + + rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [disk_log_blog, + {error,{full,?STANDARD_HANDLER}}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,log,LogOpts, + {error,{full,?STANDARD_HANDLER}}}}), + + rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]), + ok = log_on_remote_node(Node, "Logged2"), + rpc:call(Node, logger_disk_log_h, disk_log_sync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?SYNC_REP_INT), + ok. +write_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. 
+ + +sync_failure(Config) -> + Dir = ?config(priv_dir, Config), + FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]), + File = filename:join(Dir, FileName), + + + Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]), + HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + LogOpts = maps:get(log_opts, HState), + + SyncInt = 500, + ok = rpc:call(Node, logger, set_handler_config, + [?STANDARD_HANDLER, logger_disk_log_h, + #{filesync_repeat_interval => SyncInt}]), + Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]), + SyncInt = maps:get(filesync_repeat_interval, Info), + + ok = log_on_remote_node(Node, "Logged1"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,sync,LogOpts,{error,no_such_log}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, + [disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,sync,LogOpts, + {error,{blocked_log,?STANDARD_HANDLER}}}}), + + rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]), + ok = log_on_remote_node(Node, "Logged2"), + ?check_no_log, + ok. +sync_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +start_h_on_new_node(_Config, Func, File) -> + Pa = filename:dirname(code:which(?MODULE)), + Dest = + case os:type() of + {win32,_} -> + lists:concat([" {disk_log,\\\"",File,"\\\"}"]); + _ -> + lists:concat([" \'{disk_log,\"",File,"\"}\'"]) + end, + Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]), + NodeName = lists:concat([?MODULE,"_",Func]), + ct:pal("Starting ~s with ~tp", [NodeName,Args]), + {ok,Node} = test_server:start_node(NodeName, peer, [{args, Args}]), + Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), + true = is_pid(Pid), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,nl}]), + Node. + +log_on_remote_node(Node,Msg) -> + _ = spawn_link(Node, + fun() -> erlang:group_leader(whereis(user),self()), + logger:info(Msg) + end), + ok. + +%% functions for test hook macros to be called by rpc +set_internal_log(_Mod, _Func) -> + ?set_internal_log({_Mod,_Func}). +set_result(_Op, _Result) -> + ?set_result(_Op, _Result). +set_defaults() -> + ?set_defaults(). + +%% internal log function that sends the term to the test case process +internal_log(Type, Term) -> + [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester), + Tester ! {log,{Type,Term}}, + logger:internal_log(Type, Term), + ok. 
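write_failure/1 and sync_failure/1 run the handler on a peer node and use the ?set_result test hook to make the disk_log write and sync operations fail, then check that exactly one error report is produced per distinct error and that logging recovers once the hook is reset. The calling pattern, as used from the test node inside this suite, is roughly the sketch below; it reuses the suite's own set_result/2 and log_on_remote_node/2 helpers and only works when the handler was compiled with the TEST_HOOKS macro defined.

    simulate_sync_error(Node) ->
        %% make the handler's next disk_log sync fail ...
        rpc:call(Node, ?MODULE, set_result, [disk_log_sync, {error,no_such_log}]),
        ok = log_on_remote_node(Node, "this event triggers one error printout"),
        %% ... and restore the default behaviour again
        rpc:call(Node, ?MODULE, set_result, [disk_log_sync, ok]),
        ok.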
+ + +%%%----------------------------------------------------------------- +%%% Overload protection tests + +op_switch_to_sync(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 3, + drop_new_reqs_qlen => 501, + flush_reqs_qlen => 2000, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + NumOfReqs = count_lines(Log), + ok = file:delete(Log). +op_switch_to_sync(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_drop(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 3, + flush_reqs_qlen => 600, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages dropped = ~w (~w)", + [NumOfReqs-Logged,NumOfReqs]), + true = (Logged < NumOfReqs), + ok = file:delete(Log). +op_switch_to_drop(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_flush() -> + [{timetrap,{minutes,3}}]. +op_switch_to_flush(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% it's important that both async and sync requests have been queued + %% when the flush happens (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 99, + flush_reqs_qlen => 100, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 1000, + Procs = 500, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]), + true = (Logged < (NumOfReqs*Procs)), + ok = file:delete(Log). +op_switch_to_flush(cleanup, _Config) -> + ok = stop_handler(?MODULE). + + +limit_burst_disabled(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => false, + burst_limit_size => 10, + burst_window_time => 2000, + drop_new_reqs_qlen => 200, + flush_reqs_qlen => 300}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + NumOfReqs = Logged. +limit_burst_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_one(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => true, + burst_limit_size => ReqLimit, + burst_window_time => 2000, + drop_new_reqs_qlen => 200, + flush_reqs_qlen => 300}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + ReqLimit = Logged. +limit_burst_enabled_one(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
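The burst-limit cases show the intended semantics: with enable_burst_limit set, at most burst_limit_size messages are written per burst_window_time milliseconds and the surplus is silently dropped. Enabling it on an existing handler could look like the sketch below; the handler id and the concrete numbers are illustrative.

    limit_bursts(HandlerId) ->
        {ok, {_Mod, #{logger_disk_log_h := Sub} = Config}} =
            logger:get_handler_config(HandlerId),
        %% at most 10 messages per 2000 ms window, the rest are dropped
        logger:set_handler_config(
          HandlerId,
          Config#{logger_disk_log_h => Sub#{enable_burst_limit => true,
                                            burst_limit_size   => 10,
                                            burst_window_time  => 2000}}).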
+ +limit_burst_enabled_period(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + BurstTWin = 1000, + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => true, + burst_limit_size => ReqLimit, + burst_window_time => BurstTWin, + drop_new_reqs_qlen => 20000, + flush_reqs_qlen => 20001}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + + Windows = 3, + Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + ok = file:delete(Log), + true = (Logged > (ReqLimit*Windows)) andalso + (Logged < (ReqLimit*(Windows+2))). +limit_burst_enabled_period(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +kill_disabled(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>false, + handler_overloaded_qlen=>10, + handler_overloaded_mem=>100}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + true = is_pid(whereis(?MODULE)), + ok. +kill_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +qlen_kill_new(Config) -> + {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(?MODULE), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = 2000, + NewHConfig = + HConfig#{logger_disk_log_h => + DLHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_overloaded_mem=>Mem0+50000, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 2, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, info), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + timer:sleep(RestartAfter + 1000), + true = is_pid(whereis(?MODULE)), + ok + after + 5000 -> + Info = logger_disk_log_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +qlen_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
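qlen_kill_new/1 (and mem_kill_new/1 below) exercise the last line of defence: with enable_kill_overloaded the handler terminates with reason {shutdown,{overloaded,Name,QLen,Mem}} when the queue-length or memory limit is exceeded, and is restarted after handler_restart_after ms unless that is set to never. Observing the kill from a monitoring process, the way these cases do, could be sketched as follows (handler id is a placeholder and the handler is assumed to be running):

    wait_for_overload_kill(HandlerId) ->
        MRef = erlang:monitor(process, whereis(HandlerId)),
        receive
            {'DOWN', MRef, process, _Pid,
             {shutdown, {overloaded, HandlerId, QLen, Mem}}} ->
                {killed_by_overload, QLen, Mem};
            {'DOWN', MRef, process, _Pid, killed} ->
                killed_before_shutdown_reason_was_set
        after 10000 ->
            erlang:demonitor(MRef, [flush]),
            still_alive
        end.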
+ +mem_kill_new(Config) -> + {_Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(?MODULE), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = 2000, + NewHConfig = + HConfig#{logger_disk_log_h => + DLHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>50000, + handler_overloaded_mem=>Mem0+500, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 2, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, info), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + timer:sleep(RestartAfter * 2), + true = is_pid(whereis(?MODULE)), + ok + after + 5000 -> + Info = logger_disk_log_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +mem_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +restart_after(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig1 = + HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_restart_after=>never}}, + ok = logger:set_handler_config(?MODULE, NewHConfig1), + MRef1 = erlang:monitor(process, whereis(?MODULE)), + %% kill handler + send_burst({n,100}, {spawn,2,0}, {chars,79}, info), + receive + {'DOWN', MRef1, _, _, _Info1} -> + timer:sleep(?HANDLER_RESTART_AFTER + 1000), + undefined = whereis(?MODULE), + ok + after + 5000 -> + ct:fail("Handler not dead! It should not have survived this!") + end, + + {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + RestartAfter = 2000, + NewHConfig2 = + HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig2), + Pid0 = whereis(?MODULE), + MRef2 = erlang:monitor(process, Pid0), + %% kill handler + send_burst({n,100}, {spawn,2,0}, {chars,79}, info), + receive + {'DOWN', MRef2, _, _, _Info2} -> + timer:sleep(RestartAfter + 1000), + Pid1 = whereis(?MODULE), + true = is_pid(Pid1), + false = (Pid1 == Pid0), + ok + after + 5000 -> + ct:fail("Handler not dead! It should not have survived this!") + end, + ok. +restart_after(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% send handler requests (filesync, info, reset, change_config) +%% during high load to verify that sync, dropping and flushing is +%% handled correctly. +handler_requests_under_load() -> + [{timetrap,{minutes,3}}]. +handler_requests_under_load(Config) -> + {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 1000, + flush_reqs_qlen => 2000, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{disk_log_sync,[]}, + {info,[]}, + {reset,[]}, + {change_config,[]}]) + end), + Procs = 100, + Sent = Procs * send_burst({n,5000}, {spawn,Procs,10}, {chars,79}, info), + Pid ! 
{self(),finish}, + ReqResult = receive {Pid,Result} -> Result end, + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + FindError = fun(Res) -> + [E || E <- Res, + is_tuple(E) andalso (element(1,E) == error)] + end, + Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult], + NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult), + ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]), + ok = file:delete(Log). +handler_requests_under_load(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> + receive + {From,finish} -> + From ! {self(),Reqs} + after + TO -> + Result = + case Req of + change_config -> + logger:set_handler_config(HName, logger_disk_log_h, + #{enable_kill_overloaded => + false}); + Func -> + logger_disk_log_h:Func(HName) + end, + send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}]) + end. + +%%%----------------------------------------------------------------- +%%% +start_handler(Name, FuncName, Config) -> + Dir = ?config(priv_dir,Config), + File = filename:join(Dir, FuncName), + ct:pal("Logging to ~tp", [File]), + ok = logger:add_handler(Name, + logger_disk_log_h, + #{disk_log_opts=>#{file => File, + max_no_files => 1, + max_no_bytes => 100000000}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,{_,HConfig = #{logger_disk_log_h := DLHConfig}}} = + logger:get_handler_config(Name), + {lists:concat([File,".1"]),HConfig,DLHConfig}. + +stop_handler(Name) -> + ok = logger:remove_handler(Name), + ct:pal("Handler ~p stopped!", [Name]). + +send_burst(NorT, Type, {chars,Sz}, Class) -> + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)], + case NorT of + {n,N} -> + %% process_flag(priority, high), + send_n_burst(N, Type, Text, Class), + %% process_flag(priority, normal), + N; + {t,T} -> + ct:pal("Sending messages sequentially for ~w ms", [T]), + T0 = erlang:monotonic_time(millisecond), + send_t_burst(T0, T, Text, Class, 0) + end. + +send_n_burst(0, _, _Text, _Class) -> + ok; +send_n_burst(N, seq, Text, Class) -> + ok = logger:Class(Text, ?domain), + send_n_burst(N-1, seq, Text, Class); +send_n_burst(N, {spawn,Ps,TO}, Text, Class) -> + ct:pal("~w processes each sending ~w messages", [Ps,N]), + PerProc = fun() -> + send_n_burst(N, seq, Text, Class) + end, + MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end, + monitor(process,spawn_link(PerProc)) end || + _ <- lists:seq(1,Ps)], + lists:foreach(fun(MRef) -> + receive + {'DOWN', MRef, _, _, _} -> + ok + end + end, MRefs), + ct:pal("Message burst sent", []), + ok. + +send_t_burst(T0, T, Text, Class, N) -> + T1 = erlang:monotonic_time(millisecond), + if (T1-T0) > T -> + N; + true -> + ok = logger:Class(Text, ?domain), + send_t_burst(T0, T, Text, Class, N+1) + end. + +%%%----------------------------------------------------------------- +%%% Formatter callback +%%% Using this to send the formatted string back to the test case +%%% process - so it can check for logged events. 
+format(_,bad_return) -> + bad_return; +format(_,crash) -> + erlang:error(formatter_crashed); +format(#{msg:={report,R},meta:=#{report_cb:=Fun}}=Log,Config) -> + format(Log#{msg=>Fun(R)},Config); +format(#{msg:={string,String0}},no_nl) -> + String = unicode:characters_to_list(String0), + String; +format(#{msg:={string,String0}},nl) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={string,String0}},op) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={report,#{label:={supervisor,progress}}}},op) -> + ""; +format(#{msg:={report,#{label:={gen_server,terminate}}}},op) -> + ""; +format(#{msg:={report,#{label:={proc_lib,crash}}}},op) -> + ""; +format(#{msg:={F,A}},Pid) when is_list(F), is_list(A) -> + String = lists:flatten(io_lib:format(F,A)), + Pid ! {log,String}, + String++"\n"; +format(#{msg:={string,String0}},Pid) -> + String = unicode:characters_to_list(String0), + Pid ! {log,String}, + String++"\n"; +format(Msg,Tag) -> + Error = {unexpected_format,Msg,Tag}, + erlang:display(Error), + exit(Error). + +remove(Handler, LogName) -> + logger_disk_log_h:remove(Handler, LogName), + HState = #{log_names := Logs} = logger_disk_log_h:info(), + false = maps:is_key(LogName, HState), + false = lists:member(LogName, Logs), + false = logger_config:exist(logger, LogName), + {error,no_such_log} = disk_log:info(LogName), + ok. + +start_and_add(Name, Config, LogOpts) -> + ct:pal("Adding handler ~w with: ~p", + [Name,Config#{disk_log_opts=>LogOpts}]), + ok = logger:add_handler(Name, logger_disk_log_h, + Config#{disk_log_opts=>LogOpts}), + Pid = whereis(Name), + true = is_pid(Pid), + Name = proplists:get_value(name, disk_log:info(Name)), + ok. + +remove_and_stop(Handler) -> + ok = logger:remove_handler(Handler), + timer:sleep(500), + undefined = whereis(Handler), + ok. + +try_read_file(FileName, Expected, Time) -> + try_read_file(FileName, Expected, Time, undefined). + +try_read_file(FileName, Expected, Time, _) when Time > 0 -> + case file:read_file(FileName) of + Expected -> + ok; + Error = {error,_Reason} -> + erlang:error(Error); + SomethingElse -> + ct:pal("try_read_file read unexpected: ~p~n", [SomethingElse]), + timer:sleep(500), + try_read_file(FileName, Expected, Time-500, SomethingElse) + end; + +try_read_file(_, _, _, Incorrect) -> + ct:pal("try_read_file got incorrect pattern: ~p~n", [Incorrect]), + erlang:error({error,not_matching_pattern,Incorrect}). + +try_match_file(FileName, Pattern, Time) -> + try_match_file(FileName, Pattern, Time, <<>>). + +try_match_file(FileName, Pattern, Time, _) when Time > 0 -> + case file:read_file(FileName) of + {ok, Bin} -> + case re:run(Bin,Pattern,[{capture,none}]) of + match -> + unicode:characters_to_list(Bin); + _ -> + timer:sleep(100), + try_match_file(FileName, Pattern, Time-100, Bin) + end; + Error -> + erlang:error(Error) + end; +try_match_file(_,Pattern,_,Incorrect) -> + ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n", + [Pattern,Incorrect]), + erlang:error({error,not_matching_pattern,Pattern,Incorrect}). + +count_lines(File) -> + wait_until_written(File), + count_lines1(File). + +wait_until_written(File) -> + wait_until_written(File, -1). 
+ +wait_until_written(File, Sz) -> + timer:sleep(2000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + timer:sleep(1000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz1}} -> + ok; + {ok,#file_info{size = Sz2}} -> + wait_until_written(File, Sz2) + end; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end. + +count_lines1(File) -> + Counter = fun Cnt(Dev,LC) -> + case file:read_line(Dev) of + eof -> LC; + _ -> Cnt(Dev,LC+1) + end + end, + {_,Dev} = file:open(File, [read]), + Lines = Counter(Dev, 0), + file:close(Dev), + Lines. + +start_tracer(Trace,Expected) -> + Pid = self(), + dbg:tracer(process,{fun tracer/2,{Pid,Expected}}), + dbg:p(whereis(?MODULE),[c]), + tpl(Trace), + ok. + +tpl([{M,F,A}|Trace]) -> + {ok,Match} = dbg:tpl(M,F,A,[]), + case lists:keyfind(matched,1,Match) of + {_,_,1} -> + ok; + _ -> + dbg:stop_clear(), + throw({skip,"Can't trace "++atom_to_list(M)++":"++ + atom_to_list(F)++"/"++integer_to_list(A)}) + end, + tpl(Trace); +tpl([]) -> + ok. + +tracer({trace,_,call,{logger_disk_log_h,handle_cast,[{Op,_}|_]}}, {Pid,[{Mod,Func,Op}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Op}); +tracer({trace,_,call,{Mod=disk_log,Func=blog,[_,Data]}}, {Pid,[{Mod,Func,Data}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Data}); +tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func}); +tracer({trace,_,call,Call}, {Pid,Expected}) -> + ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]), + Pid ! {tracer_got_unexpected,Call,Expected}, + {Pid,Expected}. + +maybe_tracer_done(Pid,[],Got) -> + ct:log("Tracer got: ~p~n",[Got]), + Pid ! tracer_done; +maybe_tracer_done(Pid,Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + {Pid,Expected}. + +check_tracer(T) -> + receive + tracer_done -> + dbg:stop_clear(), + ok; + {tracer_got_unexpected,Got,Expected} -> + dbg:stop_clear(), + ct:fail({tracer_got_unexpected,Got,Expected}) + after T -> + dbg:stop_clear(), + ct:fail({timeout,tracer}) + end. diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl new file mode 100644 index 0000000000..c2d3364701 --- /dev/null +++ b/lib/kernel/test/logger_env_var_SUITE.erl @@ -0,0 +1,451 @@ +% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_env_var_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-define(all_vars,[{kernel,logger_dest}, + {kernel,logger_level}, + {kernel,logger_log_progress}, + {kernel,logger_sasl_compatible}, + {kernel,error_logger}]). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + Env = [{App,Key,application:get_env(App,Key)} || {App,Key} <- ?all_vars], + Removed = cleanup(), + [{env,Env},{logger,Removed}|Config]. 
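The variables saved by init_per_suite/1 are the kernel application parameters this suite manipulates: logger_dest, logger_level, logger_log_progress, logger_sasl_compatible and the legacy error_logger. On a real node they would typically be given in sys.config (or as -kernel command-line arguments, as start_h_on_new_node/3 in the disk_log suite does); the fragment below is only an example with made-up values.

    %% sys.config
    [{kernel,
      [{logger_dest, {disk_log, "/tmp/my_node.log"}},
       {logger_level, info},
       {logger_log_progress, false},
       {logger_sasl_compatible, false}]}].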
+ +end_per_suite(Config) -> + [application:set_env(App,Key,Val) || + {App,Key,Val} <- ?config(env,Config), + Val =/= undefined], + Hs = ?config(logger,Config), + [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + cleanup(), + ok. + +groups() -> + []. + +all() -> + [default, + default_sasl_compatible, + dest_tty, + dest_tty_sasl_compatible, + dest_false, + dest_false_progress, + dest_false_sasl_compatible, + dest_silent, + dest_silent_sasl_compatible, + dest_file_old, + dest_file, + dest_disk_log, + %% disk_log_vars, % or test this in logger_disk_log_SUITE? + sasl_compatible_false, + sasl_compatible_false_no_progress, + sasl_compatible, + bad_dest%% , + %% bad_level, + %% bad_sasl_compatibility, + %% bad_progress + ]. + +default(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + undefined, + undefined, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), + true = is_pid(whereis(logger_std_h)), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + true = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(logger_simple,1,Hs), + false = lists:keymember(sasl_h,1,Hs), + false = is_pid(whereis(sasl_h)), + ok. + +default_sasl_compatible(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + undefined, + undefined, % dest + undefined, % level + true, % sasl comp (default=false) + undefined), % progress (default=false) + {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), + true = is_pid(whereis(logger_std_h)), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(logger_simple,1,Hs), + true = lists:keymember(sasl_h,1,Hs), + true = is_pid(whereis(sasl_h)), + ok. + +dest_tty(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + tty, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), + true = is_pid(whereis(logger_std_h)), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,StdFilters), + true = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(logger_simple,1,Hs), + false = lists:keymember(sasl_h,1,Hs), + false = is_pid(whereis(sasl_h)), + ok. 
+ +dest_tty_sasl_compatible(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + tty, % dest + undefined, % level + true, % sasl comp (default=false) + undefined), % progress (default=false) + {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs), + true = is_pid(whereis(logger_std_h)), + info = maps:get(level,StdC), + StdFilters = maps:get(filters,StdC), + {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + lists:keyfind(domain,1,StdFilters), + false = lists:keymember(stop_progress,1,StdFilters), + false = lists:keymember(logger_simple,1,Hs), + true = lists:keymember(sasl_h,1,Hs), + true = is_pid(whereis(sasl_h)), + ok. + +dest_false(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + false, % dest + notice, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + false = lists:keymember(logger_std_h,1,Hs), + {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), + notice = maps:get(level,SimpleC), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,SimpleFilters), + true = lists:keymember(stop_progress,1,SimpleFilters), + false = lists:keymember(sasl_h,1,Hs), + ok. + +dest_false_progress(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + false, % dest + notice, % level + undefined, % sasl comp (default=false) + true), % progress (default=false) + false = lists:keymember(logger_std_h,1,Hs), + {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), + notice = maps:get(level,SimpleC), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} = + lists:keyfind(domain,1,SimpleFilters), + false = lists:keymember(stop_progress,1,SimpleFilters), + false = lists:keymember(sasl_h,1,Hs), + ok. + +dest_false_sasl_compatible(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + false, % dest + notice, % level + true, % sasl comp (default=false) + undefined), % progress (default=false) + false = lists:keymember(logger_std_h,1,Hs), + {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs), + notice = maps:get(level,SimpleC), + SimpleFilters = maps:get(filters,SimpleC), + {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} = + lists:keyfind(domain,1,SimpleFilters), + false = lists:keymember(stop_progress,1,SimpleFilters), + true = lists:keymember(sasl_h,1,Hs), + true = is_pid(whereis(sasl_h)), + ok. + +dest_silent(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + silent, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + false = lists:keymember(logger_std_h,1,Hs), + false = lists:keymember(logger_simple,1,Hs), + false = lists:keymember(sasl_h,1,Hs), + ok. + +dest_silent_sasl_compatible(Config) -> + {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + silent, % dest + undefined, % level + true, % sasl comp (default=false) + undefined), % progress (default=false) + false = lists:keymember(logger_std_h,1,Hs), + false = lists:keymember(logger_simple,1,Hs), + true = lists:keymember(sasl_h,1,Hs), + true = is_pid(whereis(sasl_h)), + ok. + + +dest_file_old(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + error_logger, + file, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + check_log(Log, + file, % dest + 0), % progress in std logger + ok. 
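Each dest_* case boils down to assertions over logger:i(): which of logger_std_h, logger_simple and sasl_h ended up installed, at what level, and with which domain and progress filters. The same inspection can be done on a node started with the variables under test, roughly as in this sketch (only the keys asserted by the test cases are matched):

    inspect_std_handler() ->
        #{handlers := Hs} = logger:i(),
        {logger_std_h, logger_std_h, StdConfig} =
            lists:keyfind(logger_std_h, 1, Hs),
        #{level := Level, filters := Filters} = StdConfig,
        {domain, {_Fun, {log, prefix_of, Domains}}} =
            lists:keyfind(domain, 1, Filters),
        {Level, Domains}.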
+ + +dest_file(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + file, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + check_log(Log, + file, % dest + 0), % progress in std logger + ok. + + +dest_disk_log(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + disk_log, % dest + undefined, % level + undefined, % sasl comp (default=false) + undefined), % progress (default=false) + check_log(Log, + disk_log, % dest + 0), % progress in std logger + ok. + + +sasl_compatible_false(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + file, % dest + undefined, % level + false, % sasl comp + true), % progress + check_log(Log, + file, % dest + 4), % progress in std logger + ok. + +sasl_compatible_false_no_progress(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + file, % dest + undefined, % level + false, % sasl comp + false), % progress + check_log(Log, + file, % dest + 0), % progress in std logger + ok. + +sasl_compatible(Config) -> + {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME, + logger_dest, + file, % dest + undefined, % level + true, % sasl comp + undefined), % progress + check_log(Log, + file, % dest + 0), % progress in std logger + ok. + +bad_dest(Config) -> + {error,{bad_config,{kernel,{logger_dest,baddest}}}} = + setup(Config,?FUNCTION_NAME, + logger_dest, + baddest, + undefined, + undefined, + undefined). + +bad_level(Config) -> + error = + setup(Config,?FUNCTION_NAME, + logger_dest, + tty, + badlevel, + undefined, + undefined). + +bad_sasl_compatibility(Config) -> + error = + setup(Config,?FUNCTION_NAME, + logger_dest, + tty, + info, + badcomp, + undefined). + +bad_progress(Config) -> + error = + setup(Config,?FUNCTION_NAME, + logger_dest, + tty, + info, + undefined, + badprogress). + +%%%----------------------------------------------------------------- +%%% Internal +setup(Config,Func,DestVar,Dest,Level,SaslComp,Progress) -> + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log, + logger_simple=>#{buffer=>true}}), + Dir = ?config(priv_dir,Config), + File = lists:concat([?MODULE,"_",Func,".log"]), + Log = filename:join(Dir,File), + case Dest of + undefined -> + ok; + F when F==file; F==disk_log -> + application:set_env(kernel,DestVar,{Dest,Log}); + _ -> + application:set_env(kernel,DestVar,Dest) + end, + case Level of + undefined -> + ok; + _ -> + application:set_env(kernel,logger_level,Level) + end, + case SaslComp of + undefined -> + ok; + _ -> + application:set_env(kernel,logger_sasl_compatible,SaslComp) + end, + case Progress of + undefined -> + ok; + _ -> + application:set_env(kernel,logger_log_progress,Progress) + end, + case logger:setup_standard_handler() of + ok -> + application:start(sasl), + StdH = case Dest of + NoH when NoH==false; NoH==silent -> false; + _ -> true + end, + StdH = is_pid(whereis(?STANDARD_HANDLER)), + SaslH = if SaslComp -> true; + true -> false + end, + SaslH = is_pid(whereis(sasl_h)), + {ok,{Log,maps:get(handlers,logger:i())}}; + Error -> + Error + end. + +check_log(Log,Dest,NumProgress) -> + ok = logger:alert("dummy1"), + ok = logger:debug("dummy1"), + + %% Check that there are progress reports (supervisor and + %% application_controller) and an error report (the call above) in + %% the log. There should not be any info reports yet. 
+ {ok,Bin1} = sync_and_read(Dest,Log), + ct:log("Log content:~n~s",[Bin1]), + match(Bin1,<<"PROGRESS REPORT">>,NumProgress), + match(Bin1,<<"ALERT REPORT">>,1), + match(Bin1,<<"INFO REPORT">>,0), + match(Bin1,<<"DEBUG REPORT">>,0), + + %% Then stop sasl and see that the info report from + %% application_controller is there + ok = application:stop(sasl), + {ok,Bin2} = sync_and_read(Dest,Log), + ct:log("Log content:~n~s",[Bin2]), + match(Bin2,<<"INFO REPORT">>,1), + match(Bin1,<<"DEBUG REPORT">>,0), + ok. + +match(Bin,Pattern,0) -> + nomatch = re:run(Bin,Pattern,[{capture,none}]); +match(Bin,Pattern,N) -> + {match,M} = re:run(Bin,Pattern,[{capture,all},global]), + N = length(M). + +sync_and_read(disk_log,Log) -> + logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER), + file:read_file(Log ++ ".1"); +sync_and_read(file,Log) -> + logger_std_h:filesync(?STANDARD_HANDLER), + file:read_file(Log). + +cleanup() -> + application:stop(sasl), + [application:unset_env(App,Key) || {App,Key} <- ?all_vars], + #{handlers:=Hs0} = logger:i(), + Hs = lists:keydelete(cth_log_redirect,1,Hs0), + [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], + Hs. diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl new file mode 100644 index 0000000000..21f14bbc02 --- /dev/null +++ b/lib/kernel/test/logger_filters_SUITE.erl @@ -0,0 +1,214 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_filters_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). + +-define(ndlog, + #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{}}). +-define(dlog(Domain), + #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{domain=>Domain}}). +-define(llog(Level), + #{level=>Level,msg=>{"Line: ~p",[?LINE]},meta=>#{}}). +-define(plog, + #{level=>info, + msg=>{report,#{label=>{?MODULE,progress}}}, + meta=>#{line=>?LINE}}). +-define(rlog(Node), + #{level=>info, + msg=>{"Line: ~p",[?LINE]}, + meta=>#{gl=>rpc:call(Node,erlang,whereis,[user])}}). + +-define(TRY(X), my_try(fun() -> X end)). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [domain, + level, + progress, + remote_gl]. 
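The filter functions tested here return the (possibly modified) log event to accept it, stop to discard it, or ignore when the filter does not apply, in which case the handler's filter_default decides. Attached to a handler, the domain filter can be used as in the sketch below; the handler id, filter id and domain value are made up for illustration.

    add_domain_handler() ->
        ok = logger:add_handler(my_h, logger_std_h,
                                #{filter_default => stop,
                                  filters =>
                                      [{my_domain_filter,
                                        {fun logger_filters:domain/2,
                                         {log, equals, [my_app]}}}]}),
        logger:info("reaches my_h", #{domain => [my_app]}),
        logger:info("equals does not match, filter_default stops this",
                    #{domain => [other_app]}).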
+ +domain(_Config) -> + L1 = logger_filters:domain(L1=?dlog([]),{log,prefix_of,[]}), + stop = logger_filters:domain(?dlog([]),{stop,prefix_of,[]}), + L2 = logger_filters:domain(L2=?dlog([]),{log,starts_with,[]}), + stop = logger_filters:domain(?dlog([]),{stop,starts_with,[]}), + L3 = logger_filters:domain(L3=?dlog([]),{log,equals,[]}), + stop = logger_filters:domain(?dlog([]),{stop,equals,[]}), + ignore = logger_filters:domain(?dlog([]),{log,no_domain,[]}), + ignore = logger_filters:domain(?dlog([]),{stop,no_domain,[]}), + + L4 = logger_filters:domain(L4=?dlog([a]),{log,prefix_of,[a,b]}), + stop = logger_filters:domain(?dlog([a]),{stop,prefix_of,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,starts_with,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,starts_with,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,equals,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,equals,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{log,no_domain,[a,b]}), + ignore = logger_filters:domain(?dlog([a]),{stop,no_domain,[a,b]}), + + ignore = logger_filters:domain(?dlog([a,b]),{log,prefix_of,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,prefix_of,[a]}), + L5 = logger_filters:domain(L5=?dlog([a,b]),{log,starts_with,[a]}), + stop = logger_filters:domain(?dlog([a,b]),{stop,starts_with,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,equals,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,equals,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{log,no_domain,[a]}), + ignore = logger_filters:domain(?dlog([a,b]),{stop,no_domain,[a]}), + + ignore = logger_filters:domain(?ndlog,{log,prefix_of,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,prefix_of,[a]}), + ignore = logger_filters:domain(?ndlog,{log,starts_with,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,starts_with,[a]}), + ignore = logger_filters:domain(?ndlog,{log,equals,[a]}), + ignore = logger_filters:domain(?ndlog,{stop,equals,[a]}), + L6 = logger_filters:domain(L6=?ndlog,{log,no_domain,[a]}), + stop = logger_filters:domain(?ndlog,{stop,no_domain,[a]}), + + L7 = logger_filters:domain(L7=?dlog([a,b,c,d]),{log,prefix_of,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,prefix_of,[a,b,c,d]}), + L8 = logger_filters:domain(L8=?dlog([a,b,c,d]),{log,starts_with,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,starts_with,[a,b,c,d]}), + L9 = logger_filters:domain(L9=?dlog([a,b,c,d]),{log,equals,[a,b,c,d]}), + stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equals,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,no_domain,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,no_domain,[a,b,c,d]}), + + %% A domain field in meta which is not a list is allowed by the + %% filter, but it will never match. 
+ ignore = logger_filters:domain(?dlog(dummy),{log,prefix_of,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,prefix_of,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,starts_with,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,starts_with,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,equals,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,equals,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{log,no_domain,[a,b,c,d]}), + ignore = logger_filters:domain(?dlog(dummy),{stop,no_domain,[a,b,c,d]}), + + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,prefix_of,[]})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})), + {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,prefix_of,bad})), + + ok. + +level(_Config) -> + ignore = logger_filters:level(?llog(info),{log,lt,info}), + ignore = logger_filters:level(?llog(info),{stop,lt,info}), + ignore = logger_filters:level(?llog(info),{log,gt,info}), + ignore = logger_filters:level(?llog(info),{stop,gt,info}), + L1 = logger_filters:level(L1=?llog(info),{log,lteq,info}), + stop = logger_filters:level(?llog(info),{stop,lteq,info}), + L2 = logger_filters:level(L2=?llog(info),{log,gteq,info}), + stop = logger_filters:level(?llog(info),{stop,gteq,info}), + L3 = logger_filters:level(L3=?llog(info),{log,eq,info}), + stop = logger_filters:level(?llog(info),{stop,eq,info}), + ignore = logger_filters:level(?llog(info),{log,neq,info}), + ignore = logger_filters:level(?llog(info),{stop,neq,info}), + + ignore = logger_filters:level(?llog(error),{log,lt,info}), + ignore = logger_filters:level(?llog(error),{stop,lt,info}), + L4 = logger_filters:level(L4=?llog(error),{log,gt,info}), + stop = logger_filters:level(?llog(error),{stop,gt,info}), + ignore = logger_filters:level(?llog(error),{log,lteq,info}), + ignore = logger_filters:level(?llog(error),{stop,lteq,info}), + L5 = logger_filters:level(L5=?llog(error),{log,gteq,info}), + stop = logger_filters:level(?llog(error),{stop,gteq,info}), + ignore = logger_filters:level(?llog(error),{log,eq,info}), + ignore = logger_filters:level(?llog(error),{stop,eq,info}), + L6 = logger_filters:level(L6=?llog(error),{log,neq,info}), + stop = logger_filters:level(?llog(error),{stop,neq,info}), + + L7 = logger_filters:level(L7=?llog(info),{log,lt,error}), + stop = logger_filters:level(?llog(info),{stop,lt,error}), + ignore = logger_filters:level(?llog(info),{log,gt,error}), + ignore = logger_filters:level(?llog(info),{stop,gt,error}), + L8 = logger_filters:level(L8=?llog(info),{log,lteq,error}), + stop = logger_filters:level(?llog(info),{stop,lteq,error}), + ignore = logger_filters:level(?llog(info),{log,gteq,error}), + ignore = logger_filters:level(?llog(info),{stop,gteq,error}), + ignore = logger_filters:level(?llog(info),{log,eq,error}), + ignore = logger_filters:level(?llog(info),{stop,eq,error}), + L9 = logger_filters:level(L9=?llog(info),{log,neq,error}), + stop = logger_filters:level(?llog(info),{stop,neq,error}), + + {error,badarg} = ?TRY(logger_filters:level(?llog(info),bad)), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{bad,eq,info})), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,bad,info})), + {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,eq,bad})), + + ok. 
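The level filter is wired up the same way. A minimal sketch, again with a hypothetical handler id, that drops everything less severe than error:

    %% Sketch: {stop,lt,error} stops events whose level compares 'lt' to error,
    %% using the same operator set exercised above.
    ok = logger:set_handler_config(
           my_handler, filters,
           [{errors_only,{fun logger_filters:level/2,{stop,lt,error}}}]),
    stop   = logger_filters:level(#{level=>info,msg=>{"x",[]},meta=>#{}},{stop,lt,error}),
    ignore = logger_filters:level(#{level=>error,msg=>{"x",[]},meta=>#{}},{stop,lt,error}).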
+ +progress(_Config) -> + L1 = logger_filters:progress(L1=?plog,log), + stop = logger_filters:progress(?plog,stop), + ignore = logger_filters:progress(?ndlog,log), + ignore = logger_filters:progress(?ndlog,stop), + + {error,badarg} = ?TRY(logger_filters:progress(?plog,bad)), + + ok. + +remote_gl(_Config) -> + {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]), + L1 = logger_filters:remote_gl(L1=?rlog(Node),log), + stop = logger_filters:remote_gl(?rlog(Node),stop), + ignore = logger_filters:remote_gl(?ndlog,log), + ignore = logger_filters:remote_gl(?ndlog,stop), + + {error,badarg} = ?TRY(logger_filters:remote_gl(?rlog(Node),bad)), + ok. + +remote_gl(cleanup,_Config) -> + [test_server:stop_node(N) || N<-nodes()]. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R -> {C,R} end. diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl new file mode 100644 index 0000000000..7d1f33746d --- /dev/null +++ b/lib/kernel/test/logger_formatter_SUITE.erl @@ -0,0 +1,560 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_formatter_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). + +-define(TRY(X), my_try(fun() -> X end)). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + Config. + +end_per_suite(_Config) -> + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [default, + legacy_header, + single_line, + template, + format_msg, + report_cb, + max_size, + depth, + chars_limit, + format_mfa, + format_time, + level_or_msg_in_meta, + faulty_log, + faulty_config, + faulty_msg]. + +default(_Config) -> + String1 = format(info,{"~p",[term]},#{},#{}), + ct:log(String1), + [_Date,_Time,"info:","term\n"] = string:lexemes(String1," "), + + Time = timestamp(), + ExpectedTimestamp = default_time_format(Time), + String2 = format(info,{"~p",[term]},#{time=>Time},#{}), + ct:log(String2), + " info: term\n" = string:prefix(String2,ExpectedTimestamp), + ok. 
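The format/4 helper used throughout this suite (defined near the end of the file) is a thin wrapper around logger_formatter:format/2. A minimal sketch of the direct call, using only configuration keys that the test cases below exercise:

    %% Sketch: format one log event straight through logger_formatter.
    Log = #{level => info,
            msg   => {"~p",[term]},
            meta  => #{time => erlang:monotonic_time(microsecond)}},
    IoData = logger_formatter:format(Log, #{template => [level,": ",msg,"\n"],
                                            single_line => true}),
    io:put_chars(IoData).   %% prints "info: term\n"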
+ +legacy_header(_Config) -> + Time = timestamp(), + String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true, + single_line=>false}), + ct:log(String1), + "=INFO REPORT==== "++Rest = String1, + [Timestamp,"\nterm\n"] = string:lexemes(Rest," ="), + [D,M,Y,H,Min,S,Micro] = string:lexemes(Timestamp,"-:."), + integer(D,31), + integer(Y,2018,infinity), + integer(H,23), + integer(Min,59), + integer(S,59), + integer(Micro,999999), + true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun", + "Jul","Aug","Sep","Oct","Nov","Dec"]), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false, + single_line=>false}), + ct:log(String2), + ExpectedTimestamp = default_time_format(Time), + " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp), + + String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad, + single_line=>false}), + ct:log(String3), + String3 = String2, + + String4 = format(info,{"~p",[term]},#{time=>Time}, + #{legacy_header=>true, + single_line=>true}), % <---ignored + ct:log(String4), + String4 = String1, + + String5 = format(info,{"~p",[term]},#{}, % <--- no time + #{legacy_header=>true, + single_line=>false}), + ct:log(String5), + "=INFO REPORT==== "++_ = String5, + ok. + +single_line(_Config) -> + Time = timestamp(), + ExpectedTimestamp = default_time_format(Time), + String1 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>true}), + ct:log(String1), + " info: term\n" = string:prefix(String1,ExpectedTimestamp), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>false}), + ct:log(String2), + " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp), + + String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>bad}), + ok. + +template(_Config) -> + Time = timestamp(), + + Template1 = [msg], + String1 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template1}), + ct:log(String1), + "term" = String1, + + Template2 = [msg,unknown], + String2 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template2}), + ct:log(String2), + "term" = String2, + + Template3 = ["string"], + String3 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template3}), + ct:log(String3), + "string" = String3, + + Template4 = ["string\nnewline"], + String4 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template4, + single_line=>true}), + ct:log(String4), + "string\nnewline" = String4, + + Template5 = [], + String5 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template5}), + ct:log(String5), + "" = String5, + + Ref6 = erlang:make_ref(), + Meta6 = #{atom=>some_atom, + integer=>632, + list=>[list,"string",4321,#{},{tuple}], + mfa=>{mod,func,0}, + pid=>self(), + ref=>Ref6, + string=>"some string", + time=>Time, + tuple=>{1,atom,"list"}, + nested=>#{subkey=>subvalue}}, + Template6 = lists:join(";",maps:keys(maps:remove(nested,Meta6)) ++ + [{nested,subkey}]), + String6 = format(info,{"~p",[term]},Meta6,#{template=>Template6, + single_line=>true}), + ct:log(String6), + SelfStr = pid_to_list(self()), + RefStr6 = ref_to_list(Ref6), + ListStr = "[list,\"string\",4321,#{},{tuple}]", + ExpectedTime6 = default_time_format(Time), + ["some_atom", + "632", + ListStr, + "mod:func/0", + SelfStr, + RefStr6, + "some string", + ExpectedTime6, + "{1,atom,\"list\"}", + "subvalue"] = string:lexemes(String6,";"), + + Meta7 = #{time=>Time, + nested=>#{key1=>#{subkey1=>value1}, + key2=>value2}}, + Template7 = lists:join(";",[nested, + {nested,key1}, + {nested,key1,subkey1}, + {nested,key2}, + {nested,key2,subkey2}, + 
{nested,key3}, + {nested,key3,subkey3}]), + String7 = format(info,{"~p",[term]},Meta7,#{template=>Template7, + single_line=>true}), + ct:log(String7), + [MultipleKeysStr, + "#{subkey1 => value1}", + "value1", + "value2", + "", + "", + ""] = string:split(String7,";",all), + %% Order of keys is not fixed + case MultipleKeysStr of + "#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok; + "#{key1 => #{subkey1 => value1},key2 => value2}" -> ok; + _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr}) + end, + ok. + +format_msg(_Config) -> + Template = [msg], + + String1 = format(info,{"~p",[term]},#{},#{template=>Template}), + ct:log(String1), + "term" = String1, + + String2 = format(info,{"list",[term]},#{},#{template=>Template}), + ct:log(String2), + "FORMAT ERROR: \"list\" - [term]" = String2, + + String3 = format(info,{report,term},#{},#{template=>Template}), + ct:log(String3), + "term" = String3, + + String4 = format(info,{report,term}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log(String4), + "formatted" = String4, + + String5 = format(info,{report,term}, + #{report_cb=>fun(_)-> faulty_return end}, + #{template=>Template}), + ct:log(String5), + "REPORT_CB ERROR: term; Returned: faulty_return" = String5, + + String6 = format(info,{report,term}, + #{report_cb=>fun(_)-> erlang:error(fun_crashed) end}, + #{template=>Template}), + ct:log(String6), + "REPORT_CB CRASH: term; Reason: {error,fun_crashed}" = String6, + + %% strings are not formatted + String7 = format(info,{string,"string"}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log(String7), + "string" = String7, + + String8 = format(info,{string,['not',printable,list]}, + #{report_cb=>fun(_)-> {"formatted",[]} end}, + #{template=>Template}), + ct:log(String8), + "INVALID STRING: ['not',printable,list]" = String8, + + String9 = format(info,{string,"string"},#{},#{template=>Template}), + ct:log(String9), + "string" = String9, + + ok. + +report_cb(_Config) -> + Template = [msg], + MetaFun = fun(_) -> {"meta_rcb",[]} end, + ConfigFun = fun(_) -> {"config_rcb",[]} end, + "term" = format(info,{report,term},#{},#{template=>Template}), + "meta_rcb" = + format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template}), + "config_rcb" = + format(info,{report,term},#{},#{template=>Template, + report_cb=>ConfigFun}), + "config_rcb" = + format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template, + report_cb=>ConfigFun}), + ok. + +max_size(_Config) -> + Cfg = #{template=>[msg], + single_line=>false}, + "12345678901234567890" = + format(info,{"12345678901234567890",[]},#{},Cfg), + application:set_env(kernel,logger_max_size,11), + "12345678901234567890" = % min value is 50, so this is not limited + format(info,{"12345678901234567890",[]},#{},Cfg), + "12345678901234567890123456789012345678901234567..." = % 50 + format(info, + {"123456789012345678901234567890123456789012345678901234567890", + []}, + #{}, + Cfg), + application:set_env(kernel,logger_max_size,53), + "12345678901234567890123456789012345678901234567890..." = %53 + format(info, + {"123456789012345678901234567890123456789012345678901234567890", + []}, + #{}, + Cfg), + "123456789012..." 
= + format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>15}), + "12345678901234567890" = + format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>unlimited}), + %% Check that one newline at the end of the line is kept (if it exists) + "12345678901...\n" = + format(info,{"12345678901234567890\n",[]},#{},Cfg#{max_size=>15}), + "12345678901...\n" = + format(info,{"12345678901234567890",[]},#{},Cfg#{template=>[msg,"\n"], + max_size=>15}), + ok. +max_size(cleanup,_Config) -> + application:unset_env(kernel,logger_max_size), + ok. + +depth(_Config) -> + Template = [msg], + "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template}), + application:set_env(kernel,error_logger_format_depth,11), + "[1,2,3,4,5,6,7,8,9,0|...]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template}), + application:set_env(kernel,logger_format_depth,12), + "[1,2,3,4,5,6,7,8,9,0,1|...]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template}), + "[1,2,3,4,5,6,7,8,9,0,1,2|...]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template, + depth=>13}), + "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" = + format(info, + {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]}, + #{}, + #{template=>Template, + depth=>unlimited}), + ok. +depth(cleanup,_Config) -> + application:unset_env(kernel,logger_format_depth), + ok. + +chars_limit(_Config) -> + FA = {"LoL: ~p~nL: ~p~nMap: ~p~n", + [lists:duplicate(10,lists:seq(1,100)), + lists:seq(1,100), + maps:from_list(lists:zip(lists:seq(1,100), + lists:duplicate(100,value)))]}, + Meta = #{time=>"2018-04-26 9:15:40.449879"}, + Template = [time," - ", msg, "\n"], + FC = #{template=>Template, + depth=>unlimited, + max_size=>unlimited, + chars_limit=>unlimited, + single_line=>true}, + CL1 = 80, + String1 = format(info,FA,Meta,FC#{chars_limit=>CL1}), + L1 = string:length(String1), + ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]), + true = L1 > CL1, + true = L1 < CL1 + 10, + + String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}), + L2 = string:length(String2), + ct:log("String2: ~p~nLength2: ~p~n",[lists:flatten(String2),L2]), + String2 = String1, + + CL3 = 200, + String3 = format(info,FA,Meta,FC#{chars_limit=>CL3}), + L3 = string:length(String3), + ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]), + true = L3 > CL3, + true = L3 < CL3 + 10, + + String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}), + L4 = string:length(String4), + ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]), + true = L4 > CL3, + true = L4 < CL3 + 10, + + %% Test that max_size truncates the string which is limited by + %% depth and chars_limit + MS5 = 150, + String5 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10,max_size=>MS5}), + L5 = string:length(String5), + ct:log("String5: ~p~nLength5: ~p~n",[String5,L5]), + L5 = MS5, + true = lists:prefix(lists:sublist(String5,L5-4),String4), + + ok. 
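Three independent knobs shrink the output in the cases above: depth limits each formatted term (as ~P does), chars_limit caps how much data io_lib produces in total, and max_size truncates the finished line, ending it with "...". A short recap that reuses the suite's own format/4 helper, so it only runs inside this module; the resulting shapes are the ones asserted in depth/1 and max_size/1 above:

    %% Sketch, reusing format/4 from this suite:
    Cfg = #{template => [msg], single_line => true},
    %% depth => 11 keeps the first ten list elements, then "|...]"
    _ = format(info, {"~p",[lists:seq(1,100)]}, #{}, Cfg#{depth => 11}),
    %% max_size => 15 cuts the whole line to 15 characters, the last three "..."
    _ = format(info, {"~s",[lists:duplicate(30,$x)]}, #{}, Cfg#{max_size => 15}).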
+ +format_mfa(_Config) -> + Template = [mfa], + + Meta1 = #{mfa=>{mod,func,0}}, + String1 = format(info,{"~p",[term]},Meta1,#{template=>Template}), + ct:log(String1), + "mod:func/0" = String1, + + Meta2 = #{mfa=>{mod,func,[]}}, + String2 = format(info,{"~p",[term]},Meta2,#{template=>Template}), + ct:log(String2), + "mod:func/0" = String2, + + Meta3 = #{mfa=>"mod:func/0"}, + String3 = format(info,{"~p",[term]},Meta3,#{template=>Template}), + ct:log(String3), + "mod:func/0" = String3, + + Meta4 = #{mfa=>othermfa}, + String4 = format(info,{"~p",[term]},Meta4,#{template=>Template}), + ct:log(String4), + "othermfa" = String4, + + ok. + +format_time(_Config) -> + Time1 = timestamp(), + ExpectedTimestamp1 = default_time_format(Time1), + String1 = format(info,{"~p",[term]},#{time=>Time1},#{}), + ct:log(String1), + " info: term\n" = string:prefix(String1,ExpectedTimestamp1), + + Time2 = timestamp(), + ExpectedTimestamp2 = default_time_format(Time2,true), + String2 = format(info,{"~p",[term]},#{time=>Time2},#{utc=>true}), + ct:log(String2), + " info: term\n" = string:prefix(String2,ExpectedTimestamp2), + + application:set_env(kernel,logger_utc,true), + Time3 = timestamp(), + ExpectedTimestamp3 = default_time_format(Time3,true), + String3 = format(info,{"~p",[term]},#{time=>Time3},#{}), + ct:log(String3), + " info: term\n" = string:prefix(String3,ExpectedTimestamp3), + + ok. + +format_time(cleanup,_Config) -> + application:unset_env(kernel,logger_utc), + ok. + +level_or_msg_in_meta(_Config) -> + %% The template contains atoms to pick out values from meta, + %% or level/msg to add these from the log event. What if you have + %% a key named 'level' or 'msg' in meta and want to display + %% its value? + %% For now we simply ignore Meta on this and display the + %% actual level and msg from the log event. + + Meta = #{level=>mylevel, + msg=>"metamsg"}, + Template = [level,";",msg], + String = format(info,{"~p",[term]},Meta,#{template=>Template}), + ct:log(String), + "info;term" = String, % so mylevel and "metamsg" are ignored + + ok. + +faulty_log(_Config) -> + %% Unexpected log (should be type logger:log()) - print error + {error, + function_clause, + {logger_formatter,format,[_,_],_}} = + ?TRY(logger_formatter:format(unexp_log,#{})), + ok. + +faulty_config(_Config) -> + {error, + function_clause, + {logger_formatter,format,[_,_],_}} = + ?TRY(logger_formatter:format(#{level=>info, + msg=>{"~p",[term]}, + meta=>#{time=>timestamp()}}, + unexp_config)), + ok. + +faulty_msg(_Config) -> + {error, + function_clause, + {logger_formatter,_,_,_}} = + ?TRY(logger_formatter:format(#{level=>info, + msg=>term, + meta=>#{time=>timestamp()}}, + #{})), + ok. + +%%%----------------------------------------------------------------- +%%% Internal +format(Level,Msg,Meta,Config) -> + format(#{level=>Level,msg=>Msg,meta=>add_time(Meta)},Config). + +format(Log,Config) -> + lists:flatten(logger_formatter:format(Log,Config)). + +default_time_format(Timestamp) -> + default_time_format(Timestamp,false). + +default_time_format(Timestamp0,Utc) when is_integer(Timestamp0) -> + Timestamp=Timestamp0+erlang:time_offset(microsecond), + %% calendar:system_time_to_rfc3339(Time,[{unit,microsecond}]). + Micro = Timestamp rem 1000000, + Sec = Timestamp div 1000000, + UniversalTime = erlang:posixtime_to_universaltime(Sec), + {Date,Time} = + if Utc -> UniversalTime; + true -> erlang:universaltime_to_localtime(UniversalTime) + end, + default_time_format(Date,Time,Micro). 
+ +default_time_format({Y,M,D},{H,Min,S},Micro) -> + lists:flatten( + io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w", + [Y,M,D,H,Min,S,Micro])). + +integer(Str) -> + is_integer(list_to_integer(Str)). +integer(Str,Max) -> + integer(Str,0,Max). +integer(Str,Min,Max) -> + Int = list_to_integer(Str), + Int >= Min andalso Int =<Max. + +%%%----------------------------------------------------------------- +%%% Called by macro ?TRY(X) +my_try(Fun) -> + try Fun() catch C:R:S -> {C,R,hd(S)} end. + +timestamp() -> + erlang:monotonic_time(microsecond). + +%% necessary? +add_time(#{time:=_}=Meta) -> + Meta; +add_time(Meta) -> + Meta#{time=>timestamp()}. diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl new file mode 100644 index 0000000000..b59f5f7758 --- /dev/null +++ b/lib/kernel/test/logger_legacy_SUITE.erl @@ -0,0 +1,282 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_legacy_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +%%%----------------------------------------------------------------- +%%% This test suite test that log events from within OTP can be +%%% delivered to legacy error_logger event handlers on the same format +%%% as before 'logger' was introduced. +%%% +%%% Before changing the expected format of any of the log events in +%%% this suite, please make sure that the backwards incompatibility it +%%% introduces is ok. +%%% ----------------------------------------------------------------- + +-define(check(Expected), + receive Expected -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). +-define(check_no_flush(Expected), + receive Expected -> + ok + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {got,test_server:messages_get()}}) + end). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + logger:add_handler(error_logger,error_logger, + #{level=>info,filter_default=>stop}), + Config. + +end_per_suite(_Config) -> + logger:remove_handler(error_logger), + ok. + +init_per_group(std, Config) -> + ok = logger:set_handler_config( + error_logger,filters, + [{domain,{fun logger_filters:domain/2, + {log,prefix_of,[beam,erlang,otp]}}}]), + Config; +init_per_group(sasl, Config) -> + ok = logger:set_handler_config( + error_logger,filters, + [{domain,{fun logger_filters:domain/2, + {log,prefix_of,[beam,erlang,otp,sasl]}}}]), + + %% cth_log_redirect checks if sasl is started before displaying + %% any sasl reports - so just to see the real sasl reports in tc + %% log: + application:start(sasl), + Config; +init_per_group(_Group, Config) -> + Config. + +end_per_group(sasl, _Config) -> + application:stop(sasl), + ok; +end_per_group(_Group, _Config) -> + ok. 
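The two groups above contain the whole routing story for legacy reports: the error_logger handler module is added to logger, and a domain filter decides which OTP-internal events (domains prefixed by [beam,erlang,otp], plus sasl for progress reports) it may pass on to old-style report handlers. A minimal sketch of that wiring, lifted from the init functions above; my_report_handler is a hypothetical gen_event callback module:

    %% Sketch: forward OTP events (including SASL progress reports) to legacy
    %% error_logger report handlers.
    logger:add_handler(error_logger, error_logger,
                       #{level => info, filter_default => stop}),
    ok = logger:set_handler_config(
           error_logger, filters,
           [{domain,{fun logger_filters:domain/2,
                     {log,prefix_of,[beam,erlang,otp,sasl]}}}]),
    %% Old-style handlers then attach exactly as before:
    error_logger:add_report_handler(my_report_handler, []).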
+ +init_per_testcase(_TestCase, Config) -> + error_logger:add_report_handler(?MODULE,{event_handler,self()}), + Config. + +end_per_testcase(Case, Config) -> + %% Using gen_event directly here, instead of + %% error_logger:delete_report_handler. This is to avoid + %% automatically stopping the error_logger process due to removing + %% the last handler. + gen_event:delete_handler(error_logger,?MODULE,[]), + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + [{std,[],[gen_server, + gen_event, + gen_fsm, + gen_statem]}, + {sasl,[],[sasl_reports, + supervisor_handle_info]}]. + +all() -> + [{group,std}, + {group,sasl}]. + +gen_server(_Config) -> + {ok,Pid} = gen_server:start(?MODULE,gen_server,[]), + Msg = fun() -> a=b end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}), + ok = gen_server:cast(Pid,Msg), + ?check({error,"** Generic server ~tp terminating"++_, + [Pid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}). + +gen_event(_Config) -> + {ok,Pid} = gen_event:start(), + ok = gen_event:add_handler(Pid,?MODULE,gen_event), + Msg = fun() -> a=b end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~tp"++_,[?MODULE,Msg]}), + gen_event:notify(Pid,Msg), + ?check({error,"** gen_event handler ~p crashed."++_, + [?MODULE,Pid,Msg,gen_event,{{badmatch,b},_}]}). + +gen_fsm(_Config) -> + {ok,Pid} = gen_fsm:start(?MODULE,gen_fsm,[]), + Msg = fun() -> a=b end, + Pid ! Msg, + ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}), + gen_fsm:send_all_state_event(Pid,Msg), + ?check({error,"** State machine ~tp terminating"++_, + [Pid,Msg,mystate,gen_fsm,{{badmatch,b},_}]}). + +gen_statem(_Config) -> + {ok,Pid} = gen_statem:start(?MODULE,gen_statem,[]), + Msg = fun() -> a=b end, + Pid ! Msg, + ?check({error,"** State machine ~tp terminating"++_, + [Pid,{info,Msg},{mystate,gen_statem},error,{badmatch,b}|_]}). 
+ +sasl_reports(Config) -> + App = {application,?MODULE,[{description, ""}, + {vsn, "1.0"}, + {modules, [?MODULE]}, + {registered, []}, + {applications, []}, + {mod, {?MODULE, []}}]}, + AppStr = io_lib:format("~p.",[App]), + Dir = ?config(priv_dir,Config), + AppFile = filename:join(Dir,?MODULE_STRING++".app"), + ok = file:write_file(AppFile,AppStr), + true = code:add_patha(Dir), + ok = application:start(?MODULE), + SupName = sup_name(), + Pid = whereis(SupName), + [{ch,ChPid,_,_}] = supervisor:which_children(Pid), + Node = node(), + ?check_no_flush({info_report,progress,[{application,?MODULE}, + {started_at,Node}]}), + ?check({info_report,progress,[{supervisor,{local,SupName}}, + {started,[{pid,ChPid}|_]}]}), + ok = gen_server:cast(ChPid, fun() -> + spawn_link(fun() -> receive x->ok end end) + end), + Msg = fun() -> a=b end, + ok = gen_server:cast(ChPid,Msg), + ?check_no_flush({error,"** Generic server ~tp terminating"++_, + [ChPid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}), + ?check_no_flush({error_report,crash_report, + [[{initial_call,_}, + {pid,ChPid}, + {registered_name,[]}, + {error_info,{error,{badmatch,b},_}}, + {ancestors,_}, + {message_queue_len,_}, + {messages,_}, + {links,[Pid,Neighbour]}, + {dictionary,_}, + {trap_exit,_}, + {status,_}, + {heap_size,_}, + {stack_size,_}, + {reductions,_}], + [{neighbour,[{pid,Neighbour}, + {registered_name,_}, + {initial_call,_}, + {current_function,_}, + {ancestors,_}, + {message_queue_len,_}, + {links,[ChPid]}, + {trap_exit,_}, + {status,_}, + {heap_size,_}, + {stack_size,_}, + {reductions,_}, + {current_stacktrace,_}]}]]}), + ?check_no_flush({error_report,supervisor_report, + [{supervisor,{local,SupName}}, + {errorContext,child_terminated}, + {reason,{{badmatch,b},_}}, + {offender,[{pid,ChPid}|_]}]}), + ?check({info_report,progress,[{supervisor,{local,SupName}}, + {started,_}]}), + + ok = application:stop(?MODULE), + ?check({info_report,std_info,[{application,?MODULE}, + {exited,stopped}, + {type,temporary}]}). + +sasl_reports(cleanup,_Config) -> + application:stop(?MODULE). + +supervisor_handle_info(_Config) -> + {ok,Pid} = supervisor:start_link({local,sup_name()},?MODULE,supervisor), + ?check({info_report,progress,[{supervisor,_},{started,_}]}), + Pid ! msg, + ?check({error,"Supervisor received unexpected message: ~tp~n",[msg]}). + +supervisor_handle_info(cleanup,_Config) -> + Pid = whereis(sup_name()), + unlink(Pid), + exit(Pid,shutdown). + +%%%----------------------------------------------------------------- +%%% Callbacks for error_logger event handler, gen_server, gen_statem, +%%% gen_fsm, gen_event, supervisor and application. +start(_,_) -> + supervisor:start_link({local,sup_name()},?MODULE,supervisor). + +init(supervisor) -> + {ok,{#{},[#{id=>ch,start=>{gen_server,start_link,[?MODULE,gen_server,[]]}}]}}; +init(StateMachine) when StateMachine==gen_statem; StateMachine==gen_fsm -> + {ok,mystate,StateMachine}; +init(State) -> + {ok,State}. + +%% error_logger event handler +handle_event({Tag,_Gl,{_Pid,Type,Report}},{_,Pid}=State) -> + Pid ! {Tag,Type,Report}, + {ok,State}; +%% other gen_event +handle_event(Fun,State) when is_function(Fun) -> + Fun(), + {next_state,State}. + +%% gen_fsm +handle_event(Fun,State,Data) when is_function(Fun) -> + Fun(), + {next_state,State,Data}. + +%% gen_statem +handle_event(info,Fun,State,Data) when is_function(Fun) -> + Fun(), + {next_state,State,Data}. + +%% gen_server +handle_cast(Fun,State) when is_function(Fun) -> + Fun(), + {noreply,State}. 
+ +%% gen_statem +callback_mode() -> + handle_event_function. + +%%%----------------------------------------------------------------- +%%% Internal +sup_name() -> + list_to_atom(?MODULE_STRING++"_sup"). diff --git a/lib/kernel/test/logger_simple_SUITE.erl b/lib/kernel/test/logger_simple_SUITE.erl new file mode 100644 index 0000000000..5d8d32492d --- /dev/null +++ b/lib/kernel/test/logger_simple_SUITE.erl @@ -0,0 +1,247 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_simple_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). + +-define(check_no_log,[] = test_server:messages_get()). +-define(check(Expected), + receive {log,Expected} -> + [] = test_server:messages_get() + after 1000 -> + ct:fail({report_not_received, + {line,?LINE}, + {expected,Expected}, + {got,test_server:messages_get()}}) + end). + +-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}). +-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + #{handlers:=Hs0} = logger:i(), + Hs = lists:keydelete(cth_log_redirect,1,Hs0), + [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs], + Env = [{App,Key,application:get_env(App,Key)} || + {App,Key} <- [{kernel,logger_dest}, + {kernel,logger_level}]], + [{env,Env},{logger,Hs}|Config]. + +end_per_suite(Config) -> + [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)], + Hs = ?config(logger,Config), + [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs], + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. + +init_per_testcase(_TestCase, Config) -> + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [start_stop, + get_buffer, + replace_file, + replace_disk_log + ]. + +start_stop(_Config) -> + undefined = whereis(logger_simple), + register(logger_simple,self()), + {error,_} = logger:add_handler(logger_simple, + logger_simple, + #{filter_default=>log}), + unregister(logger_simple), + ok = logger:add_handler(logger_simple,logger_simple,#{filter_default=>log}), + Pid = whereis(logger_simple), + true = is_pid(Pid), + ok = logger:remove_handler(logger_simple), + false = is_pid(whereis(logger_simple)), + ok. +start_stop(cleanup,_Config) -> + logger:remove_handler(logger_simple). 
+ +get_buffer(_Config) -> + %% Start simple without buffer + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log}), + logger:emergency(?str), + logger:alert(?str,[]), + logger:error(?map_rep), + logger:info(?keyval_rep), + {ok,[]} = logger_simple:get_buffer(), % no buffer + ok = logger:remove_handler(logger_simple), + + %% Start with buffer + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log, + logger_simple=>#{buffer=>true}}), + logger:emergency(M1=?str), + logger:alert(M2=?str,[]), + logger:error(M3=?map_rep), + logger:info(M4=?keyval_rep), + logger:info(M41=?keyval_rep++[not_key_val]), + error_logger:error_report(some_type,M5=?map_rep), + error_logger:warning_report("some_type",M6=?map_rep), + logger:critical(M7=?str,[A7=?keyval_rep]), + logger:notice(M8=["fake",string,"line:",?LINE]), + {ok,Buffered1} = logger_simple:get_buffer(), + [#{level:=emergency,msg:={string,M1}}, + #{level:=alert,msg:={M2,[]}}, + #{level:=error,msg:={report,M3}}, + #{level:=info,msg:={report,M4}}, + #{level:=info,msg:={report,M41}}, + #{level:=error,msg:={report,#{label:={error_logger,error_report}, + report:=M5}}}, + #{level:=warning,msg:={report,#{label:={error_logger,warning_report}, + report:=M6}}}, + #{level:=critical,msg:={M7,[A7]}}, + #{level:=notice,msg:={string,M8}}] = Buffered1, + + %% Keep logging - should not buffer any more + logger:emergency(?str), + logger:alert(?str,[]), + logger:error(?map_rep), + logger:info(?keyval_rep), + {ok,[]} = logger_simple:get_buffer(), + ok = logger:remove_handler(logger_simple), + + %% Fill buffer and drop + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log, + logger_simple=>#{buffer=>true}}), + logger:emergency(M9=?str), + M10=?str, + [logger:info(M10) || _ <- lists:seq(1,8)], + logger:error(M11=?str), + logger:error(?str), + logger:error(?str), + {ok,Buffered3} = logger_simple:get_buffer(), + 11 = length(Buffered3), + [#{level:=emergency,msg:={string,M9}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=info,msg:={string,M10}}, + #{level:=error,msg:={string,M11}}, + #{level:=info,msg:={"Simple handler buffer full, dropped ~w messages",[2]}}] + = Buffered3, + ok. +get_buffer(cleanup,_Config) -> + logger:remove_handler(logger_simple). + +replace_file(Config) -> + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log, + logger_simple=>#{buffer=>true}}), + logger:emergency(M1=?str), + logger:alert(M2=?str,[]), + logger:error(?map_rep), + logger:info(?keyval_rep), + undefined = whereis(?STANDARD_HANDLER), + PrivDir = ?config(priv_dir,Config), + File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)++".log"), + + application:set_env(kernel,logger_dest,{file,File}), + application:set_env(kernel,logger_level,info), + + ok = logger:setup_standard_handler(), + true = is_pid(whereis(?STANDARD_HANDLER)), + ok = logger_std_h:filesync(?STANDARD_HANDLER), + {ok,Bin} = file:read_file(File), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _] = Lines, + ok. +replace_file(cleanup,_Config) -> + logger:remove_handler(?STANDARD_HANDLER), + logger:remove_handler(logger_simple). 
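Taken together, get_buffer/1 and replace_file/1 describe the intended boot sequence: logger_simple buffers early log events (dropping them, with a note, once the buffer is full), and logger:setup_standard_handler() later reads the kernel logger_dest / logger_level variables, starts the real handler and replays the buffered events into it. A condensed sketch of that flow using only calls already exercised above; the log file path is illustrative:

    %% Sketch of the buffer-then-replace flow.
    ok = logger:add_handler(logger_simple, logger_simple,
                            #{filter_default => log,
                              logger_simple => #{buffer => true}}),
    logger:error("early event, buffered"),
    application:set_env(kernel, logger_dest, {file,"/tmp/boot.log"}),
    application:set_env(kernel, logger_level, info),
    ok = logger:setup_standard_handler(),           %% starts ?STANDARD_HANDLER
    ok = logger_std_h:filesync(?STANDARD_HANDLER).  %% flush the replayed events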
+ +replace_disk_log(Config) -> + ok = logger:add_handler(logger_simple,logger_simple, + #{filter_default=>log, + logger_simple=>#{buffer=>true}}), + logger:emergency(M1=?str), + logger:alert(M2=?str,[]), + logger:error(?map_rep), + logger:info(?keyval_rep), + undefined = whereis(?STANDARD_HANDLER), + PrivDir = ?config(priv_dir,Config), + File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)), + + application:set_env(kernel,logger_dest,{disk_log,File}), + application:set_env(kernel,logger_level,info), + + ok = logger:setup_standard_handler(), + true = is_pid(whereis(?STANDARD_HANDLER)), + ok = logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER), + {ok,Bin} = file:read_file(File++".1"), + Lines = [unicode:characters_to_list(L) || + L <- binary:split(Bin,<<"\n">>,[global,trim])], + ["=EMERGENCY REPORT===="++_, + M1, + "=ALERT REPORT===="++_, + M2, + "=ERROR REPORT===="++_, + _, + _, + "=INFO REPORT===="++_, + _, + _|_] = Lines, % the tail might be an info report about opening the disk log + ok. +replace_disk_log(cleanup,_Config) -> + logger:remove_handler(?STANDARD_HANDLER), + logger:remove_handler(logger_simple). + diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl new file mode 100644 index 0000000000..7c8d63cbbd --- /dev/null +++ b/lib/kernel/test/logger_std_h_SUITE.erl @@ -0,0 +1,1453 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(logger_std_h_SUITE). + +-compile(export_all). + +-include_lib("common_test/include/ct.hrl"). +-include_lib("kernel/include/logger.hrl"). +-include_lib("kernel/src/logger_internal.hrl"). +-include_lib("kernel/src/logger_h_common.hrl"). +-include_lib("stdlib/include/ms_transform.hrl"). +-include_lib("kernel/include/file.hrl"). + +-define(check_no_log, [] = test_server:messages_get()). +-define(check(Expected), + receive + {log,Expected} -> + [] = test_server:messages_get() + after 5000 -> + ct:fail({report_not_received, + {line,?LINE}, + {expected,Expected}, + {got,test_server:messages_get()}}) + end). + +-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++ + ":"++integer_to_list(?LINE)). +-define(bin(Msg), list_to_binary(Msg++"\n")). +-define(domain,#{domain=>[?MODULE]}). + +-define(FILESYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500; + true -> ?FILESYNC_REPEAT_INTERVAL + 500 + end). + +suite() -> + [{timetrap,{seconds,30}}]. + +init_per_suite(Config) -> + timer:start(), % to avoid progress report + {ok,{?STANDARD_HANDLER,#{formatter:=OrigFormatter}}} = + logger:get_handler_config(?STANDARD_HANDLER), + [{formatter,OrigFormatter}|Config]. + +end_per_suite(Config) -> + {OrigMod,OrigConf} = proplists:get_value(formatter,Config), + logger:set_handler_config(?STANDARD_HANDLER,formatter,{OrigMod,OrigConf}), + ok. + +init_per_group(_Group, Config) -> + Config. + +end_per_group(_Group, _Config) -> + ok. 
+ +init_per_testcase(TestHooksCase, Config) when + TestHooksCase == write_failure; + TestHooksCase == sync_failure -> + if ?TEST_HOOKS_TAB == undefined -> + {skip,"Define the TEST_HOOKS macro to run this test"}; + true -> + ct:print("********** ~w **********", [TestHooksCase]), + Config + end; +init_per_testcase(TestCase, Config) -> + ct:print("********** ~w **********", [TestCase]), + Config. + +end_per_testcase(Case, Config) -> + try apply(?MODULE,Case,[cleanup,Config]) + catch error:undef -> ok + end, + ok. + +groups() -> + []. + +all() -> + [add_remove_instance_tty, + add_remove_instance_standard_io, + add_remove_instance_standard_error, + add_remove_instance_file1, + add_remove_instance_file2, + default_formatter, + errors, + formatter_fail, + config_fail, + crash_std_h_to_file, + crash_std_h_to_disk_log, + bad_input, + info_and_reset, + reconfig, + file_opts, + filesync, + write_failure, + sync_failure, + op_switch_to_sync_file, + op_switch_to_sync_tty, + op_switch_to_drop_file, + op_switch_to_drop_tty, + op_switch_to_flush_file, + op_switch_to_flush_tty, + limit_burst_disabled, + limit_burst_enabled_one, + limit_burst_enabled_period, + kill_disabled, + qlen_kill_new, + qlen_kill_std, + mem_kill_new, + mem_kill_std, + restart_after, + handler_requests_under_load + ]. + +add_remove_instance_tty(_Config) -> + {error,{handler_not_added,{invalid_config,logger_std_h,{type,tty}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{type => tty}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + ok. + +add_remove_instance_standard_io(_Config) -> + add_remove_instance_nofile(standard_io). +add_remove_instance_standard_io(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_standard_error(_Config) -> + add_remove_instance_nofile(standard_error). +add_remove_instance_standard_error(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file1(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,"stdlog1.txt"), + Type = {file,Log}, + add_remove_instance_file(Log, Type). +add_remove_instance_file1(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file2(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,"stdlog2.txt"), + Type = {file,Log,[raw,append]}, + add_remove_instance_file(Log, Type). +add_remove_instance_file2(cleanup,_Config) -> + logger_std_h_remove(). + +add_remove_instance_file(Log, Type) -> + ok = logger:add_handler(?MODULE, + logger_std_h, + #{logger_std_h => #{type => Type}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + Pid = whereis(?MODULE), + true = is_pid(Pid), + logger:info(M1=?msg,?domain), + ?check(M1), + B1 = ?bin(M1), + try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(?MODULE), + logger:info(?msg,?domain), + ?check_no_log, + try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT), + ok. + +default_formatter(_Config) -> + ok = logger:set_handler_config(?STANDARD_HANDLER,formatter, + {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}), + ct:capture_start(), + logger:info(M1=?msg), + timer:sleep(100), + ct:capture_stop(), + [Msg] = ct:capture_get(), + match = re:run(Msg,"=INFO REPORT====.*\n"++M1,[{capture,none}]), + ok. 
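The add_remove_instance cases boil down to one call: a logger_std_h instance is created with logger:add_handler/3 and its destination is selected with the type key (standard_io, standard_error or {file,Name[,Modes]}; a bare tty type is rejected). A minimal sketch of a file-backed instance mirroring add_remove_instance_file/2 above; the handler id and path are illustrative:

    %% Sketch: add, use and remove a file-backed standard handler instance.
    Log = "/tmp/my_app.log",                    %% illustrative path
    ok = logger:add_handler(my_file_h, logger_std_h,
                            #{logger_std_h => #{type => {file,Log,[raw,append]}},
                              filter_default => log}),
    logger:info("hello file handler"),
    ok = logger_std_h:filesync(my_file_h),      %% force the write to disk
    ok = logger:remove_handler(my_file_h).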
+ +errors(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,?FUNCTION_NAME), + + ok = logger:add_handler(?MODULE,logger_std_h,#{}), + {error,{already_exist,?MODULE}} = + logger:add_handler(?MODULE,logger_std_h,#{}), + + {error,{not_found,no_such_name}} = logger:remove_handler(no_such_name), + + ok = logger:remove_handler(?MODULE), + {error,{not_found,?MODULE}} = logger:remove_handler(?MODULE), + + {error, + {handler_not_added, + {invalid_config,logger_std_h,{type,faulty_type}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{type => faulty_type}}), + + NoDir = lists:concat(["/",?MODULE,"_dir"]), + {error, + {handler_not_added,{{open_failed,NoDir,eacces},_}}} = + logger:add_handler(myh2,logger_std_h, + #{logger_std_h=>#{type=>{file,NoDir}}}), + + {error, + {handler_not_added,{{open_failed,Log,_},_}}} = + logger:add_handler(myh3,logger_std_h, + #{logger_std_h=>#{type=>{file,Log,[bad_file_opt]}}}), + + ok = logger:info(?msg). + +errors(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +formatter_fail(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir,?FUNCTION_NAME), + + %% no formatter + ok = logger:add_handler(?MODULE, + logger_std_h, + #{logger_std_h => #{type => {file,Log}}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}), + Pid = whereis(?MODULE), + true = is_pid(Pid), + {ok,#{handlers:=H}} = logger:get_logger_config(), + true = lists:member(?MODULE,H), + + %% Formatter is added automatically + {ok,{_,#{formatter:={logger_formatter,_}}}} = + logger:get_handler_config(?MODULE), + logger:info(M1=?msg,?domain), + Got1 = try_match_file(Log,"=INFO REPORT====.*\n"++M1,5000), + + ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}), + logger:info(M2=?msg,?domain), + Got2 = try_match_file(Log, + Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2, + 5000), + + ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}), + logger:info(M3=?msg,?domain), + Got3 = try_match_file(Log, + Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3, + 5000), + + ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}), + logger:info(?msg,?domain), + try_match_file(Log, + Got3++"FORMATTER ERROR: bad_return_value", + 5000), + + %% Check that handler is still alive and was never dead + Pid = whereis(?MODULE), + {ok,#{handlers:=H}} = logger:get_logger_config(), + + ok. + +formatter_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). 
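formatter_fail/1 relies on the handler surviving a broken formatter: the formatter is a {Module,Config} pair whose Module:format(LogEvent,Config) is expected to return chardata, and the handler logs FORMATTER CRASH / FORMATTER ERROR lines instead of dying. A minimal sketch of such a callback module, following the same shape as logger_formatter:format/2 used elsewhere in this patch; the module name is hypothetical and, for brevity, only the {Format,Args} message form is handled:

    -module(my_formatter).      %% hypothetical
    -export([format/2]).

    %% A real formatter must also handle {report,R} and {string,S} messages,
    %% as logger_formatter does.
    format(#{level := Level, msg := {Format,Args}, meta := _Meta}, _Config) ->
        io_lib:format("~w: " ++ Format ++ "~n", [Level|Args]).

It would be attached with something like logger:set_handler_config(my_file_h, formatter, {my_formatter,#{}}).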
+ +config_fail(_Config) -> + {error,{handler_not_added,{invalid_config,logger_std_h,{bad,bad}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{bad => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{handler_not_added,{invalid_config,logger_std_h, + {restart_type,bad}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{restart_type => bad}, + filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{handler_not_added,{invalid_levels,{42,42,_}}}} = + logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{toggle_sync_qlen=>42, + drop_new_reqs_qlen=>42}}), + + ok = logger:add_handler(?MODULE,logger_std_h, + #{filter_default=>log, + formatter=>{?MODULE,self()}}), + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,logger_std_h, + #{type=>{file,"file"}}), + {error,{illegal_config_change,_,_}} = + logger:set_handler_config(?MODULE,id,bad), + {error,{invalid_levels,_}} = + logger:set_handler_config(?MODULE,logger_std_h, + #{toggle_sync_qlen=>100, + flush_reqs_qlen=>99}), + {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} = + logger:set_handler_config(?MODULE, logger_std_h, + #{filesync_rep_int => 2000}), + ok. + +config_fail(cleanup,_Config) -> + logger:remove_handler(?MODULE). + +crash_std_h_to_file(Config) -> + crash_std_h(Config,?FUNCTION_NAME,logger_dest,file). +crash_std_h_to_file(cleanup,_Config) -> + crash_std_h(cleanup). + +crash_std_h_to_disk_log(Config) -> + crash_std_h(Config,?FUNCTION_NAME,logger_dest,disk_log). +crash_std_h_to_disk_log(cleanup,_Config) -> + crash_std_h(cleanup). + +crash_std_h(Config,Func,Var,Type) -> + Dir = ?config(priv_dir,Config), + File = lists:concat([?MODULE,"_",Func,".log"]), + Log = filename:join(Dir,File), + Pa = filename:dirname(code:which(?MODULE)), + TypeAndLog = + case os:type() of + {win32,_} -> + lists:concat([" {",Type,",\\\"",Log,"\\\"}"]); + _ -> + lists:concat([" \'{",Type,",\"",Log,"\"}\'"]) + end, + Args = lists:concat([" -kernel ",Var,TypeAndLog," -pa ",Pa]), + Name = lists:concat([?MODULE,"_",Func]), + ct:pal("Starting ~p with ~tp", [Name,Args]), + %% Start a node which prints kernel logs to the destination specified by Type + {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]), + Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,self()}]), + ok = log_on_remote_node(Node,"dummy1"), + ?check("dummy1"), + {ok,Bin1} = sync_and_read(Node,Type,Log), + <<"dummy1\n">> = binary:part(Bin1,{byte_size(Bin1),-7}), + + %% Kill the logger_std_h process + exit(Pid, kill), + + %% Wait a bit, then check that it is gone + timer:sleep(2000), + undefined = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), + + %% Check that file is not empty + {ok,Bin2} = sync_and_read(Node,Type,Log), + <<"dummy1\n">> = binary:part(Bin2,{byte_size(Bin2),-7}), + ok. + +%% Can not use rpc:call here, since the code would execute on a +%% process with group_leader on this (the calling) node, and thus +%% logger would send the log event to the logger process here instead +%% of logging it itself. +log_on_remote_node(Node,Msg) -> + _ = spawn_link(Node, + fun() -> erlang:group_leader(whereis(user),self()), + logger:info(Msg) + end), + ok. + + +crash_std_h(cleanup) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. 
+ +sync_and_read(Node,disk_log,Log) -> + rpc:call(Node,logger_disk_log_h,disk_log_sync,[?STANDARD_HANDLER]), + case file:read_file(Log ++ ".1") of + {ok,<<>>} -> + timer:sleep(5000), + file:read_file(Log ++ ".1"); + Ok -> + Ok + end; +sync_and_read(Node,file,Log) -> + rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]), + case file:read_file(Log) of + {ok,<<>>} -> + timer:sleep(5000), + file:read_file(Log); + Ok -> + Ok + end. + +bad_input(_Config) -> + {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType"), + {error,{badarg,{info,["BadType"]}}} = logger_std_h:info("BadType"), + {error,{badarg,{reset,["BadType"]}}} = logger_std_h:reset("BadType"). + + +info_and_reset(_Config) -> + #{id := ?STANDARD_HANDLER} = logger_std_h:info(?STANDARD_HANDLER), + ok = logger_std_h:reset(?STANDARD_HANDLER). + +reconfig(Config) -> + Dir = ?config(priv_dir,Config), + ok = logger:add_handler(?MODULE, + logger_std_h, + #{logger_std_h => #{type => standard_io}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + #{id := ?MODULE, + type := standard_io, + file_ctrl_pid := FileCtrlPid, + toggle_sync_qlen := ?TOGGLE_SYNC_QLEN, + drop_new_reqs_qlen := ?DROP_NEW_REQS_QLEN, + flush_reqs_qlen := ?FLUSH_REQS_QLEN, + enable_burst_limit := ?ENABLE_BURST_LIMIT, + burst_limit_size := ?BURST_LIMIT_SIZE, + burst_window_time := ?BURST_WINDOW_TIME, + enable_kill_overloaded := ?ENABLE_KILL_OVERLOADED, + handler_overloaded_qlen := ?HANDLER_OVERLOADED_QLEN, + handler_overloaded_mem := ?HANDLER_OVERLOADED_MEM, + handler_restart_after := ?HANDLER_RESTART_AFTER, + filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} = + logger_std_h:info(?MODULE), + + ok = logger:set_handler_config(?MODULE, logger_std_h, + #{toggle_sync_qlen => 1, + drop_new_reqs_qlen => 2, + flush_reqs_qlen => 3, + enable_burst_limit => false, + burst_limit_size => 10, + burst_window_time => 10, + enable_kill_overloaded => true, + handler_overloaded_qlen => 100000, + handler_overloaded_mem => 10000000, + handler_restart_after => never, + filesync_repeat_interval => no_repeat}), + #{id := ?MODULE, + type := standard_io, + file_ctrl_pid := FileCtrlPid, + toggle_sync_qlen := 1, + drop_new_reqs_qlen := 2, + flush_reqs_qlen := 3, + enable_burst_limit := false, + burst_limit_size := 10, + burst_window_time := 10, + enable_kill_overloaded := true, + handler_overloaded_qlen := 100000, + handler_overloaded_mem := 10000000, + handler_restart_after := never, + filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE), + ok. + +reconfig(cleanup, _Config) -> + logger:remove_handler(?MODULE). + + +file_opts(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])), + BadFileOpts = [raw], + BadType = {file,Log,BadFileOpts}, + {error,{handler_not_added,{{open_failed,Log,enoent},_}}} = + logger:add_handler(?MODULE, logger_std_h, + #{logger_std_h => #{type => BadType}}), + + OkFileOpts = [raw,append], + OkType = {file,Log,OkFileOpts}, + ok = logger:add_handler(?MODULE, + logger_std_h, + #{logger_std_h => #{type => OkType}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + + #{type := OkType} = logger_std_h:info(?MODULE), + logger:info(M1=?msg,?domain), + ?check(M1), + B1 = ?bin(M1), + try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT), + ok. +file_opts(cleanup, _Config) -> + logger:remove_handler(?MODULE). 
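reconfig/1 enumerates every runtime knob of logger_std_h, and the overload tests below only ever touch them through logger:set_handler_config(Id, logger_std_h, Map). A condensed sketch of a typical reconfiguration, keeping only keys that appear in this suite; the handler id and the values are illustrative (the three queue-length thresholds must stay in increasing order, or an invalid_levels error is returned, as config_fail/1 shows):

    %% Sketch: raise the overload thresholds and keep the periodic fsync.
    ok = logger:set_handler_config(
           my_file_h, logger_std_h,
           #{toggle_sync_qlen         => 20,     %% switch to synchronous mode here
             drop_new_reqs_qlen       => 500,    %% start dropping requests here
             flush_reqs_qlen          => 2000,   %% flush the whole mailbox here
             enable_burst_limit       => false,
             filesync_repeat_interval => 5000}), %% ms between repeated fsyncs
    #{toggle_sync_qlen := 20} = logger_std_h:info(my_file_h).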
+ + +filesync(Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])), + Type = {file,Log}, + ok = logger:add_handler(?MODULE, + logger_std_h, + #{logger_std_h => #{type => Type}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,nl}}), + + %% check repeated filesync happens + start_tracer([{logger_std_h, write_to_dev, 5}, + {logger_std_h, sync_dev, 4}, + {file, datasync, 1}], + [{logger_std_h, write_to_dev, <<"first\n">>}, + {logger_std_h, sync_dev}, + {file,datasync}]), + + logger:info("first", ?domain), + %% wait for automatic filesync + check_tracer(?FILESYNC_REP_INT*2), + + %% check that explicit filesync is only done once + start_tracer([{logger_std_h, write_to_dev, 5}, + {logger_std_h, sync_dev, 4}, + {file, datasync, 1}], + [{logger_std_h, write_to_dev, <<"second\n">>}, + {logger_std_h, sync_dev}, + {file,datasync}, + {no_more,500} + ]), + logger:info("second", ?domain), + %% do explicit filesync + logger_std_h:filesync(?MODULE), + %% a second filesync should be ignored + logger_std_h:filesync(?MODULE), + check_tracer(100), + + %% check that if there's no repeated filesync active, + %% a filesync is still performed when handler goes idle + logger:set_handler_config(?MODULE, logger_std_h, + #{filesync_repeat_interval => no_repeat}), + no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)), + %% The following timer is to make sure the time from last log + %% ("second") to next ("third") is long enough, so the a flush is + %% triggered by the idle timeout between "thrid" and "fourth". + timer:sleep(?IDLE_DETECT_TIME_MSEC*2), + start_tracer([{logger_std_h, write_to_dev, 5}, + {logger_std_h, sync_dev, 4}, + {file, datasync, 1}], + [{logger_std_h, write_to_dev, <<"third\n">>}, + {logger_std_h, sync_dev}, + {file,datasync}, + {logger_std_h, write_to_dev, <<"fourth\n">>}, + {logger_std_h, sync_dev}, + {file,datasync}]), + logger:info("third", ?domain), + %% wait for automatic filesync + timer:sleep(?IDLE_DETECT_TIME_MSEC*2), + logger:info("fourth", ?domain), + %% wait for automatic filesync + check_tracer(?IDLE_DETECT_TIME_MSEC*2), + + %% switch repeated filesync on and verify that the looping works + SyncInt = 1000, + WaitT = 4500, + OneSync = {logger_std_h,handle_cast,repeated_filesync}, + %% receive 1 initial repeated_filesync, then 1 per sec + start_tracer([{logger_std_h,handle_cast,2}], + [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]), + + logger:set_handler_config(?MODULE, logger_std_h, + #{filesync_repeat_interval => SyncInt}), + SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)), + timer:sleep(WaitT), + logger:set_handler_config(?MODULE, logger_std_h, + #{filesync_repeat_interval => no_repeat}), + check_tracer(100), + ok. +filesync(cleanup, _Config) -> + dbg:stop_clear(), + logger:remove_handler(?MODULE). 
+ +write_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + Log = filename:join(Dir, File), + Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [file_write,ok]), + + ok = log_on_remote_node(Node, "Logged1"), + rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\n">>}, ?FILESYNC_REP_INT), + + rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,write,Log,{error,terminated}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_write,{error,eacces}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,write,Log,{error,eacces}}}), + + rpc:call(Node, ?MODULE, set_result, [file_write,ok]), + ok = log_on_remote_node(Node, "Logged2"), + rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]), + ?check_no_log, + try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?FILESYNC_REP_INT), + ok. +write_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. + +sync_failure(Config) -> + Dir = ?config(priv_dir, Config), + File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + Log = filename:join(Dir, File), + Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])), + rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]), + rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]), + rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]), + + SyncInt = 500, + ok = rpc:call(Node, logger, set_handler_config, + [?STANDARD_HANDLER, logger_std_h, + #{filesync_repeat_interval => SyncInt}]), + Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]), + SyncInt = maps:get(filesync_repeat_interval, Info), + + ok = log_on_remote_node(Node, "Logged1"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + + ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}), + + ok = log_on_remote_node(Node, "No second error printout"), + ?check_no_log, + + rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]), + ok = log_on_remote_node(Node, "Cause simple error printout"), + ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}), + + rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]), + ok = log_on_remote_node(Node, "Logged2"), + ?check_no_log, + ok. +sync_failure(cleanup, _Config) -> + Nodes = nodes(), + [test_server:stop_node(Node) || Node <- Nodes]. 
+ +start_std_h_on_new_node(_Config, Func, Log) -> + Pa = filename:dirname(code:which(?MODULE)), + Dest = + case os:type() of + {win32,_} -> + lists:concat([" {file,\\\"",Log,"\\\"}"]); + _ -> + lists:concat([" \'{file,\"",Log,"\"}\'"]) + end, + Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]), + Name = lists:concat([?MODULE,"_",Func]), + ct:pal("Starting ~s with ~tp", [Name,Args]), + {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]), + Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]), + true = is_pid(Pid), + ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter, + {?MODULE,nl}]), + Node. + +%% functions for test hook macros to be called by rpc +set_internal_log(Mod, Func) -> + ?set_internal_log({Mod,Func}). +set_result(Op, Result) -> + ?set_result(Op, Result). +set_defaults() -> + ?set_defaults(). + +%% internal log function that sends the term to the test case process +internal_log(Type, Term) -> + [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester), + Tester ! {log,{Type,Term}}, + logger:internal_log(Type, Term), + ok. + + +%%%----------------------------------------------------------------- +%%% Overload protection tests + +op_switch_to_sync_file(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3, + drop_new_reqs_qlen => 501, + flush_reqs_qlen => 2000, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + %% TRecvPid = start_op_trace(), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + NumOfReqs = count_lines(Log), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(async,Events) end), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(sync,Events) end), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_switch(async,sync,Events) end), + %% false = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(drop,Events) end), + %% false = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(flush,Events) end), + ok = file:delete(Log), + %% stop_op_trace(TRecvPid), + ok. +op_switch_to_sync_file(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_sync_tty(Config) -> + {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3, + drop_new_reqs_qlen => 501, + flush_reqs_qlen => 2000, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + ok. +op_switch_to_sync_tty(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
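The op_switch_* tests turn on the handler's overload protection by setting three queue-length thresholds. A small sketch of tuning them on an already-added handler, using the same read-modify-write pattern as start_handler/3 later in this file; the handler id is a parameter and the numbers are only examples.

    -module(overload_cfg_sketch).      %% illustrative module name
    -export([tune/1]).

    tune(HandlerId) ->
        {ok,{_Mod,HConfig = #{logger_std_h := StdHConfig}}} =
            logger:get_handler_config(HandlerId),
        NewHConfig =
            HConfig#{logger_std_h =>
                         StdHConfig#{toggle_sync_qlen   => 100,   % switch to sync mode here
                                     drop_new_reqs_qlen => 500,   % start dropping new events here
                                     flush_reqs_qlen    => 1000}},% flush the whole queue here
        ok = logger:set_handler_config(HandlerId, NewHConfig).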
+ +op_switch_to_drop_file(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 3, + flush_reqs_qlen => 600, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + %% TRecvPid = start_op_trace(), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages dropped = ~w (~w)", + [NumOfReqs-Logged,NumOfReqs]), + true = (Logged < NumOfReqs), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(async,Events) end), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(drop,Events) end), + %% false = analyse_trace(TRecvPid, + %% fun(Events) -> find_mode(flush,Events) end), + %% true = analyse_trace(TRecvPid, + %% fun(Events) -> find_switch(async,drop,Events) + %% orelse find_switch(sync,drop,Events) + %% end), + ok = file:delete(Log), + %% stop_op_trace(TRecvPid), + ok. +op_switch_to_drop_file(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_drop_tty(Config) -> + {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 3, + flush_reqs_qlen => 600, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 500, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + ok. +op_switch_to_drop_tty(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_flush_file() -> + [{timetrap,{minutes,3}}]. +op_switch_to_flush_file(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + + %% it's important that both async and sync requests have been queued + %% when the flush happens (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 99, + flush_reqs_qlen => 100, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 10000, + Procs = 100, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages flushed/dropped = ~w (~w)", + [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]), + true = (Logged < (NumOfReqs*Procs)), + + %%! --- Thu Apr 12 13:46:00 2018 --- peppe was here! + %%! TODO: Verify that handler has switched to flush mode + + ok = file:delete(Log), + ok. +op_switch_to_flush_file(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +op_switch_to_flush_tty(Config) -> + {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config), + + %% it's important that both async and sync requests have been queued + %% when the flush happens (verify with coverage of flush_log_requests/2) + + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 99, + flush_reqs_qlen => 100, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 10000, + Procs = 10, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + ok. +op_switch_to_flush_tty(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
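Reaching flush mode requires more load than one sequential sender can produce, which is why the tests above spawn many concurrent senders. A sketch of that fan-out, in the same spirit as send_n_burst/4 further down; the process count, message count and message text are placeholders.

    -module(burst_sketch).             %% illustrative module name
    -export([burst/2]).

    %% Spawn Procs senders, each logging N messages, and wait for all of them.
    burst(Procs, N) ->
        Sender = fun() ->
                         [logger:info("burst message") || _ <- lists:seq(1, N)],
                         ok
                 end,
        MRefs = [monitor(process, spawn_link(Sender)) || _ <- lists:seq(1, Procs)],
        [receive {'DOWN', MRef, process, _Pid, _Reason} -> ok end || MRef <- MRefs],
        ok.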
+ +limit_burst_disabled(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => false, + burst_limit_size => 10, + burst_window_time => 2000, + drop_new_reqs_qlen => 200, + flush_reqs_qlen => 300}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + NumOfReqs = Logged. +limit_burst_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_one(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => true, + burst_limit_size => ReqLimit, + burst_window_time => 2000, + drop_new_reqs_qlen => 200, + flush_reqs_qlen => 300}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + ReqLimit = Logged. +limit_burst_enabled_one(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +limit_burst_enabled_period(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + ReqLimit = 10, + BurstTWin = 1000, + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => true, + burst_limit_size => ReqLimit, + burst_window_time => BurstTWin, + drop_new_reqs_qlen => 20000, + flush_reqs_qlen => 20001}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + + Windows = 3, + Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + ok = file:delete(Log), + true = (Logged > (ReqLimit*Windows)) andalso + (Logged < (ReqLimit*(Windows+2))). +limit_burst_enabled_period(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +kill_disabled(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>false, + handler_overloaded_qlen=>10, + handler_overloaded_mem=>100}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + NumOfReqs = 100, + send_burst({n,NumOfReqs}, seq, {chars,79}, info), + Logged = count_lines(Log), + ct:pal("Number of messages logged = ~w", [Logged]), + ok = file:delete(Log), + true = is_pid(whereis(?MODULE)), + ok. +kill_disabled(cleanup, _Config) -> + ok = stop_handler(?MODULE). 
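The burst limit caps how many events the handler accepts per time window: once enable_burst_limit is true, at most burst_limit_size events pass during each burst_window_time milliseconds, which is what limit_burst_enabled_one/1 and limit_burst_enabled_period/1 verify. A sketch of flipping it on and off for an existing handler; it assumes, as the calls in these tests do, that a partial logger_std_h map is merged into the current handler configuration, and the values are examples only.

    -module(burst_limit_sketch).       %% illustrative module name
    -export([enable/1, disable/1]).

    enable(HandlerId) ->
        ok = logger:set_handler_config(HandlerId, logger_std_h,
                                       #{enable_burst_limit => true,
                                         burst_limit_size   => 10,     % max events ...
                                         burst_window_time  => 2000}). % ... per 2 s window

    disable(HandlerId) ->
        ok = logger:set_handler_config(HandlerId, logger_std_h,
                                       #{enable_burst_limit => false}).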
+ +qlen_kill_new(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(?MODULE), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = 2000, + NewHConfig = + HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_overloaded_mem=>Mem0+50000, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 2, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, info), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + timer:sleep(RestartAfter + 1000), + true = is_pid(whereis(?MODULE)), + ok + after + 5000 -> + Info = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +qlen_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% choke the standard handler on remote node to verify the termination +%% works as expected +qlen_kill_std(Config) -> + %%! HERE + %% Dir = ?config(priv_dir, Config), + %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]), + %% Log = filename:join(Dir, File), + %% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log), + %% ok = rpc:call(Node, logger, set_handler_config, + %% [?STANDARD_HANDLER, logger_std_h, + %% #{enable_kill_overloaded=>true, + %% handler_overloaded_qlen=>10, + %% handler_overloaded_mem=>100000}]), + {skip,"Not done yet"}. + +mem_kill_new(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + Pid0 = whereis(?MODULE), + {_,Mem0} = process_info(Pid0, memory), + RestartAfter = 2000, + NewHConfig = + HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>50000, + handler_overloaded_mem=>Mem0+500, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + MRef = erlang:monitor(process, Pid0), + NumOfReqs = 100, + Procs = 2, + send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info), + %% send_burst({n,NumOfReqs}, seq, {chars,79}, info), + receive + {'DOWN', MRef, _, _, Info} -> + case Info of + {shutdown,{overloaded,?MODULE,QLen,Mem}} -> + ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]); + killed -> + ct:pal("Slow shutdown, handler process was killed!", []) + end, + timer:sleep(RestartAfter * 2), + true = is_pid(whereis(?MODULE)), + ok + after + 5000 -> + Info = logger_std_h:info(?MODULE), + ct:pal("Handler state = ~p", [Info]), + ct:fail("Handler not dead! It should not have survived this!") + end. +mem_kill_new(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% choke the standard handler on remote node to verify the termination +%% works as expected +mem_kill_std(Config) -> + {skip,"Not done yet"}. 
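The kill tests arm four keys (enable_kill_overloaded plus the qlen and memory limits and handler_restart_after) and then watch the handler process die and come back. A sketch of that flow, assuming, as the tests above do, that the handler process is registered under its handler id; the limits and sleep time are arbitrary examples.

    -module(kill_sketch).              %% illustrative module name
    -export([arm_and_watch/1]).

    arm_and_watch(HandlerId) ->
        ok = logger:set_handler_config(HandlerId, logger_std_h,
                                       #{enable_kill_overloaded  => true,
                                         handler_overloaded_qlen => 1000,
                                         handler_overloaded_mem  => 10000000,
                                         handler_restart_after   => 2000}),
        MRef = erlang:monitor(process, whereis(HandlerId)),
        receive
            {'DOWN', MRef, process, _Pid, Reason} ->
                %% Reason is {shutdown,{overloaded,...}} on a controlled kill
                timer:sleep(3000),                     % longer than handler_restart_after
                true = is_pid(whereis(HandlerId)),     % handler was restarted
                {killed_and_restarted, Reason}
        after 60000 ->
                erlang:demonitor(MRef, [flush]),
                not_overloaded
        end.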
+ +restart_after(Config) -> + {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig1 = + HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_restart_after=>never}}, + ok = logger:set_handler_config(?MODULE, NewHConfig1), + MRef1 = erlang:monitor(process, whereis(?MODULE)), + %% kill handler + send_burst({n,100}, {spawn,2,0}, {chars,79}, info), + receive + {'DOWN', MRef1, _, _, _Info1} -> + timer:sleep(?HANDLER_RESTART_AFTER + 1000), + undefined = whereis(?MODULE), + ok + after + 5000 -> + ct:fail("Handler not dead! It should not have survived this!") + end, + + {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config), + RestartAfter = 2000, + NewHConfig2 = + HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true, + handler_overloaded_qlen=>10, + handler_restart_after=>RestartAfter}}, + ok = logger:set_handler_config(?MODULE, NewHConfig2), + Pid0 = whereis(?MODULE), + MRef2 = erlang:monitor(process, Pid0), + %% kill handler + send_burst({n,100}, {spawn,2,0}, {chars,79}, info), + receive + {'DOWN', MRef2, _, _, _Info2} -> + timer:sleep(RestartAfter + 1000), + Pid1 = whereis(?MODULE), + true = is_pid(Pid1), + false = (Pid1 == Pid0), + ok + after + 5000 -> + ct:fail("Handler not dead! It should not have survived this!") + end, + ok. +restart_after(cleanup, _Config) -> + ok = stop_handler(?MODULE). + +%% send handler requests (filesync, info, reset, change_config) +%% during high load to verify that sync, dropping and flushing is +%% handled correctly. +handler_requests_under_load() -> + [{timetrap,{minutes,3}}]. +handler_requests_under_load(Config) -> + {Log,HConfig,StdHConfig} = + start_handler(?MODULE, ?FUNCTION_NAME, Config), + NewHConfig = + HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2, + drop_new_reqs_qlen => 1000, + flush_reqs_qlen => 2000, + enable_burst_limit => false}}, + ok = logger:set_handler_config(?MODULE, NewHConfig), + Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]}, + {info,[]}, + {reset,[]}, + {change_config,[]}]) + end), + Sent = send_burst({t,10000}, seq, {chars,79}, info), + Pid ! {self(),finish}, + ReqResult = receive {Pid,Result} -> Result end, + Logged = count_lines(Log), + ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w", + [Sent,Logged]), + FindError = fun(Res) -> + [E || E <- Res, + is_tuple(E) andalso (element(1,E) == error)] + end, + Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult], + NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult), + ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]), + ok = file:delete(Log). +handler_requests_under_load(cleanup, Config) -> + ok = stop_handler(?MODULE). + +send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) -> + receive + {From,finish} -> + From ! {self(),Reqs} + after + TO -> + Result = + case Req of + change_config -> + logger:set_handler_config(HName, logger_std_h, + #{enable_kill_overloaded => + false}); + Func -> + logger_std_h:Func(HName) + end, + send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}]) + end. 
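Several assertions in this file read the handler's run-time state with logger_std_h:info/1, for example the filesync_repeat_interval checks above and the file_ctrl_pid lookup in start_tracer/2 below. A minimal sketch of pulling those two fields out of the returned map; no other keys of the map are assumed.

    -module(h_info_sketch).            %% illustrative module name
    -export([peek/1]).

    peek(HandlerId) ->
        Info = logger_std_h:info(HandlerId),
        %% only the keys this suite itself reads are assumed to exist
        #{filesync_repeat_interval := SyncInt,
          file_ctrl_pid            := FileCtrlPid} = Info,
        {SyncInt, FileCtrlPid}.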
+ + +%%%----------------------------------------------------------------- +%%% +start_handler(Name, TTY, Config) when TTY == standard_io; + TTY == standard_error-> + ok = logger:add_handler(Name, + logger_std_h, + #{logger_std_h => #{type => TTY}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,{_,HConfig = #{logger_std_h := StdHConfig}}} = + logger:get_handler_config(Name), + {HConfig,StdHConfig}; + +start_handler(Name, FuncName, Config) -> + Dir = ?config(priv_dir,Config), + Log = filename:join(Dir, lists:concat([FuncName,".log"])), + ct:pal("Logging to ~tp", [Log]), + Type = {file,Log}, + ok = logger:add_handler(Name, + logger_std_h, + #{logger_std_h => #{type => Type}, + filter_default=>log, + filters=>?DEFAULT_HANDLER_FILTERS([Name]), + formatter=>{?MODULE,op}}), + {ok,{_,HConfig = #{logger_std_h := StdHConfig}}} = + logger:get_handler_config(Name), + {Log,HConfig,StdHConfig}. + +stop_handler(Name) -> + ok = logger:remove_handler(Name), + ct:pal("Handler ~p stopped!", [Name]). + +count_lines(File) -> + wait_until_written(File, -1), + count_lines1(File). + +wait_until_written(File, Sz) -> + timer:sleep(2000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz}} -> + timer:sleep(1000), + case file:read_file_info(File) of + {ok,#file_info{size = Sz1}} -> + ok; + {ok,#file_info{size = Sz2}} -> + wait_until_written(File, Sz2) + end; + {ok,#file_info{size = Sz1}} -> + wait_until_written(File, Sz1) + end. + +count_lines1(File) -> + Counter = fun Cnt(Dev,LC) -> + case file:read_line(Dev) of + eof -> LC; + _ -> Cnt(Dev,LC+1) + end + end, + {_,Dev} = file:open(File, [read]), + Lines = Counter(Dev, 0), + file:close(Dev), + Lines. + +send_burst(NorT, Type, {chars,Sz}, Class) -> + Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)], + case NorT of + {n,N} -> + %% process_flag(priority, high), + send_n_burst(N, Type, Text, Class), + %% process_flag(priority, normal), + N; + {t,T} -> + ct:pal("Sending messages sequentially for ~w ms", [T]), + T0 = erlang:monotonic_time(millisecond), + send_t_burst(T0, T, Text, Class, 0) + end. + +send_n_burst(0, _, _Text, _Class) -> + ok; +send_n_burst(N, seq, Text, Class) -> + ok = logger:Class(Text, ?domain), + send_n_burst(N-1, seq, Text, Class); +send_n_burst(N, {spawn,Ps,TO}, Text, Class) -> + ct:pal("~w processes each sending ~w messages", [Ps,N]), + PerProc = fun() -> + send_n_burst(N, seq, Text, Class) + end, + MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end, + monitor(process,spawn_link(PerProc)) end || + _ <- lists:seq(1,Ps)], + lists:foreach(fun(MRef) -> + receive + {'DOWN', MRef, _, _, _} -> + ok + end + end, MRefs), + ct:pal("Message burst sent", []), + ok. + +send_t_burst(T0, T, Text, Class, N) -> + T1 = erlang:monotonic_time(millisecond), + if (T1-T0) > T -> + N; + true -> + ok = logger:Class(Text, ?domain), + send_t_burst(T0, T, Text, Class, N+1) + end. + +%%%----------------------------------------------------------------- +%%% Formatter callback +%%% Using this to send the formatted string back to the test case +%%% process - so it can check for logged events. 
+format(_,bad_return) -> + bad_return; +format(_,crash) -> + erlang:error(formatter_crashed); +format(#{msg:={string,String0}},no_nl) -> + String = unicode:characters_to_list(String0), + String; +format(#{msg:={string,String0}},nl) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={string,String0}},op) -> + String = unicode:characters_to_list(String0), + String++"\n"; +format(#{msg:={report,#{label:={supervisor,progress}}}},op) -> + ""; +format(#{msg:={report,#{label:={gen_server,terminate}}}},op) -> + ""; +format(#{msg:={report,#{label:={proc_lib,crash}}}},op) -> + ""; +format(#{msg:={F,A}},Pid) when is_list(F), is_list(A) -> + String = lists:flatten(io_lib:format(F,A)), + Pid ! {log,String}, + String++"\n"; +format(#{msg:={string,String0}},Pid) -> + String = unicode:characters_to_list(String0), + Pid ! {log,String}, + String++"\n". + +add_remove_instance_nofile(Type) -> + ok = logger:add_handler(?MODULE,logger_std_h, + #{logger_std_h => #{type => Type}, + filter_default=>stop, + filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]), + formatter=>{?MODULE,self()}}), + Pid = whereis(?MODULE), + true = is_pid(Pid), + group_leader(group_leader(),Pid), % to get printouts in test log + logger:info(M1=?msg,?domain), + ?check(M1), + %% check that filesync doesn't do damage even if not relevant + ok = logger_std_h:filesync(?MODULE), + ok = logger:remove_handler(?MODULE), + timer:sleep(500), + undefined = whereis(?MODULE), + logger:info(?msg,?domain), + ?check_no_log, + ok. + +logger_std_h_remove() -> + logger:remove_handler(?MODULE). +logger_std_h_remove(Id) -> + logger:remove_handler(Id). + +try_read_file(FileName, Expected, Time) when Time > 0 -> + case file:read_file(FileName) of + Expected -> + ok; + Error = {error,_Reason} -> + ct:pal("Can't read ~tp: ~tp", [FileName,Error]), + erlang:error(Error); + Got -> + ct:pal("try_read_file got ~tp", [Got]), + timer:sleep(500), + try_read_file(FileName, Expected, Time-500) + end; +try_read_file(FileName, Expected, _) -> + ct:pal("Missing pattern ~tp in ~tp", [Expected,FileName]), + erlang:error({error,missing_expected_pattern}). + +try_match_file(FileName, Pattern, Time) -> + try_match_file(FileName, Pattern, Time, <<>>). + +try_match_file(FileName, Pattern, Time, _) when Time > 0 -> + case file:read_file(FileName) of + {ok, Bin} -> + case re:run(Bin,Pattern,[{capture,none}]) of + match -> + unicode:characters_to_list(Bin); + _ -> + timer:sleep(100), + try_match_file(FileName, Pattern, Time-100, Bin) + end; + Error -> + erlang:error(Error) + end; +try_match_file(_,Pattern,_,Incorrect) -> + ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n", + [Pattern,Incorrect]), + erlang:error({error,not_matching_pattern,Pattern,Incorrect}). + +%%%----------------------------------------------------------------- +%%% +start_op_trace() -> + TraceFun = fun({trace,_,call,{_Mod,Func,Details}}, Pid) -> + Pid ! {trace_call,Func,Details}, + Pid; + ({trace,_,return_from,{_Mod,Func,_},RetVal}, Pid) -> + Pid ! 
{trace_return,Func,RetVal}, + Pid + end, + TRecvPid = spawn_link(fun() -> trace_receiver(5000) end), + {ok,_} = dbg:tracer(process, {TraceFun, TRecvPid}), + + {ok,_} = dbg:p(whereis(?MODULE), [c]), + {ok,_} = dbg:p(self(), [c]), + + MS1 = dbg:fun2ms(fun([_]) -> return_trace() end), + {ok,_} = dbg:tp(logger_h_common, check_load, 1, MS1), + + {ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []), + + MS2 = dbg:fun2ms(fun([_,mode]) -> return_trace() end), + {ok,_} = dbg:tpl(ets, lookup, 2, MS2), + + ct:pal("Tracing started!", []), + TRecvPid. + +stop_op_trace(TRecvPid) -> + dbg:stop_clear(), + unlink(TRecvPid), + exit(TRecvPid, kill), + ok. + +find_mode(flush, Events) -> + lists:any(fun({trace_call,flush_log_requests,[_,_]}) -> true; + (_) -> false + end, Events); +find_mode(Mode, Events) -> + lists:keymember([{mode,Mode}], 3, Events). + +find_switch(From, To, Events) -> + try lists:foldl(fun({trace_return,check_load,{To,_,_,_}}, + {trace_call,check_load,[#{mode := From}]}) -> + throw(match); + (Event, _) -> + Event + end, undefined, Events) of + _ -> false + catch + throw:match -> true + end. + +analyse_trace(TRecvPid, TestFun) -> + TRecvPid ! {test,self(),TestFun}, + receive + {result,TRecvPid,Result} -> + Result + after + 60000 -> + fails + end. + +trace_receiver(IdleT) -> + Msgs = receive_until_idle(IdleT, 5, []), + ct:pal("~w trace events generated", [length(Msgs)]), + analyse(Msgs). + +receive_until_idle(IdleT, WaitN, Msgs) -> + receive + Msg = {trace_call,_,_} -> + receive_until_idle(IdleT, 5, [Msg | Msgs]); + Msg = {trace_return,_,_} -> + receive_until_idle(IdleT, 5, [Msg | Msgs]) + after + IdleT -> + if WaitN == 0 -> + Msgs; + true -> + receive_until_idle(IdleT, WaitN-1, Msgs) + end + end. + +analyse(Msgs) -> + receive + {test,From,TestFun} -> + From ! {result,self(),TestFun(Msgs)}, + analyse(Msgs) + end. + +start_tracer(Trace,Expected) -> + Pid = self(), + FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)), + dbg:tracer(process,{fun tracer/2,{Pid,Expected}}), + dbg:p(whereis(?MODULE),[c]), + dbg:p(FileCtrlPid,[c]), + tpl(Trace), + ok. + +tpl([{M,F,A}|Trace]) -> + {ok,Match} = dbg:tpl(M,F,A,[]), + case lists:keyfind(matched,1,Match) of + {_,_,1} -> + ok; + _ -> + dbg:stop_clear(), + throw({skip,"Can't trace "++atom_to_list(M)++":"++ + atom_to_list(F)++"/"++integer_to_list(A)}) + end, + tpl(Trace); +tpl([]) -> + ok. + +tracer({trace,_,call,{logger_std_h,handle_cast,[{Op,_}|_]}}, + {Pid,[{Mod,Func,Op}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Op}); +tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[_,Data,_,_,_]}}, + {Pid,[{Mod,Func,Data}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func,Data}); +tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) -> + maybe_tracer_done(Pid,Expected,{Mod,Func}); +tracer({trace,_,call,Call}, {Pid,Expected}) -> + ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]), + Pid ! {tracer_got_unexpected,Call,Expected}, + {Pid,Expected}. + +maybe_tracer_done(Pid,[]=Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + Pid ! {tracer_done,0}, + {Pid,Expected}; +maybe_tracer_done(Pid,[{no_more,T}]=Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + Pid ! {tracer_done,T}, + {Pid,Expected}; +maybe_tracer_done(Pid,Expected,Got) -> + ct:log("Tracer got: ~p~n",[Got]), + {Pid,Expected}. + +check_tracer(T) -> + check_tracer(T,fun() -> ct:fail({timeout,tracer}) end). 
+check_tracer(T,TimeoutFun) -> + receive + {tracer_done,Delay} -> + %% Possibly wait Delay ms to check that no unexpected + %% traces are received + check_tracer(Delay,fun() -> ok end); + {tracer_got_unexpected,Got,Expected} -> + dbg:stop_clear(), + ct:fail({tracer_got_unexpected,Got,Expected}) + after T -> + dbg:stop_clear(), + TimeoutFun() + end. diff --git a/lib/kernel/test/os_SUITE.erl b/lib/kernel/test/os_SUITE.erl index 53a9e168ef..abbc301360 100644 --- a/lib/kernel/test/os_SUITE.erl +++ b/lib/kernel/test/os_SUITE.erl @@ -22,10 +22,12 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, init_per_testcase/2,end_per_testcase/2]). --export([space_in_cwd/1, quoting/1, cmd_unicode/1, space_in_name/1, bad_command/1, +-export([space_in_cwd/1, quoting/1, cmd_unicode/1, + null_in_command/1, space_in_name/1, bad_command/1, find_executable/1, unix_comment_in_command/1, deep_list_command/1, large_output_command/1, background_command/0, background_command/1, - message_leak/1, close_stdin/0, close_stdin/1, perf_counter_api/1]). + message_leak/1, close_stdin/0, close_stdin/1, max_size_command/1, + perf_counter_api/1]). -include_lib("common_test/include/ct.hrl"). @@ -34,10 +36,11 @@ suite() -> {timetrap,{minutes,1}}]. all() -> - [space_in_cwd, quoting, cmd_unicode, space_in_name, bad_command, + [space_in_cwd, quoting, cmd_unicode, null_in_command, + space_in_name, bad_command, find_executable, unix_comment_in_command, deep_list_command, large_output_command, background_command, message_leak, - close_stdin, perf_counter_api]. + close_stdin, max_size_command, perf_counter_api]. groups() -> []. @@ -125,6 +128,14 @@ cmd_unicode(Config) when is_list(Config) -> [] = receive_all(), ok. +null_in_command(Config) -> + {Ok, Error} = case os:type() of + {win32,_} -> {"dir", "di\0r"}; + _ -> {"ls", "l\0s"} + end, + true = is_list(try os:cmd(Ok) catch Class0:_ -> Class0 end), + error = try os:cmd(Error) catch Class1:_ -> Class1 end, + ok. %% Test that program with a space in its name can be executed. space_in_name(Config) when is_list(Config) -> @@ -216,8 +227,8 @@ find_executable(Config) when is_list(Config) -> DataDir = proplists:get_value(data_dir, Config), %% Smoke test. - case lib:progname() of - erl -> + case ct:get_progname() of + "erl" -> ErlPath = os:find_executable("erl"), true = is_list(ErlPath), true = filelib:is_regular(ErlPath); @@ -312,6 +323,19 @@ close_stdin(Config) -> "-1" = os:cmd(Fds). +max_size_command(_Config) -> + + Res20 = os:cmd("cat /dev/zero", #{ max_size => 20 }), + 20 = length(Res20), + + Res0 = os:cmd("cat /dev/zero", #{ max_size => 0 }), + 0 = length(Res0), + + Res32768 = os:cmd("cat /dev/zero", #{ max_size => 32768 }), + 32768 = length(Res32768), + + ResHello = string:trim(os:cmd("echo hello", #{ max_size => 20 })), + 5 = length(ResHello). %% Test that the os:perf_counter api works as expected perf_counter_api(_Config) -> @@ -364,7 +388,7 @@ comp(Expected, Got) -> ct:fail(failed) end. -%% Like lib:nonl/1, but strips \r as well as \n. +%% strips \n and \r\n from end of string strip_nl([$\r, $\n]) -> []; strip_nl([$\n]) -> []; diff --git a/lib/kernel/test/pdict_SUITE.erl b/lib/kernel/test/pdict_SUITE.erl index d105952df9..a891451c82 100644 --- a/lib/kernel/test/pdict_SUITE.erl +++ b/lib/kernel/test/pdict_SUITE.erl @@ -33,6 +33,7 @@ init_per_group/2,end_per_group/2, mixed/1, literals/1, + destructive/1, simple/1, complicated/1, heavy/1, simple_all_keys/1, info/1]). -export([init_per_testcase/2, end_per_testcase/2]). 
-export([other_process/2]). @@ -52,6 +53,7 @@ suite() -> all() -> [simple, complicated, heavy, simple_all_keys, info, literals, + destructive, mixed]. groups() -> @@ -367,6 +369,36 @@ match_keys(All) -> ok. +%% Test destructive put optimization of immed values +%% does not affect get/0 or process_info. +destructive(_Config) -> + Keys = lists:seq(1,100), + [put(Key, 17) || Key <- Keys], + Get1 = get(), + {dictionary,PI1} = process_info(self(), dictionary), + + [begin + {Key, 17} = lists:keyfind(Key, 1, Get1), + {Key, 17} = lists:keyfind(Key, 1, PI1) + end + || Key <- Keys], + + [17 = put(Key, 42) || Key <- Keys], % Mutate + + Get2 = get(), + {dictionary,PI2} = process_info(self(), dictionary), + + [begin + {Key, 17} = lists:keyfind(Key, 1, Get1), + {Key, 17} = lists:keyfind(Key, 1, PI1), + {Key, 42} = lists:keyfind(Key, 1, Get2), + {Key, 42} = lists:keyfind(Key, 1, PI2) + + end + || Key <- Keys], + + ok. + %% Do random mixed put/erase to test grow/shrink %% Written for a temporary bug in gc during shrink mixed(_Config) -> diff --git a/lib/kernel/test/prim_file_SUITE.erl b/lib/kernel/test/prim_file_SUITE.erl index 2f4330c217..5bb230d1c4 100644 --- a/lib/kernel/test/prim_file_SUITE.erl +++ b/lib/kernel/test/prim_file_SUITE.erl @@ -21,38 +21,23 @@ -export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1, init_per_group/2,end_per_group/2, init_per_testcase/2, end_per_testcase/2, read_write_file/1, free_memory/0]). --export([cur_dir_0a/1, cur_dir_0b/1, - cur_dir_1a/1, cur_dir_1b/1, - make_del_dir_a/1, make_del_dir_b/1, - pos1/1, pos2/1]). --export([close/1, - delete_a/1, delete_b/1]). --export([ open1/1, modes/1]). --export([ - file_info_basic_file_a/1, file_info_basic_file_b/1, - file_info_basic_directory_a/1, file_info_basic_directory_b/1, - file_info_bad_a/1, file_info_bad_b/1, - file_info_times_a/1, file_info_times_b/1, - file_write_file_info_a/1, file_write_file_info_b/1, - file_read_file_info_opts/1, file_write_file_info_opts/1, - file_write_read_file_info_opts/1 - ]). --export([rename_a/1, rename_b/1, - access/1, truncate/1, datasync/1, sync/1, +-export([cur_dir_0/1, cur_dir_1/1, + make_del_dir/1, pos1/1, pos2/1]). +-export([close/1, delete/1]). +-export([open1/1, modes/1]). +-export([file_info_basic_file/1, file_info_basic_directory/1, file_info_bad/1, + file_info_times/1, file_write_file_info/1, + file_read_file_info_opts/1, file_write_file_info_opts/1, + file_write_read_file_info_opts/1]). +-export([rename/1, access/1, truncate/1, datasync/1, sync/1, read_write/1, pread_write/1, append/1, exclusive/1]). --export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]). +-export([e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]). --export([ read_not_really_compressed/1, - read_compressed/1, write_compressed/1, - compress_errors/1]). - --export([ - make_link_a/1, make_link_b/1, - read_link_info_for_non_link/1, - symlinks_a/1, symlinks_b/1, - list_dir_limit/1, - list_dir_error/1, - list_dir/1]). +-export([make_link/1, read_link_info_for_non_link/1, + symlinks/1, + list_dir_limit/1, + list_dir_error/1, + list_dir/1]). -export([advise/1]). -export([large_write/1]). @@ -67,29 +52,16 @@ -define(PRIM_FILE, prim_file). -%% Calls ?PRIM_FILE:F with arguments A and an optional handle H -%% as first argument, unless the handle is [], i.e no handle. -%% This is a macro to give the compiler and thereby -%% the cross reference tool the possibility to interprete -%% the call, since M, F, A (or [H | A]) can all be known at -%% compile time. 
--define(PRIM_FILE_call(F, H, A), - case H of - [] -> apply(?PRIM_FILE, F, A); - _ -> apply(?PRIM_FILE, F, [H | A]) - end). - suite() -> []. all() -> [read_write_file, {group, dirs}, {group, files}, - delete_a, delete_b, rename_a, rename_b, {group, errors}, - {group, compression}, {group, links}, list_dir_limit, list_dir]. + delete, rename, {group, errors}, {group, links}, + list_dir_limit, list_dir]. groups() -> [{dirs, [], - [make_del_dir_a, make_del_dir_b, cur_dir_0a, cur_dir_0b, - cur_dir_1a, cur_dir_1b]}, + [make_del_dir, cur_dir_0, cur_dir_1]}, {files, [], [{group, open}, {group, pos}, {group, file_info}, truncate, sync, datasync, advise, large_write, allocate]}, @@ -98,22 +70,14 @@ groups() -> append, exclusive]}, {pos, [], [pos1, pos2]}, {file_info, [], - [file_info_basic_file_a, file_info_basic_file_b, - file_info_basic_directory_a, - file_info_basic_directory_b, file_info_bad_a, - file_info_bad_b, file_info_times_a, file_info_times_b, - file_write_file_info_a, file_write_file_info_b, - file_read_file_info_opts, file_write_file_info_opts, - file_write_read_file_info_opts + [file_info_basic_file,file_info_basic_directory, file_info_bad, + file_info_times, file_write_file_info, file_read_file_info_opts, + file_write_file_info_opts, file_write_read_file_info_opts ]}, {errors, [], [e_delete, e_rename, e_make_dir, e_del_dir]}, - {compression, [], - [read_compressed, read_not_really_compressed, - write_compressed, compress_errors]}, {links, [], - [make_link_a, make_link_b, read_link_info_for_non_link, - symlinks_a, symlinks_b, list_dir_error]}]. + [make_link, read_link_info_for_non_link, symlinks, list_dir_error]}]. init_per_testcase(large_write, Config) -> {ok, Started} = application:ensure_all_started(os_mon), @@ -246,39 +210,27 @@ read_write_file(Config) when is_list(Config) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% - -make_del_dir_a(Config) when is_list(Config) -> - make_del_dir(Config, [], "_a"). - -make_del_dir_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = make_del_dir(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - %% Just to make sure the state of the server makes a difference - {error, einval} = ?PRIM_FILE_call(get_cwd, Handle, []), - Result. - -make_del_dir(Config, Handle, Suffix) -> +make_del_dir(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir,Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE) - ++"_mk-dir"++Suffix), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), - {error, eexist} = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), - ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]), - {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [NewDir]), + ++"_mk-dir"), + ok = ?PRIM_FILE:make_dir(NewDir), + {error, eexist} = ?PRIM_FILE:make_dir(NewDir), + ok = ?PRIM_FILE:del_dir(NewDir), + {error, enoent} = ?PRIM_FILE:del_dir(NewDir), %% Make sure we are not in a directory directly under test_server %% as that would result in eacces errors when trying to delete '..', %% because there are processes having that directory as current. 
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), - {ok, CurrentDir} = ?PRIM_FILE_call(get_cwd, Handle, []), + ok = ?PRIM_FILE:make_dir(NewDir), + {ok, CurrentDir} = ?PRIM_FILE:get_cwd(), case {os:type(), length(NewDir) >= 260 } of {{win32,_}, true} -> io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH)\n", []), io:format("\nNewDir = ~p\n", [NewDir]); _ -> - ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]) + ok = ?PRIM_FILE:set_cwd(NewDir) end, try %% Check that we get an error when trying to create... @@ -286,14 +238,14 @@ make_del_dir(Config, Handle, Suffix) -> NewDir2 = filename:join(RootDir, atom_to_list(?MODULE) ++"_mk-dir-noexist/foo"), - {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [NewDir2]), + {error, enoent} = ?PRIM_FILE:make_dir(NewDir2), %% a nameless directory - {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [""]), + {error, enoent} = ?PRIM_FILE:make_dir(""), %% a directory with illegal name - {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, ['mk-dir']), + {error, badarg} = ?PRIM_FILE:make_dir('mk-dir'), %% a directory with illegal name, even if it's a (bad) list - {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, [[1,2,3,{}]]), + {error, badarg} = ?PRIM_FILE:make_dir([1,2,3,{}]), %% Maybe this isn't an error, exactly, but worth mentioning anyway: %% ok = ?PRIM_FILE:make_dir([$f,$o,$o,0,$b,$a,$r])), @@ -306,125 +258,101 @@ make_del_dir(Config, Handle, Suffix) -> %% Try deleting some bad directories %% Deleting the parent directory to the current, sounds dangerous, huh? %% Don't worry ;-) the parent directory should never be empty, right? - case ?PRIM_FILE_call(del_dir, Handle, [".."]) of + case ?PRIM_FILE:del_dir("..") of {error, eexist} -> ok; {error, eacces} -> ok; %OpenBSD {error, einval} -> ok %FreeBSD end, - {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [""]), - {error, badarg} = ?PRIM_FILE_call(del_dir, Handle, [[3,2,1,{}]]) + {error, enoent} = ?PRIM_FILE:del_dir(""), + {error, badarg} = ?PRIM_FILE:del_dir([3,2,1,{}]) after - ok = ?PRIM_FILE_call(set_cwd, Handle, [CurrentDir]) + ok = ?PRIM_FILE:set_cwd(CurrentDir) end, ok. -cur_dir_0a(Config) when is_list(Config) -> - cur_dir_0(Config, []). - -cur_dir_0b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = cur_dir_0(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. 
- -cur_dir_0(Config, Handle) -> +cur_dir_0(Config) when is_list(Config) -> %% Find out the current dir, and cd to it ;-) - {ok,BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []), + {ok,BaseDir} = ?PRIM_FILE:get_cwd(), Dir1 = BaseDir ++ "", %% Check that it's a string - ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]), - DirName = atom_to_list(?MODULE) ++ - case Handle of - [] -> - "_curdir"; - _ -> - "_curdir_h" - end, + ok = ?PRIM_FILE:set_cwd(Dir1), + DirName = atom_to_list(?MODULE) ++ "_curdir", %% Make a new dir, and cd to that RootDir = proplists:get_value(priv_dir,Config), NewDir = filename:join(RootDir, DirName), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), + ok = ?PRIM_FILE:make_dir(NewDir), case {os:type(), length(NewDir) >= 260} of {{win32,_}, true} -> io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH):\n"), io:format("\nNewDir = ~p\n", [NewDir]); _ -> io:format("cd to ~s",[NewDir]), - ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]), + ok = ?PRIM_FILE:set_cwd(NewDir), %% Create a file in the new current directory, and check that it %% really is created there UncommonName = "uncommon.fil", {ok,Fd} = ?PRIM_FILE:open(UncommonName, [read, write]), ok = ?PRIM_FILE:close(Fd), - {ok,NewDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]), + {ok,NewDirFiles} = ?PRIM_FILE:list_dir("."), true = lists:member(UncommonName,NewDirFiles), %% Delete the directory and return to the old current directory %% and check that the created file isn't there (too!) expect({error, einval}, {error, eacces}, {error, eexist}, - ?PRIM_FILE_call(del_dir, Handle, [NewDir])), - ?PRIM_FILE_call(delete, Handle, [UncommonName]), - {ok,[]} = ?PRIM_FILE_call(list_dir, Handle, ["."]), - ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]), + ?PRIM_FILE:del_dir(NewDir)), + ?PRIM_FILE:delete(UncommonName), + {ok,[]} = ?PRIM_FILE:list_dir("."), + ok = ?PRIM_FILE:set_cwd(Dir1), io:format("cd back to ~s",[Dir1]), - ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]), - {error, enoent} = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]), - ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]), + ok = ?PRIM_FILE:del_dir(NewDir), + {error, enoent} = ?PRIM_FILE:set_cwd(NewDir), + ok = ?PRIM_FILE:set_cwd(Dir1), io:format("cd back to ~s",[Dir1]), - {ok,OldDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]), + {ok,OldDirFiles} = ?PRIM_FILE:list_dir("."), false = lists:member(UncommonName,OldDirFiles) end, %% Try doing some bad things {error, badarg} = - ?PRIM_FILE_call(set_cwd, Handle, [{foo,bar}]), + ?PRIM_FILE:set_cwd({foo,bar}), {error, enoent} = - ?PRIM_FILE_call(set_cwd, Handle, [""]), + ?PRIM_FILE:set_cwd(""), {error, enoent} = - ?PRIM_FILE_call(set_cwd, Handle, [".......a......"]), + ?PRIM_FILE:set_cwd(".......a......"), {ok,BaseDir} = - ?PRIM_FILE_call(get_cwd, Handle, []), %% Still there? + ?PRIM_FILE:get_cwd(), %% Still there? %% On Windows, there should only be slashes, no backslashes, %% in the return value of get_cwd(). %% (The test is harmless on Unix, because filenames usually %% don't contain backslashes.) - {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []), + {ok, BaseDir} = ?PRIM_FILE:get_cwd(), false = lists:member($\\, BaseDir), ok. %% Tests ?PRIM_FILE:get_cwd/1. -cur_dir_1a(Config) when is_list(Config) -> - cur_dir_1(Config, []). - -cur_dir_1b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = cur_dir_1(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. 
- -cur_dir_1(Config, Handle) -> +cur_dir_1(Config) when is_list(Config) -> case os:type() of {win32, _} -> - win_cur_dir_1(Config, Handle); + win_cur_dir_1(Config); _ -> {error, enotsup} = - ?PRIM_FILE_call(get_cwd, Handle, ["d:"]) + ?PRIM_FILE:get_cwd("d:") end, ok. -win_cur_dir_1(_Config, Handle) -> - {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []), +win_cur_dir_1(_Config) -> + {ok, BaseDir} = ?PRIM_FILE:get_cwd(), %% Get the drive letter from the current directory, %% and try to get current directory for that drive. [Drive, $:|_] = BaseDir, - {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, [[Drive, $:]]), + {ok, BaseDir} = ?PRIM_FILE:get_cwd([Drive, $:]), io:format("BaseDir = ~s\n", [BaseDir]), %% Unfortunately, there is no way to move away from the @@ -446,12 +374,12 @@ open1(Config) when is_list(Config) -> Name = filename:join(NewDir, "foo1.fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [read, write]), {ok,Fd2} = ?PRIM_FILE:open(Name, [read]), - Str = "{a,tuple}.\n", - Length = length(Str), - ?PRIM_FILE:write(Fd1,Str), + Bin = list_to_binary("{a,tuple}.\n"), + Length = byte_size(Bin), + ?PRIM_FILE:write(Fd1,Bin), {ok,0} = ?PRIM_FILE:position(Fd1,bof), - {ok, Str} = ?PRIM_FILE:read(Fd1,Length), - {ok, Str} = ?PRIM_FILE:read(Fd2,Length), + {ok, Bin} = ?PRIM_FILE:read(Fd1,Length), + {ok, Bin} = ?PRIM_FILE:read(Fd2,Length), ok = ?PRIM_FILE:close(Fd2), {ok,0} = ?PRIM_FILE:position(Fd1,bof), ok = ?PRIM_FILE:truncate(Fd1), @@ -471,13 +399,13 @@ modes(Config) when is_list(Config) -> ++"_open_modes"), ok = ?PRIM_FILE:make_dir(NewDir), Name1 = filename:join(NewDir, "foo1.fil"), - Marker = "hello, world", - Length = length(Marker), + Marker = <<"hello, world">>, + Length = byte_size(Marker), %% write {ok, Fd1} = ?PRIM_FILE:open(Name1, [write]), ok = ?PRIM_FILE:write(Fd1, Marker), - ok = ?PRIM_FILE:write(Fd1, ".\n"), + ok = ?PRIM_FILE:write(Fd1, <<".\n">>), ok = ?PRIM_FILE:close(Fd1), %% read @@ -496,12 +424,6 @@ modes(Config) when is_list(Config) -> {ok, Marker} = ?PRIM_FILE:read(Fd4, Length), ok = ?PRIM_FILE:close(Fd4), - %% read and binary - BinaryMarker = list_to_binary(Marker), - {ok, Fd5} = ?PRIM_FILE:open(Name1, [read, binary]), - {ok, BinaryMarker} = ?PRIM_FILE:read(Fd5, Length), - ok = ?PRIM_FILE:close(Fd5), - ok. close(Config) when is_list(Config) -> @@ -528,9 +450,9 @@ access(Config) when is_list(Config) -> Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_access.fil"), - Str = "ABCDEFGH", + Bin = <<"ABCDEFGH">>, {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), - ?PRIM_FILE:write(Fd1,Str), + ?PRIM_FILE:write(Fd1,Bin), ok = ?PRIM_FILE:close(Fd1), %% Check that we can't write when in read only mode {ok,Fd2} = ?PRIM_FILE:open(Name, [read]), @@ -542,7 +464,7 @@ access(Config) when is_list(Config) -> end, ok = ?PRIM_FILE:close(Fd2), {ok, Fd3} = ?PRIM_FILE:open(Name, [read]), - {ok, Str} = ?PRIM_FILE:read(Fd3,length(Str)), + {ok, Bin} = ?PRIM_FILE:read(Fd3,byte_size(Bin)), ok = ?PRIM_FILE:close(Fd3), ok. @@ -564,7 +486,7 @@ read_write(Config) when is_list(Config) -> ok. read_write_test(File) -> - Marker = "hello, world", + Marker = <<"hello, world">>, ok = ?PRIM_FILE:write(File, Marker), {ok, 0} = ?PRIM_FILE:position(File, 0), {ok, Marker} = ?PRIM_FILE:read(File, 100), @@ -590,15 +512,15 @@ pread_write(Config) when is_list(Config) -> ok. 
pread_write_test(File) -> - Marker = "hello, world", - Len = length(Marker), + Marker = <<"hello, world">>, + Len = byte_size(Marker), ok = ?PRIM_FILE:write(File, Marker), {ok, Marker} = ?PRIM_FILE:pread(File, 0, 100), eof = ?PRIM_FILE:pread(File, 100, 1), ok = ?PRIM_FILE:pwrite(File, Len, Marker), {ok, Marker} = ?PRIM_FILE:pread(File, Len, 100), eof = ?PRIM_FILE:pread(File, 100, 1), - MM = Marker ++ Marker, + MM = <<Marker/binary,Marker/binary>>, {ok, MM} = ?PRIM_FILE:pread(File, 0, 100), ok = ?PRIM_FILE:close(File), ok. @@ -655,24 +577,24 @@ pos1(Config) when is_list(Config) -> atom_to_list(?MODULE) ++"_pos1.fil"), {ok, Fd1} = ?PRIM_FILE:open(Name, [write]), - ?PRIM_FILE:write(Fd1,"ABCDEFGH"), + ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>), ok = ?PRIM_FILE:close(Fd1), {ok, Fd2} = ?PRIM_FILE:open(Name, [read]), %% Start pos is first char io:format("Relative positions"), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), {ok, 2} = ?PRIM_FILE:position(Fd2,{cur,1}), - {ok, "C"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1), {ok, 0} = ?PRIM_FILE:position(Fd2,{cur,-3}), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), %% Backwards from first char should be an error {ok,0} = ?PRIM_FILE:position(Fd2,{cur,-1}), {error, einval} = ?PRIM_FILE:position(Fd2,{cur,-1}), %% Reset position and move again {ok, 0} = ?PRIM_FILE:position(Fd2,0), {ok, 2} = ?PRIM_FILE:position(Fd2,{cur,2}), - {ok, "C"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1), %% Go a lot forwards {ok, 13} = ?PRIM_FILE:position(Fd2,{cur,10}), eof = ?PRIM_FILE:read(Fd2,1), @@ -684,27 +606,27 @@ pos1(Config) when is_list(Config) -> {ok, 8} = ?PRIM_FILE:position(Fd2,cur), eof = ?PRIM_FILE:read(Fd2,1), {ok, 7} = ?PRIM_FILE:position(Fd2,7), - {ok, "H"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1), {ok, 0} = ?PRIM_FILE:position(Fd2,0), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), {ok, 3} = ?PRIM_FILE:position(Fd2,3), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), {ok, 12} = ?PRIM_FILE:position(Fd2,12), eof = ?PRIM_FILE:read(Fd2,1), {ok, 3} = ?PRIM_FILE:position(Fd2,3), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), %% Try the {bof,X} notation {ok, 3} = ?PRIM_FILE:position(Fd2,{bof,3}), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), %% Try eof positions io:format("EOF positions"), {ok, 8} = ?PRIM_FILE:position(Fd2,{eof,0}), eof = ?PRIM_FILE:read(Fd2,1), {ok, 7} = ?PRIM_FILE:position(Fd2,{eof,-1}), - {ok, "H"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1), {ok, 0} = ?PRIM_FILE:position(Fd2,{eof,-8}), - {ok, "A"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1), {error, einval} = ?PRIM_FILE:position(Fd2,{eof,-9}), ok. @@ -714,7 +636,7 @@ pos2(Config) when is_list(Config) -> atom_to_list(?MODULE) ++"_pos2.fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), - ?PRIM_FILE:write(Fd1,"ABCDEFGH"), + ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>), ok = ?PRIM_FILE:close(Fd1), {ok, Fd2} = ?PRIM_FILE:open(Name, [read]), {error, einval} = ?PRIM_FILE:position(Fd2,-1), @@ -722,35 +644,25 @@ pos2(Config) when is_list(Config) -> %% Make sure that we still can search after an error. {ok, 0} = ?PRIM_FILE:position(Fd2, 0), {ok, 3} = ?PRIM_FILE:position(Fd2, {bof,3}), - {ok, "D"} = ?PRIM_FILE:read(Fd2,1), + {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1), io:format("DONE"), ok. 
- -file_info_basic_file_a(Config) when is_list(Config) -> - file_info_basic_file(Config, [], "_a"). - -file_info_basic_file_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_basic_file(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_basic_file(Config, Handle, Suffix) -> +file_info_basic_file(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), %% Create a short file. Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_basic_test"++Suffix++".fil"), + ++"_basic_test"".fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), ?PRIM_FILE:write(Fd1, "foo bar"), ok = ?PRIM_FILE:close(Fd1), %% Test that the file has the expected attributes. %% The times are tricky, so we will save them to a separate test case. - {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]), + {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name), #file_info{size = Size, type = Type, access = Access, atime = AccessTime, mtime = ModifyTime} = FileInfo, @@ -768,39 +680,30 @@ file_info_basic_file(Config, Handle, Suffix) -> ok. -file_info_basic_directory_a(Config) when is_list(Config) -> - file_info_basic_directory(Config, []). - -file_info_basic_directory_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_basic_directory(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_basic_directory(Config, Handle) -> +file_info_basic_directory(Config) when is_list(Config) -> %% Note: filename:join/1 removes any trailing slash, %% which is essential for ?PRIM_FILE:read_file_info/1 to work on %% platforms such as Windows95. RootDir = filename:join([proplists:get_value(priv_dir, Config)]), %% Test that the RootDir directory has the expected attributes. - test_directory(RootDir, read_write, Handle), + test_directory(RootDir, read_write), %% Note that on Windows file systems, "/" or "c:/" are *NOT* directories. %% Therefore, test that ?PRIM_FILE:read_file_info/1 behaves %% as if they were directories. case os:type() of {win32, _} -> - test_directory("/", read_write, Handle), - test_directory("c:/", read_write, Handle), - test_directory("c:\\", read_write, Handle); + test_directory("/", read_write), + test_directory("c:/", read_write), + test_directory("c:\\", read_write); _ -> - test_directory("/", read, Handle) + test_directory("/", read) end, ok. -test_directory(Name, ExpectedAccess, Handle) -> - {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]), +test_directory(Name, ExpectedAccess) -> + {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name), #file_info{size = Size, type = Type, access = Access, atime = AccessTime, mtime = ModifyTime} = FileInfo, @@ -824,45 +727,24 @@ all_integers([]) -> %% Try something nonexistent. -file_info_bad_a(Config) when is_list(Config) -> - file_info_bad(Config, []). - -file_info_bad_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_bad(Config, Handle), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_bad(Config, Handle) -> +file_info_bad(Config) when is_list(Config) -> RootDir = filename:join([proplists:get_value(priv_dir, Config)]), - {error, enoent} = - ?PRIM_FILE_call( - read_file_info, Handle, - [filename:join(RootDir, - atom_to_list(?MODULE)++"_nonexistent")]), + NonExistent = filename:join(RootDir, atom_to_list(?MODULE)++"_nonexistent"), + {error, enoent} = ?PRIM_FILE:read_file_info(NonExistent), ok. %% Test that the file times behave as they should. 
-file_info_times_a(Config) when is_list(Config) -> - file_info_times(Config, [], "_a"). - -file_info_times_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_info_times(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_info_times(Config, Handle, Suffix) -> +file_info_times(Config) when is_list(Config) -> %% We have to try this twice, since if the test runs across the change %% of a month the time diff calculations will fail. But it won't happen %% if you run it twice in succession. test_server:m_out_of_n( 1,2, - fun() -> file_info_int(Config, Handle, Suffix) end), + fun() -> file_info_int(Config) end), ok. -file_info_int(Config, Handle, Suffix) -> +file_info_int(Config) -> %% Note: filename:join/1 removes any trailing slash, %% which is essential for ?PRIM_FILE:read_file_info/1 to work on %% platforms such as Windows95. @@ -872,14 +754,14 @@ file_info_int(Config, Handle, Suffix) -> Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_file_info"++Suffix++".fil"), + ++"_file_info.fil"), {ok,Fd1} = ?PRIM_FILE:open(Name, [write]), ?PRIM_FILE:write(Fd1,"foo"), %% check that the file got a modify date max a few seconds away from now {ok, #file_info{type = regular, atime = AccTime1, mtime = ModTime1}} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), Now = erlang:localtime(), io:format("Now ~p",[Now]), io:format("Open file Acc ~p Mod ~p",[AccTime1,ModTime1]), @@ -897,7 +779,7 @@ file_info_int(Config, Handle, Suffix) -> ok = ?PRIM_FILE:close(Fd1), {ok, #file_info{size = Size, type = regular, access = Access, atime = AccTime2, mtime = ModTime2}} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), io:format("Closed file Acc ~p Mod ~p",[AccTime2,ModTime2]), true = time_dist(ModTime1, ModTime2) >= 0, @@ -909,7 +791,7 @@ file_info_int(Config, Handle, Suffix) -> {ok, #file_info{size = DSize, type = directory, access = DAccess, atime = AccTime3, mtime = ModTime3}} = - ?PRIM_FILE_call(read_file_info, Handle, [RootDir]), + ?PRIM_FILE:read_file_info(RootDir), %% this dir was modified only a few secs ago io:format("Dir Acc ~p; Mod ~p; Now ~p", [AccTime3, ModTime3, Now]), @@ -936,16 +818,7 @@ filter_atime(Atime, Config) -> %% Test the write_file_info/2 function. -file_write_file_info_a(Config) when is_list(Config) -> - file_write_file_info(Config, [], "_a"). - -file_write_file_info_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = file_write_file_info(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -file_write_file_info(Config, Handle, Suffix) -> +file_write_file_info(Config) when is_list(Config) -> RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), @@ -955,16 +828,16 @@ file_write_file_info(Config, Handle, Suffix) -> Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_write_file_info_ro"++Suffix), + ++"_write_file_info_ro"), ok = ?PRIM_FILE:write_file(Name, "hello"), Time = {{1997, 01, 02}, {12, 35, 42}}, Info = #file_info{mode=8#400, atime=Time, mtime=Time, ctime=Time}, - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, Info]), + ok = ?PRIM_FILE:write_file_info(Name, Info), %% Read back the times. 
{ok, ActualInfo} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), #file_info{mode=_Mode, atime=ActAtime, mtime=Time, ctime=ActCtime} = ActualInfo, FilteredAtime = filter_atime(Time, Config), @@ -980,14 +853,11 @@ file_write_file_info(Config, Handle, Suffix) -> {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"), %% Make the file writable again. - - ?PRIM_FILE_call(write_file_info, Handle, - [Name, #file_info{mode=8#600}]), + ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}), ok = ?PRIM_FILE:write_file(Name, "hello again"), %% And unwritable. - ?PRIM_FILE_call(write_file_info, Handle, - [Name, #file_info{mode=8#400}]), + ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#400}), {error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"), %% Write the times again. @@ -995,9 +865,9 @@ file_write_file_info(Config, Handle, Suffix) -> NewTime = {{1997, 02, 15}, {13, 18, 20}}, NewInfo = #file_info{atime=NewTime, mtime=NewTime, ctime=NewTime}, - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, NewInfo]), + ok = ?PRIM_FILE:write_file_info(Name, NewInfo), {ok, ActualInfo2} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), #file_info{atime=NewActAtime, mtime=NewTime, ctime=NewActCtime} = ActualInfo2, NewFilteredAtime = filter_atime(NewTime, Config), @@ -1012,14 +882,12 @@ file_write_file_info(Config, Handle, Suffix) -> %% Make the file writeable again, so that we can remove the %% test suites ... :-) - ?PRIM_FILE_call(write_file_info, Handle, - [Name, #file_info{mode=8#600}]), + ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}), ok. %% Test the write_file_info/3 function. file_write_file_info_opts(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), @@ -1028,7 +896,7 @@ file_write_file_info_opts(Config) when is_list(Config) -> lists:foreach(fun ({FI, Opts}) -> - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts]) + ok = ?PRIM_FILE:write_file_info(Name, FI, Opts) end, [ {#file_info{ mode=8#600, atime = Time, mtime = Time, ctime = Time}, Opts} || Opts <- [[{time, posix}]], @@ -1038,7 +906,7 @@ file_write_file_info_opts(Config) when is_list(Config) -> %% REM: determine date range dependent on time_t = Uint32 | Sint32 | Sint64 | Uint64 %% Determine time_t on os:type()? lists:foreach(fun ({FI, Opts}) -> - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts]) + ok = ?PRIM_FILE:write_file_info(Name, FI, Opts) end, [ {#file_info{ mode=8#400, atime = Time, mtime = Time, ctime = Time}, Opts} || Opts <- [[{time, universal}],[{time, local}]], Time <- [ @@ -1050,11 +918,9 @@ file_write_file_info_opts(Config) when is_list(Config) -> {{2037,2,3},{23,59,59}}, erlang:localtime() ]]), - ok = ?PRIM_FILE:stop(Handle), ok. file_read_file_info_opts(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), @@ -1063,41 +929,38 @@ file_read_file_info_opts(Config) when is_list(Config) -> lists:foreach(fun (Opts) -> - {ok,_} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]) + {ok,_} = ?PRIM_FILE:read_file_info(Name, Opts) end, [[{time, Type}] || Type <- [local, universal, posix]]), - ok = ?PRIM_FILE:stop(Handle), ok. %% Test the write and read back *_file_info/3 functions. 
file_write_read_file_info_opts(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), RootDir = get_good_directory(Config), io:format("RootDir = ~p", [RootDir]), Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_read_write_file_info_opts"), ok = ?PRIM_FILE:write_file(Name, "hello_opts2"), - ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]), - ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]), + ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]), + ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]), %% will not work on platforms with unsigned time_t - %ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]), - %ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]), - ok = file_write_read_file_info_opts(Handle, Name, 1, [{time, posix}]), + %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]), + %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]), + ok = file_write_read_file_info_opts(Name, 1, [{time, posix}]), %% will not work on platforms with unsigned time_t - %ok = file_write_read_file_info_opts(Handle, Name, -1, [{time, posix}]), - %ok = file_write_read_file_info_opts(Handle, Name, -300000, [{time, posix}]), - ok = file_write_read_file_info_opts(Handle, Name, 300000, [{time, posix}]), - ok = file_write_read_file_info_opts(Handle, Name, 0, [{time, posix}]), + %ok = file_write_read_file_info_opts(Name, -1, [{time, posix}]), + %ok = file_write_read_file_info_opts(Name, -300000, [{time, posix}]), + ok = file_write_read_file_info_opts(Name, 300000, [{time, posix}]), + ok = file_write_read_file_info_opts(Name, 0, [{time, posix}]), - ok = ?PRIM_FILE:stop(Handle), ok. -file_write_read_file_info_opts(Handle, Name, Mtime, Opts) -> - {ok, FI} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]), +file_write_read_file_info_opts(Name, Mtime, Opts) -> + {ok, FI} = ?PRIM_FILE:read_file_info(Name, Opts), FI2 = FI#file_info{ mtime = Mtime }, - ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI2, Opts]), - {ok, FI3} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]), + ok = ?PRIM_FILE:write_file_info(Name, FI2, Opts), + {ok, FI3} = ?PRIM_FILE:read_file_info(Name, Opts), io:format("Expecting mtime = ~p, got ~p~n", [FI2#file_info.mtime, FI3#file_info.mtime]), FI2 = FI3, ok. 
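
The hunks above drop the old handle-based ?PRIM_FILE_call wrapper and let the *_file_info tests call prim_file directly, round-tripping mtime through read_file_info/2 and write_file_info/3 with a {time, ...} option. For reference, a minimal stand-alone sketch of the same round-trip through the documented file(3) API; the module name and the File argument are placeholders, not part of the suite:

    -module(mtime_roundtrip).
    -include_lib("kernel/include/file.hrl").
    -export([bump_mtime/2]).

    %% Rewrite the mtime of File as POSIX seconds and read it back.
    bump_mtime(File, PosixSeconds) when is_integer(PosixSeconds) ->
        {ok, FI} = file:read_file_info(File, [{time, posix}]),
        ok = file:write_file_info(File, FI#file_info{mtime = PosixSeconds},
                                  [{time, posix}]),
        {ok, #file_info{mtime = Mtime}} =
            file:read_file_info(File, [{time, posix}]),
        Mtime =:= PosixSeconds.

With [{time, posix}] both calls work in whole seconds since the epoch, which is what lets the final equality check hold without any time-zone conversion.
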
@@ -1175,8 +1038,8 @@ advise(Config) when is_list(Config) -> atom_to_list(?MODULE) ++"_advise.fil"), - Line1 = "Hello\n", - Line2 = "World!\n", + Line1 = <<"Hello\n">>, + Line2 = <<"World!\n">>, {ok, Fd} = ?PRIM_FILE:open(Advise, [write]), ok = ?PRIM_FILE:advise(Fd, 0, 0, normal), @@ -1226,7 +1089,7 @@ advise(Config) when is_list(Config) -> {ok, Fd9} = ?PRIM_FILE:open(Advise, [read]), Offset = 0, %% same as a 0 length in some implementations - Length = length(Line1) + length(Line2), + Length = byte_size(Line1) + byte_size(Line2), ok = ?PRIM_FILE:advise(Fd9, Offset, Length, sequential), {ok, Line1} = ?PRIM_FILE:read_line(Fd9), {ok, Line2} = ?PRIM_FILE:read_line(Fd9), @@ -1250,23 +1113,18 @@ do_large_write(Name) -> Chunk = <<0:ChunkSize/unit:8>>, Data = zip_data(lists:duplicate(Chunks, Chunk), Interleave), Size = Chunks * ChunkSize + Chunks, % 4 G + 32 - Wordsize = erlang:system_info(wordsize), - case prim_file:write_file(Name, Data) of - ok when Wordsize =:= 8 -> - {ok,#file_info{size=Size}} = file:read_file_info(Name), - {ok,Fd} = prim_file:open(Name, [read]), - check_large_write(Fd, ChunkSize, 0, Interleave); - {error,einval} when Wordsize =:= 4 -> - ok - end. + ok = ?PRIM_FILE:write_file(Name, Data), + {ok,#file_info{size=Size}} = file:read_file_info(Name), + {ok,Fd} = ?PRIM_FILE:open(Name, [read]), + check_large_write(Fd, ChunkSize, 0, Interleave). check_large_write(Fd, ChunkSize, Pos, [X|Interleave]) -> Pos1 = Pos + ChunkSize, - {ok,Pos1} = prim_file:position(Fd, {cur,ChunkSize}), - {ok,[X]} = prim_file:read(Fd, 1), + {ok,Pos1} = ?PRIM_FILE:position(Fd, {cur,ChunkSize}), + {ok,<<X>>} = ?PRIM_FILE:read(Fd, 1), check_large_write(Fd, ChunkSize, Pos1+1, Interleave); check_large_write(Fd, _, _, []) -> - eof = prim_file:read(Fd, 1), + eof = ?PRIM_FILE:read(Fd, 1), ok. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% @@ -1338,71 +1196,53 @@ allocate_and_assert(Fd, Offset, Length) -> %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% -delete_a(Config) when is_list(Config) -> - delete(Config, [], "_a"). - -delete_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = delete(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -delete(Config, Handle, Suffix) -> +delete(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir,Config), Name = filename:join(RootDir, atom_to_list(?MODULE) - ++"_delete"++Suffix++".fil"), + ++"_delete.fil"), {ok, Fd1} = ?PRIM_FILE:open(Name, [write]), ?PRIM_FILE:write(Fd1,"ok.\n"), ok = ?PRIM_FILE:close(Fd1), %% Check that the file is readable {ok, Fd2} = ?PRIM_FILE:open(Name, [read]), ok = ?PRIM_FILE:close(Fd2), - ok = ?PRIM_FILE_call(delete, Handle, [Name]), + ok = ?PRIM_FILE:delete(Name), %% Check that the file is not readable anymore {error, _} = ?PRIM_FILE:open(Name, [read]), %% Try deleting a nonexistent file - {error, enoent} = ?PRIM_FILE_call(delete, Handle, [Name]), + {error, enoent} = ?PRIM_FILE:delete(Name), ok. -rename_a(Config) when is_list(Config) -> - rename(Config, [], "_a"). - -rename_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = rename(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. 
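
The advise/1 and do_large_write/1 hunks above switch the expected data from strings to binaries (length/1 becomes byte_size/1, {ok,[X]} becomes {ok,<<X>>}), apparently because reads through the prim_file layer now come back as binaries. A small illustrative sketch of that behaviour against the public API with an explicit binary open mode; the module name and Path are placeholders:

    -module(raw_lines_demo).
    -export([read_two_lines/1]).

    %% With [read, raw, binary], read_line/1 returns binaries, so the
    %% expected lines are binaries and their length is byte_size/1.
    read_two_lines(Path) ->
        ok = file:write_file(Path, [<<"Hello\n">>, <<"World!\n">>]),
        {ok, Fd} = file:open(Path, [read, raw, binary]),
        {ok, <<"Hello\n">> = L1} = file:read_line(Fd),
        {ok, <<"World!\n">> = L2} = file:read_line(Fd),
        eof = file:read_line(Fd),
        ok = file:close(Fd),
        byte_size(L1) + byte_size(L2).
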
- -rename(Config, Handle, Suffix) -> +rename(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir,Config), - FileName1 = atom_to_list(?MODULE)++"_rename"++Suffix++".fil", - FileName2 = atom_to_list(?MODULE)++"_rename"++Suffix++".ful", + FileName1 = atom_to_list(?MODULE)++"_rename.fil", + FileName2 = atom_to_list(?MODULE)++"_rename.ful", Name1 = filename:join(RootDir, FileName1), Name2 = filename:join(RootDir, FileName2), {ok,Fd1} = ?PRIM_FILE:open(Name1, [write]), ok = ?PRIM_FILE:close(Fd1), %% Rename, and check that it really changed name - ok = ?PRIM_FILE_call(rename, Handle, [Name1, Name2]), + ok = ?PRIM_FILE:rename(Name1, Name2), {error, _} = ?PRIM_FILE:open(Name1, [read]), {ok, Fd2} = ?PRIM_FILE:open(Name2, [read]), ok = ?PRIM_FILE:close(Fd2), %% Try renaming something to itself - ok = ?PRIM_FILE_call(rename, Handle, [Name2, Name2]), + ok = ?PRIM_FILE:rename(Name2, Name2), %% Try renaming something that doesn't exist {error, enoent} = - ?PRIM_FILE_call(rename, Handle, [Name1, Name2]), + ?PRIM_FILE:rename(Name1, Name2), %% Try renaming to something else than a string {error, badarg} = - ?PRIM_FILE_call(rename, Handle, [Name1, foobar]), + ?PRIM_FILE:rename(Name1, foobar), %% Move between directories DirName1 = filename:join(RootDir, atom_to_list(?MODULE) - ++"_rename_dir"++Suffix), + ++"_rename_dir"), DirName2 = filename:join(RootDir, atom_to_list(?MODULE) - ++"_second_rename_dir"++Suffix), + ++"_second_rename_dir"), Name1foo = filename:join(DirName1, "foo.fil"), Name2foo = filename:join(DirName2, "foo.fil"), Name2bar = filename:join(DirName2, "bar.dir"), @@ -1410,21 +1250,21 @@ rename(Config, Handle, Suffix) -> %% The name has to include the full file name, path is not enough expect( {error, eexist}, {error, eisdir}, - ?PRIM_FILE_call(rename, Handle, [Name2, DirName1])), + ?PRIM_FILE:rename(Name2, DirName1)), ok = - ?PRIM_FILE_call(rename, Handle, [Name2, Name1foo]), + ?PRIM_FILE:rename(Name2, Name1foo), %% Now rename the directory - ok = ?PRIM_FILE_call(rename, Handle, [DirName1, DirName2]), + ok = ?PRIM_FILE:rename(DirName1, DirName2), %% And check that the file is there now {ok,Fd3} = ?PRIM_FILE:open(Name2foo, [read]), ok = ?PRIM_FILE:close(Fd3), %% Try some dirty things now: move the directory into itself {error, Msg1} = - ?PRIM_FILE_call(rename, Handle, [DirName2, Name2bar]), + ?PRIM_FILE:rename(DirName2, Name2bar), io:format("Errmsg1: ~p",[Msg1]), %% move dir into a file in itself {error, Msg2} = - ?PRIM_FILE_call(rename, Handle, [DirName2, Name2foo]), + ?PRIM_FILE:rename(DirName2, Name2foo), io:format("Errmsg2: ~p",[Msg2]), ok. @@ -1466,7 +1306,7 @@ e_delete(Config) when is_list(Config) -> Base, #file_info {mode=0}), {error, eacces} = ?PRIM_FILE:delete(Afile), ?PRIM_FILE:write_file_info( - Base, #file_info {mode=8#600}) + Base, #file_info {mode=8#700}) end, ok. @@ -1602,7 +1442,7 @@ e_make_dir(Config) when is_list(Config) -> ?PRIM_FILE:write_file_info(Base, #file_info {mode=0}), {error, eacces} = ?PRIM_FILE:make_dir(filename:join(Base, "xxxx")), - ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#600}) + ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#700}) end, ok. @@ -1652,170 +1492,24 @@ e_del_dir(Config) when is_list(Config) -> ?PRIM_FILE:write_file_info(Base, #file_info {mode=0}), {error, eacces} = ?PRIM_FILE:del_dir(ADirectory), ?PRIM_FILE:write_file_info( - Base, #file_info {mode=8#600}) - end, - ok. - - -%% Trying reading and positioning from a compressed file. 
- -read_compressed(Config) when is_list(Config) -> - Data = proplists:get_value(data_dir, Config), - Real = filename:join(Data, "realmen.html.gz"), - {ok, Fd} = ?PRIM_FILE:open(Real, [read, compressed]), - try_read_file(Fd). - -%% Trying reading and positioning from an uncompressed file, -%% but with the compressed flag given. - -read_not_really_compressed(Config) when is_list(Config) -> - Data = proplists:get_value(data_dir, Config), - Priv = proplists:get_value(priv_dir, Config), - - %% The file realmen.html might have got CRs added (by WinZip). - %% Remove them, or the file positions will not be correct. - - Real = filename:join(Data, "realmen.html"), - RealPriv = filename:join(Priv, - atom_to_list(?MODULE)++"_realmen.html"), - {ok, RealDataBin} = ?PRIM_FILE:read_file(Real), - RealData = remove_crs(binary_to_list(RealDataBin), []), - ok = ?PRIM_FILE:write_file(RealPriv, RealData), - {ok, Fd} = ?PRIM_FILE:open(RealPriv, [read, compressed]), - try_read_file(Fd). - -remove_crs([$\r|Rest], Result) -> - remove_crs(Rest, Result); -remove_crs([C|Rest], Result) -> - remove_crs(Rest, [C|Result]); -remove_crs([], Result) -> - lists:reverse(Result). - -try_read_file(Fd) -> - %% Seek to the current position (nothing should happen). - - {ok, 0} = ?PRIM_FILE:position(Fd, 0), - {ok, 0} = ?PRIM_FILE:position(Fd, {cur, 0}), - - %% Read a few lines from a compressed file. - - ShouldBe = "<TITLE>Real Programmers Don't Use PASCAL</TITLE>\n", - {ok, ShouldBe} = ?PRIM_FILE:read(Fd, length(ShouldBe)), - - %% Now seek forward. - - {ok, 381} = ?PRIM_FILE:position(Fd, 381), - Back = "Back in the good old days -- the \"Golden Era\" " ++ - "of computers, it was\n", - {ok, Back} = ?PRIM_FILE:read(Fd, length(Back)), - - %% Try to search forward relative to the current position. - - {ok, CurPos} = ?PRIM_FILE:position(Fd, {cur, 0}), - RealPos = 4273, - {ok, RealPos} = ?PRIM_FILE:position(Fd, {cur, RealPos-CurPos}), - RealProg = "<LI> Real Programmers aren't afraid to use GOTOs.\n", - {ok, RealProg} = ?PRIM_FILE:read(Fd, length(RealProg)), - - %% Seek backward. - - AfterTitle = length("<TITLE>"), - {ok, AfterTitle} = ?PRIM_FILE:position(Fd, AfterTitle), - Title = "Real Programmers Don't Use PASCAL</TITLE>\n", - {ok, Title} = ?PRIM_FILE:read(Fd, length(Title)), - - %% Done. - - ?PRIM_FILE:close(Fd), - ok. - -write_compressed(Config) when is_list(Config) -> - Priv = proplists:get_value(priv_dir, Config), - MyFile = filename:join(Priv, - atom_to_list(?MODULE)++"_test.gz"), - - %% Write a file. - - {ok, Fd} = ?PRIM_FILE:open(MyFile, [write, compressed]), - {ok, 0} = ?PRIM_FILE:position(Fd, 0), - Prefix = "hello\n", - End = "end\n", - ok = ?PRIM_FILE:write(Fd, Prefix), - {ok, 143} = ?PRIM_FILE:position(Fd, 143), - ok = ?PRIM_FILE:write(Fd, End), - ok = ?PRIM_FILE:close(Fd), - - %% Read the file and verify the contents. - - {ok, Fd1} = ?PRIM_FILE:open(MyFile, [read, compressed]), - {ok, Prefix} = ?PRIM_FILE:read(Fd1, length(Prefix)), - Second = lists:duplicate(143-length(Prefix), 0) ++ End, - {ok, Second} = ?PRIM_FILE:read(Fd1, length(Second)), - ok = ?PRIM_FILE:close(Fd1), - - %% Ensure that the file is compressed. - - TotalSize = 143 + length(End), - case ?PRIM_FILE:read_file_info(MyFile) of - {ok, #file_info{size=Size}} when Size < TotalSize -> - ok; - {ok, #file_info{size=Size}} when Size == TotalSize -> - ct:fail(file_not_compressed) + Base, #file_info {mode=8#700}) end, - - %% Write again to ensure that the file is truncated. 
- - {ok, Fd2} = ?PRIM_FILE:open(MyFile, [write, compressed]), - NewString = "aaaaaaaaaaa", - ok = ?PRIM_FILE:write(Fd2, NewString), - ok = ?PRIM_FILE:close(Fd2), - {ok, Fd3} = ?PRIM_FILE:open(MyFile, [read, compressed]), - {ok, NewString} = ?PRIM_FILE:read(Fd3, 1024), - ok = ?PRIM_FILE:close(Fd3), - - ok. - -compress_errors(Config) when is_list(Config) -> - Data = proplists:get_value(data_dir, Config), - {error, enoent} = ?PRIM_FILE:open("non_existing__", - [compressed, read]), - {error, einval} = ?PRIM_FILE:open("non_existing__", - [compressed, read, write]), - - %% Read a corrupted .gz file. - - Corrupted = filename:join(Data, "corrupted.gz"), - {ok, Fd} = ?PRIM_FILE:open(Corrupted, [read, compressed]), - {error, eio} = ?PRIM_FILE:read(Fd, 100), - ?PRIM_FILE:close(Fd), - ok. -%% Test creating a hard link. -make_link_a(Config) when is_list(Config) -> - make_link(Config, [], "_a"). - -%% Test creating a hard link. -make_link_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = make_link(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. - -make_link(Config, Handle, Suffix) -> +make_link(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE) - ++"_make_link"++Suffix), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), + ++"_make_link"), + ok = ?PRIM_FILE:make_dir(NewDir), Name = filename:join(NewDir, "a_file"), ok = ?PRIM_FILE:write_file(Name, "some contents\n"), Alias = filename:join(NewDir, "an_alias"), Result = - case ?PRIM_FILE_call(make_link, Handle, [Name, Alias]) of + case ?PRIM_FILE:make_link(Name, Alias) of {error, enotsup} -> {skipped, "Links not supported on this platform"}; ok -> @@ -1826,12 +1520,12 @@ make_link(Config, Handle, Suffix) -> %% since they are not used on symbolic links. {ok, Info} = - ?PRIM_FILE_call(read_link_info, Handle, [Name]), + ?PRIM_FILE:read_link_info(Name), {ok, Info} = - ?PRIM_FILE_call(read_link_info, Handle, [Alias]), + ?PRIM_FILE:read_link_info(Alias), #file_info{links = 2, type = regular} = Info, {error, eexist} = - ?PRIM_FILE_call(make_link, Handle, [Name, Alias]), + ?PRIM_FILE:make_link(Name, Alias), ok end, @@ -1843,30 +1537,19 @@ read_link_info_for_non_link(Config) when is_list(Config) -> {ok, #file_info{type=directory}} = ?PRIM_FILE:read_link_info("."), ok. -%% Test operations on symbolic links (for Unix). -symlinks_a(Config) when is_list(Config) -> - symlinks(Config, [], "_a"). - -%% Test operations on symbolic links (for Unix). -symlinks_b(Config) when is_list(Config) -> - {ok, Handle} = ?PRIM_FILE:start(), - Result = symlinks(Config, Handle, "_b"), - ok = ?PRIM_FILE:stop(Handle), - Result. 
- -symlinks(Config, Handle, Suffix) -> +symlinks(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE) - ++"_make_symlink"++Suffix), - ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]), + ++"_make_symlink"), + ok = ?PRIM_FILE:make_dir(NewDir), Name = filename:join(NewDir, "a_plain_file"), ok = ?PRIM_FILE:write_file(Name, "some stupid content\n"), Alias = filename:join(NewDir, "a_symlink_alias"), Result = - case ?PRIM_FILE_call(make_symlink, Handle, [Name, Alias]) of + case ?PRIM_FILE:make_symlink(Name, Alias) of {error, enotsup} -> {skipped, "Links not supported on this platform"}; {error, eperm} -> @@ -1874,20 +1557,20 @@ symlinks(Config, Handle, Suffix) -> {skipped, "Windows user not privileged to create links"}; ok -> {ok, Info1} = - ?PRIM_FILE_call(read_file_info, Handle, [Name]), + ?PRIM_FILE:read_file_info(Name), {ok, Info1} = - ?PRIM_FILE_call(read_file_info, Handle, [Alias]), + ?PRIM_FILE:read_file_info(Alias), {ok, Info1} = - ?PRIM_FILE_call(read_link_info, Handle, [Name]), + ?PRIM_FILE:read_link_info(Name), #file_info{links = 1, type = regular} = Info1, {ok, Info2} = - ?PRIM_FILE_call(read_link_info, Handle, [Alias]), + ?PRIM_FILE:read_link_info(Alias), #file_info{links=1, type=symlink} = Info2, {ok, Name} = - ?PRIM_FILE_call(read_link, Handle, [Alias]), + ?PRIM_FILE:read_link(Alias), {ok, Name} = - ?PRIM_FILE_call(read_link_all, Handle, [Alias]), + ?PRIM_FILE:read_link_all(Alias), %% If all is good, delete dir again (avoid hanging dir on windows) rm_rf(?PRIM_FILE,NewDir), ok @@ -1907,10 +1590,9 @@ list_dir_limit(Config) when is_list(Config) -> RootDir = proplists:get_value(priv_dir, Config), NewDir = filename:join(RootDir, atom_to_list(?MODULE)++"_list_dir_limit"), - {ok, Handle1} = ?PRIM_FILE:start(), - ok = ?PRIM_FILE_call(make_dir, Handle1, [NewDir]), + ok = ?PRIM_FILE:make_dir(NewDir), Ref = erlang:start_timer(MaxTime*1000, self(), []), - Result = list_dir_limit_loop(NewDir, Handle1, Ref, MaxNumber, 0), + Result = list_dir_limit_loop(NewDir, Ref, MaxNumber, 0), Time = case erlang:cancel_timer(Ref) of false -> MaxTime; T -> MaxTime - (T div 1000) @@ -1920,21 +1602,18 @@ list_dir_limit(Config) when is_list(Config) -> {error, _Reason, N} -> N; _ -> 0 end, - {ok, Handle2} = ?PRIM_FILE:start(), - list_dir_limit_cleanup(NewDir, Handle2, Number, 0), - ok = ?PRIM_FILE:stop(Handle1), - ok = ?PRIM_FILE:stop(Handle2), + list_dir_limit_cleanup(NewDir, Number, 0), {ok, Number} = Result, {comment, "Created " ++ integer_to_list(Number) ++ " files in " ++ integer_to_list(Time) ++ " seconds."}. -list_dir_limit_loop(Dir, Handle, _Ref, N, Cnt) when Cnt >= N -> - list_dir_check(Dir, Handle, Cnt); -list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) -> +list_dir_limit_loop(Dir, _Ref, N, Cnt) when Cnt >= N -> + list_dir_check(Dir, Cnt); +list_dir_limit_loop(Dir, Ref, N, Cnt) -> receive {timeout, Ref, []} -> - list_dir_check(Dir, Handle, Cnt) + list_dir_check(Dir, Cnt) after 0 -> Name = integer_to_list(Cnt), case ?PRIM_FILE:write_file(filename:join(Dir, Name), Name) of @@ -1942,23 +1621,23 @@ list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) -> Next = Cnt + 1, case Cnt rem 100 of 0 -> - case list_dir_check(Dir, Handle, Next) of + case list_dir_check(Dir, Next) of {ok, Next} -> list_dir_limit_loop( - Dir, Handle, Ref, N, Next); + Dir, Ref, N, Next); Other -> Other end; _ -> - list_dir_limit_loop(Dir, Handle, Ref, N, Next) + list_dir_limit_loop(Dir, Ref, N, Next) end; {error, Reason} -> {error, Reason, Cnt} end end. 
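
The symlinks/1 hunk above checks the usual distinction between following and not following a link. A compact sketch of those invariants against the public file module; the paths are placeholders (strings), and the skips mirror the platforms the test already excuses:

    -module(symlink_demo).
    -include_lib("kernel/include/file.hrl").
    -export([inspect/2]).

    %% read_file_info/1 follows the link, read_link_info/1 describes the
    %% link itself, and read_link/1 returns the stored target.
    inspect(Target, Link) when is_list(Target), is_list(Link) ->
        ok = file:write_file(Target, <<"some content\n">>),
        case file:make_symlink(Target, Link) of
            {error, enotsup} -> skipped;   % no symlink support here
            {error, eperm}   -> skipped;   % unprivileged Windows user
            ok ->
                {ok, #file_info{type = regular}} = file:read_file_info(Link),
                {ok, #file_info{type = symlink}} = file:read_link_info(Link),
                {ok, Target} = file:read_link(Link),
                ok
        end.
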
-list_dir_check(Dir, Handle, Cnt) -> - case ?PRIM_FILE:list_dir(Handle, Dir) of +list_dir_check(Dir, Cnt) -> + case ?PRIM_FILE:list_dir(Dir) of {ok, ListDir} -> case length(ListDir) of Cnt -> @@ -1975,18 +1654,18 @@ list_dir_check(Dir, Handle, Cnt) -> %% Deletes N files while ignoring errors, then continues deleting %% as long as they exist. -list_dir_limit_cleanup(Dir, Handle, N, Cnt) when Cnt >= N -> +list_dir_limit_cleanup(Dir, N, Cnt) when Cnt >= N -> Name = integer_to_list(Cnt), - case ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)) of + case ?PRIM_FILE:delete(filename:join(Dir, Name)) of ok -> - list_dir_limit_cleanup(Dir, Handle, N, Cnt+1); + list_dir_limit_cleanup(Dir, N, Cnt+1); _ -> ok end; -list_dir_limit_cleanup(Dir, Handle, N, Cnt) -> +list_dir_limit_cleanup(Dir, N, Cnt) -> Name = integer_to_list(Cnt), - ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)), - list_dir_limit_cleanup(Dir, Handle, N, Cnt+1). + ?PRIM_FILE:delete(filename:join(Dir, Name)), + list_dir_limit_cleanup(Dir, N, Cnt+1). %%% %%% Test list_dir() on a non-existing pathname. @@ -1995,7 +1674,7 @@ list_dir_limit_cleanup(Dir, Handle, N, Cnt) -> list_dir_error(Config) -> Priv = proplists:get_value(priv_dir, Config), NonExisting = filename:join(Priv, "non-existing-dir"), - {error,enoent} = prim_file:list_dir(NonExisting), + {error,enoent} = ?PRIM_FILE:list_dir(NonExisting), ok. %%% @@ -2063,7 +1742,7 @@ do_run_large_file_test(Config, Run, Name0) -> {'DOWN',Mref,_,_,_} -> ok; {Tester,done} -> ok end, - prim_file:delete(Name) + ?PRIM_FILE:delete(Name) end), %% Run the test case. @@ -2108,12 +1787,25 @@ free_memory() -> {value, {buffered_memory, Buffed}} -> Buffed; false -> 0 end), - TotFree div (1024*1024) + usable_mem(TotFree) div (1024*1024) catch error : undef -> ct:fail({"os_mon not built"}) end. +usable_mem(Memory) -> + case test_server:is_valgrind() of + true -> + %% Valgrind uses extra memory for the V- and A-bits. + %% http://valgrind.org/docs/manual/mc-manual.html#mc-manual.value + %% Docs says it uses "compression to represent the V bits compactly" + %% but let's be conservative and cut usable memory in half. + Memory div 2; + false -> + Memory + end. + + %%%----------------------------------------------------------------- %%% Utilities rm_rf(Mod,Dir) -> diff --git a/lib/kernel/test/sendfile_SUITE.erl b/lib/kernel/test/sendfile_SUITE.erl index bfa564c32c..0c0b1cbcb6 100644 --- a/lib/kernel/test/sendfile_SUITE.erl +++ b/lib/kernel/test/sendfile_SUITE.erl @@ -23,30 +23,41 @@ -include_lib("common_test/include/ct.hrl"). -include_lib("kernel/include/file.hrl"). --compile(export_all). - -all() -> [{group,async_threads}, - {group,no_async_threads}]. - -groups() -> - [{async_threads,[],tcs()}, - {no_async_threads,[],tcs()}]. - -tcs() -> - [t_sendfile_small - ,t_sendfile_big_all - ,t_sendfile_big_size - ,t_sendfile_many_small - ,t_sendfile_partial - ,t_sendfile_offset - ,t_sendfile_sendafter - ,t_sendfile_recvafter - ,t_sendfile_recvafter_remoteclose - ,t_sendfile_sendduring - ,t_sendfile_recvduring - ,t_sendfile_closeduring - ,t_sendfile_crashduring - ]. +-export([all/0, init_per_suite/1, end_per_suite/1, init_per_testcase/2]). + +-export([sendfile_server/2, sendfile_do_recv/2, init/1, handle_event/2]). 
+ +-export( + [t_sendfile_small/1, + t_sendfile_big_all/1, + t_sendfile_big_size/1, + t_sendfile_many_small/1, + t_sendfile_partial/1, + t_sendfile_offset/1, + t_sendfile_sendafter/1, + t_sendfile_recvafter/1, + t_sendfile_recvafter_remoteclose/1, + t_sendfile_sendduring/1, + t_sendfile_recvduring/1, + t_sendfile_closeduring/1, + t_sendfile_crashduring/1, + t_sendfile_arguments/1]). + +all() -> + [t_sendfile_small, + t_sendfile_big_all, + t_sendfile_big_size, + t_sendfile_many_small, + t_sendfile_partial, + t_sendfile_offset, + t_sendfile_sendafter, + t_sendfile_recvafter, + t_sendfile_recvafter_remoteclose, + t_sendfile_sendduring, + t_sendfile_recvduring, + t_sendfile_closeduring, + t_sendfile_crashduring, + t_sendfile_arguments]. init_per_suite(Config) -> case {os:type(),os:version()} of @@ -72,28 +83,18 @@ init_per_suite(Config) -> end_per_suite(Config) -> file:delete(proplists:get_value(big_file, Config)). -init_per_group(async_threads,Config) -> - case erlang:system_info(thread_pool_size) of - 0 -> - {skip,"No async threads"}; - _ -> - [{sendfile_opts,[{use_threads,true}]}|Config] - end; -init_per_group(no_async_threads,Config) -> - [{sendfile_opts,[{use_threads,false}]}|Config]. - -end_per_group(_,_Config) -> - ok. - init_per_testcase(TC,Config) when TC == t_sendfile_recvduring; TC == t_sendfile_sendduring -> Filename = proplists:get_value(small_file, Config), Send = fun(Sock) -> {_Size, Data} = sendfile_file_info(Filename), - {ok,D} = file:open(Filename, [raw,binary,read]), - prim_file:sendfile(D, Sock, 0, 0, 0, - [],[],[]), + {ok,Fd} = file:open(Filename, [raw,binary,read]), + %% Determine whether the driver has native support by + %% hitting the raw module directly; file:sendfile/5 will + %% land in the fallback if it doesn't. + RawModule = Fd#file_descriptor.module, + {ok, _Ignored} = RawModule:sendfile(Fd,Sock,0,0,0,[],[],[]), Data end, @@ -105,9 +106,8 @@ init_per_testcase(TC,Config) when TC == t_sendfile_recvduring; ct:log("Error: ~p",[Error]), {skip,"Not supported"} end; -init_per_testcase(_Tc,Config) -> - Config ++ [{sendfile_opts,[{use_threads,false}]}]. - +init_per_testcase(_TC,Config) -> + Config. 
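
The new init_per_testcase/2 clause above probes for native sendfile support by calling the raw module behind the file descriptor. For the common path the rest of the suite exercises, here is a hedged sketch of the documented entry point, file:sendfile/5, over a loopback socket; the module name, Path (assumed to hold at least five bytes) and the 16384 chunk size are illustrative only:

    -module(sendfile_demo).
    -export([send_first_bytes/1]).

    %% Stream the first five bytes of Path over a loopback connection.
    send_first_bytes(Path) ->
        {ok, L} = gen_tcp:listen(0, [binary, {active, false}]),
        {ok, Port} = inet:port(L),
        {ok, Out} = gen_tcp:connect({127,0,0,1}, Port,
                                    [binary, {active, false}]),
        {ok, In} = gen_tcp:accept(L),
        {ok, Fd} = file:open(Path, [read, raw, binary]),
        {ok, 5} = file:sendfile(Fd, Out, 0, 5, [{chunk_size, 16384}]),
        {ok, Bytes} = gen_tcp:recv(In, 5),
        ok = file:close(Fd),
        [ok = gen_tcp:close(S) || S <- [Out, In, L]],
        Bytes.
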
t_sendfile_small(Config) when is_list(Config) -> Filename = proplists:get_value(small_file, Config), @@ -124,7 +124,7 @@ t_sendfile_small(Config) when is_list(Config) -> t_sendfile_many_small(Config) when is_list(Config) -> Filename = proplists:get_value(small_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), error_logger:add_report_handler(?MODULE,[self()]), @@ -151,7 +151,7 @@ t_sendfile_many_small(Config) when is_list(Config) -> t_sendfile_big_all(Config) when is_list(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -165,7 +165,7 @@ t_sendfile_big_all(Config) when is_list(Config) -> t_sendfile_big_size(Config) -> Filename = proplists:get_value(big_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), SendAll = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -180,7 +180,7 @@ t_sendfile_big_size(Config) -> t_sendfile_partial(Config) -> Filename = proplists:get_value(small_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), SendSingle = fun(Sock) -> {_Size, <<Data:5/binary,_/binary>>} = @@ -217,7 +217,7 @@ t_sendfile_partial(Config) -> t_sendfile_offset(Config) -> Filename = proplists:get_value(small_file, Config), FileOpts = proplists:get_value(file_opts, Config, []), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {_Size, <<_:5/binary,Data:3/binary,_/binary>> = AllData} = @@ -233,7 +233,7 @@ t_sendfile_offset(Config) -> t_sendfile_sendafter(Config) -> Filename = proplists:get_value(small_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {Size, Data} = sendfile_file_info(Filename), @@ -246,7 +246,7 @@ t_sendfile_sendafter(Config) -> t_sendfile_recvafter(Config) -> Filename = proplists:get_value(small_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {Size, Data} = sendfile_file_info(Filename), @@ -279,7 +279,7 @@ t_sendfile_recvafter_remoteclose(Config) -> t_sendfile_sendduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -296,7 +296,7 @@ t_sendfile_sendduring(Config) -> t_sendfile_recvduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock) -> {ok, #file_info{size = Size}} = @@ -315,7 +315,7 @@ t_sendfile_recvduring(Config) -> t_sendfile_closeduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = 
proplists:get_value(sendfile_opts, Config, []), Send = fun(Sock,SFServPid) -> spawn_link(fun() -> @@ -345,7 +345,7 @@ t_sendfile_closeduring(Config) -> t_sendfile_crashduring(Config) -> Filename = proplists:get_value(big_file, Config), - SendfileOpts = proplists:get_value(sendfile_opts, Config), + SendfileOpts = proplists:get_value(sendfile_opts, Config, []), error_logger:add_report_handler(?MODULE,[self()]), @@ -373,6 +373,36 @@ t_sendfile_crashduring(Config) -> end end. +t_sendfile_arguments(Config) -> + Filename = proplists:get_value(small_file, Config), + + {ok, Listener} = gen_tcp:listen(0, + [{packet, 0}, {active, false}, {reuseaddr, true}]), + {ok, Port} = inet:port(Listener), + + ErrorCheck = + fun(Reason, Offset, Length, Opts) -> + {ok, Sender} = gen_tcp:connect({127, 0, 0, 1}, Port, + [{packet, 0}, {active, false}]), + {ok, Receiver} = gen_tcp:accept(Listener), + {ok, Fd} = file:open(Filename, [read, raw]), + {error, Reason} = file:sendfile(Fd, Sender, Offset, Length, Opts), + gen_tcp:close(Receiver), + gen_tcp:close(Sender), + file:close(Fd) + end, + + ErrorCheck(einval, -1, 0, []), + ErrorCheck(einval, 0, -1, []), + ErrorCheck(badarg, gurka, 0, []), + ErrorCheck(badarg, 0, gurka, []), + ErrorCheck(badarg, 0, 0, gurka), + ErrorCheck(badarg, 0, 0, [{chunk_size, gurka}]), + + gen_tcp:close(Listener), + + ok. + %% Generic sendfile server code sendfile_send(Send) -> sendfile_send({127,0,0,1},Send). diff --git a/lib/kernel/test/seq_trace_SUITE.erl b/lib/kernel/test/seq_trace_SUITE.erl index be23a1933f..aae8a83304 100644 --- a/lib/kernel/test/seq_trace_SUITE.erl +++ b/lib/kernel/test/seq_trace_SUITE.erl @@ -25,7 +25,7 @@ -export([token_set_get/1, tracer_set_get/1, print/1, send/1, distributed_send/1, recv/1, distributed_recv/1, trace_exit/1, distributed_exit/1, call/1, port/1, - match_set_seq_token/1, gc_seq_token/1]). + match_set_seq_token/1, gc_seq_token/1, label_capability_mismatch/1]). %% internal exports -export([simple_tracer/2, one_time_receiver/0, one_time_receiver/1, @@ -47,7 +47,7 @@ all() -> [token_set_get, tracer_set_get, print, send, distributed_send, recv, distributed_recv, trace_exit, distributed_exit, call, port, match_set_seq_token, - gc_seq_token]. + gc_seq_token, label_capability_mismatch]. groups() -> []. 
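
The added seq_trace cases above rely on labels being arbitrary terms rather than small integers, with label_capability_mismatch/1 checking how such labels interact with older nodes. A minimal sketch of tagging one message chain with a reference and watching it arrive unchanged at the system tracer; module and function names are illustrative, not the suite's helpers:

    -module(seq_label_demo).
    -export([run/0]).

    %% Tag one message chain with a reference and let the system tracer
    %% print the label it receives.
    run() ->
        Tracer = spawn(fun() ->
                               receive
                                   {seq_trace, TraceLabel, Info} ->
                                       io:format("label ~p: ~p~n",
                                                 [TraceLabel, Info])
                               end
                       end),
        seq_trace:set_system_tracer(Tracer),
        Label = make_ref(),
        seq_trace:set_token(label, Label),
        seq_trace:set_token(send, true),
        Receiver = spawn(fun() -> receive _ -> ok end end),
        Receiver ! hello,                 % produces one 'send' trace event
        seq_trace:reset_trace(),
        ok.
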
@@ -90,8 +90,8 @@ do_token_set_get(TsType) -> %% Test that initial seq_trace is disabled [] = seq_trace:get_token(), %% Test setting and reading the different fields - 0 = seq_trace:set_token(label,17), - {label,17} = seq_trace:get_token(label), + 0 = seq_trace:set_token(label,{my_label,1}), + {label,{my_label,1}} = seq_trace:get_token(label), false = seq_trace:set_token(print,true), {print,true} = seq_trace:get_token(print), false = seq_trace:set_token(send,true), @@ -101,12 +101,12 @@ do_token_set_get(TsType) -> false = seq_trace:set_token(TsType,true), {TsType,true} = seq_trace:get_token(TsType), %% Check the whole token - {Flags,17,0,Self,0} = seq_trace:get_token(), % all flags are set + {Flags,{my_label,1},0,Self,0} = seq_trace:get_token(), % all flags are set %% Test setting and reading the 'serial' field {0,0} = seq_trace:set_token(serial,{3,5}), {serial,{3,5}} = seq_trace:get_token(serial), %% Check the whole token, test that a whole token can be set and get - {Flags,17,5,Self,3} = seq_trace:get_token(), + {Flags,{my_label,1},5,Self,3} = seq_trace:get_token(), seq_trace:set_token({Flags,19,7,Self,5}), {Flags,19,7,Self,5} = seq_trace:get_token(), %% Check that receive timeout does not reset token @@ -166,11 +166,13 @@ do_send(TsType) -> seq_trace:reset_trace(), start_tracer(), Receiver = spawn(?MODULE,one_time_receiver,[]), + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags([send, TsType]), Receiver ! send, Self = self(), seq_trace:reset_trace(), - [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), + [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), check_ts(TsType, Ts). distributed_send(Config) when is_list(Config) -> @@ -184,14 +186,19 @@ do_distributed_send(TsType) -> seq_trace:reset_trace(), start_tracer(), Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% Make sure complex labels survive the trip. + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags([send,TsType]), + Receiver ! send, Self = self(), seq_trace:reset_trace(), stop_node(Node), - [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), + [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1), check_ts(TsType, Ts). - + recv(Config) when is_list(Config) -> lists:foreach(fun do_recv/1, ?TIMESTAMP_MODES). @@ -220,7 +227,12 @@ do_distributed_recv(TsType) -> seq_trace:reset_trace(), rpc:call(Node,?MODULE,start_tracer,[]), Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% Make sure complex labels survive the trip. + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags(['receive',TsType]), + Receiver ! 'receive', %% let the other process receive the message: receive after 1 -> ok end, @@ -229,7 +241,7 @@ do_distributed_recv(TsType) -> Result = rpc:call(Node,?MODULE,stop_tracer,[1]), stop_node(Node), ok = io:format("~p~n",[Result]), - [{0,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result, + [{Label,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result, check_ts(TsType, Ts). trace_exit(Config) when is_list(Config) -> @@ -240,7 +252,12 @@ do_trace_exit(TsType) -> start_tracer(), Receiver = spawn_link(?MODULE, one_time_receiver, [exit]), process_flag(trap_exit, true), + + %% Make sure complex labels survive the trip. + Label = make_ref(), + seq_trace:set_token(label,Label), set_token_flags([send, TsType]), + Receiver ! 
{before, exit}, %% let the other process receive the message: receive @@ -254,8 +271,8 @@ do_trace_exit(TsType) -> Result = stop_tracer(2), seq_trace:reset_trace(), ok = io:format("~p~n", [Result]), - [{0, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0}, - {0, {send, {1,2}, Receiver, Self, + [{Label, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0}, + {Label, {send, {1,2}, Receiver, Self, {'EXIT', Receiver, {exit, {before, exit}}}}, Ts1}] = Result, check_ts(TsType, Ts0), check_ts(TsType, Ts1). @@ -291,6 +308,74 @@ do_distributed_exit(TsType) -> {'EXIT', Receiver, {exit, {before, exit}}}}, Ts}] = Result, check_ts(TsType, Ts). +label_capability_mismatch(Config) when is_list(Config) -> + Releases = ["20_latest"], + Available = [Rel || Rel <- Releases, test_server:is_release_available(Rel)], + case Available of + [] -> {skipped, "No incompatible releases available"}; + _ -> + lists:foreach(fun do_incompatible_labels/1, Available), + lists:foreach(fun do_compatible_labels/1, Available), + ok + end. + +do_incompatible_labels(Rel) -> + Cookie = atom_to_list(erlang:get_cookie()), + {ok, Node} = test_server:start_node( + list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer, + [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]), + + {_,Dir} = code:is_loaded(?MODULE), + Mdir = filename:dirname(Dir), + true = rpc:call(Node,code,add_patha,[Mdir]), + seq_trace:reset_trace(), + rpc:call(Node,?MODULE,start_tracer,[]), + Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% This node does not support arbitrary labels, so it must fail with a + %% timeout as the token is dropped silently. + seq_trace:set_token(label,make_ref()), + seq_trace:set_token('receive',true), + + Receiver ! 'receive', + %% let the other process receive the message: + receive after 10 -> ok end, + seq_trace:reset_trace(), + + {error,timeout} = rpc:call(Node,?MODULE,stop_tracer,[1]), + stop_node(Node), + ok. + +do_compatible_labels(Rel) -> + Cookie = atom_to_list(erlang:get_cookie()), + {ok, Node} = test_server:start_node( + list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer, + [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]), + + {_,Dir} = code:is_loaded(?MODULE), + Mdir = filename:dirname(Dir), + true = rpc:call(Node,code,add_patha,[Mdir]), + seq_trace:reset_trace(), + rpc:call(Node,?MODULE,start_tracer,[]), + Receiver = spawn(Node,?MODULE,one_time_receiver,[]), + + %% This node does not support arbitrary labels, but small integers should + %% still work. + Label = 1234, + seq_trace:set_token(label,Label), + seq_trace:set_token('receive',true), + + Receiver ! 'receive', + %% let the other process receive the message: + receive after 10 -> ok end, + Self = self(), + seq_trace:reset_trace(), + Result = rpc:call(Node,?MODULE,stop_tracer,[1]), + stop_node(Node), + ok = io:format("~p~n",[Result]), + [{Label,{'receive',_,Self,Receiver,'receive'}, _}] = Result, + ok. + call(doc) -> "Tests special forms {is_seq_trace} and {get_seq_token} " "in trace match specs."; diff --git a/lib/kernel/test/wrap_log_reader_SUITE.erl b/lib/kernel/test/wrap_log_reader_SUITE.erl index 40a016aed0..b1ee29a11f 100644 --- a/lib/kernel/test/wrap_log_reader_SUITE.erl +++ b/lib/kernel/test/wrap_log_reader_SUITE.erl @@ -428,13 +428,14 @@ stop() -> ok = wrap_log_test:stop(), dl_wait(). -%% Give disk logs opened by 'logger' and 'wlt' time to close after +%% Give disk logs opened by 'wlr_logger' and 'wlt' time to close after %% receiving EXIT signals. 
dl_wait() -> case disk_log:accessible_logs() of {[], []} -> ok; - _ -> + _X -> + erlang:display(_X), timer:sleep(100), dl_wait() end. @@ -507,27 +508,27 @@ add_ext(Name, Ext) -> %% disk_log. open(Log, File, Where) -> - logger ! {open, self(), Log, File}, + wlr_logger ! {open, self(), Log, File}, rec1(ok, Where). open_ext(Log, File, Where) -> - logger ! {open_ext, self(), Log, File}, + wlr_logger ! {open_ext, self(), Log, File}, rec1(ok, Where). close(Log) -> - logger ! {close, self(), Log}, + wlr_logger ! {close, self(), Log}, rec(ok, ?LINE). sync(Log) -> - logger ! {sync, self(), Log}, + wlr_logger ! {sync, self(), Log}, rec(ok, ?LINE). log_terms(File, Terms) -> - logger ! {log_terms, self(), File, Terms}, + wlr_logger ! {log_terms, self(), File, Terms}, rec(ok, ?LINE). blog_terms(File, Terms) -> - logger ! {blog_terms, self(), File, Terms}, + wlr_logger ! {blog_terms, self(), File, Terms}, rec(ok, ?LINE). rec1(M, Where) -> diff --git a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl index 38449b6bb3..2b24ccc66f 100644 --- a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl +++ b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl @@ -36,9 +36,9 @@ -endif. init() -> - spawn(fun() -> start(logger) end), + spawn(fun() -> start(wlr_logger) end), spawn(fun() -> start2(wlt) end), - wait_registered(logger), + wait_registered(wlr_logger), wait_registered(wlt), ok. @@ -52,9 +52,9 @@ wait_registered(Name) -> end. stop() -> - catch logger ! exit, + catch wlr_logger ! exit, catch wlt ! exit, - wait_unregistered(logger), + wait_unregistered(wlr_logger), wait_unregistered(wlt), ok. @@ -82,47 +82,47 @@ loop() -> {open, Pid, Name, File} -> R = disk_log:open([{name, Name}, {type, wrap}, {file, File}, {size, {?fsize, ?fno}}]), - ?format("logger: open ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: open ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {open_ext, Pid, Name, File} -> R = disk_log:open([{name, Name}, {type, wrap}, {file, File}, {format, external}, {size, {?fsize, ?fno}}]), - ?format("logger: open ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: open ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {close, Pid, Name} -> R = disk_log:close(Name), - ?format("logger: close ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: close ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {sync, Pid, Name} -> R = disk_log:sync(Name), - ?format("logger: sync ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: sync ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {log_terms, Pid, Name, Terms} -> R = disk_log:log_terms(Name, Terms), - ?format("logger: log_terms ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: log_terms ~p -> ~p~n", [Name, R]), Pid ! R, loop(); {blog_terms, Pid, Name, Terms} -> R = disk_log:blog_terms(Name, Terms), - ?format("logger: blog_terms ~p -> ~p~n", [Name, R]), + ?format("wlr_logger: blog_terms ~p -> ~p~n", [Name, R]), Pid ! R, loop(); exit -> - ?format("Stopping logger~n", []), + ?format("Stopping wlr_logger~n", []), exit(normal); _Else -> - ?format("logger: ignored: ~p~n", [_Else]), + ?format("wlr_logger: ignored: ~p~n", [_Else]), loop() end. diff --git a/lib/kernel/test/zlib_SUITE.erl b/lib/kernel/test/zlib_SUITE.erl index 26602bdcda..f203ef878f 100644 --- a/lib/kernel/test/zlib_SUITE.erl +++ b/lib/kernel/test/zlib_SUITE.erl @@ -1061,32 +1061,27 @@ sub_heap_binaries(Config) when is_list(Config) -> %% Check concurrent access to zlib driver. 
smp(Config) -> - case erlang:system_info(smp_support) of - true -> - NumOfProcs = lists:min([8,erlang:system_info(schedulers)]), - io:format("smp starting ~p workers\n",[NumOfProcs]), - - %% Tests to run in parallel. - Funcs = - [zip_usage, gz_usage, compress_usage, dictionary_usage, - crc, adler], - - %% We get all function arguments here to avoid repeated parallel - %% file read access. - UsageArgs = - list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]), - Parent = self(), - - WorkerFun = - fun() -> - worker(rand:uniform(9999), UsageArgs, Parent) - end, - - Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)], - wait_pids(Pids); - false -> - {skipped,"No smp support"} - end. + NumOfProcs = lists:min([8,erlang:system_info(schedulers)]), + io:format("smp starting ~p workers\n",[NumOfProcs]), + + %% Tests to run in parallel. + Funcs = + [zip_usage, gz_usage, compress_usage, dictionary_usage, + crc, adler], + + %% We get all function arguments here to avoid repeated parallel + %% file read access. + UsageArgs = + list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]), + Parent = self(), + + WorkerFun = + fun() -> + worker(rand:uniform(9999), UsageArgs, Parent) + end, + + Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)], + wait_pids(Pids). worker(Seed, FnATpl, Parent) -> io:format("smp worker ~p, seed=~p~n",[self(),Seed]), diff --git a/lib/kernel/test/zzz_SUITE.erl b/lib/kernel/test/zzz_SUITE.erl new file mode 100644 index 0000000000..59c7fd7404 --- /dev/null +++ b/lib/kernel/test/zzz_SUITE.erl @@ -0,0 +1,37 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2006-2018. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% +-module(zzz_SUITE). + +%% The sole purpose of this test suite is for things we want to run last +%% before the VM terminates. + +-export([all/0]). + +-export([lc_graph/1]). + + +all() -> + [lc_graph]. + +lc_graph(_Config) -> + %% Create "lc_graph" file in current working dir + %% if lock checker is enabled. + erts_debug:lc_graph(), + ok. |
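
Finally, the zlib_SUITE smp/1 rewrite above assumes SMP support is always present and simply fans the usage tests out over the schedulers. A generic sketch of that fork/join shape (helper names are invented, not the suite's):

    -module(par_demo).
    -export([run_parallel/1]).

    %% Spawn one linked worker per scheduler (capped at 8) and wait for
    %% each to report back; a crashing worker takes the caller down too.
    run_parallel(Fun) when is_function(Fun, 0) ->
        N = lists:min([8, erlang:system_info(schedulers)]),
        Parent = self(),
        Pids = [spawn_link(fun() -> Fun(), Parent ! {done, self()} end)
                || _ <- lists:seq(1, N)],
        [receive {done, Pid} -> ok end || Pid <- Pids],
        ok.

Calling run_parallel(fun() -> zlib:compress(<<1,2,3>>) end) would run the closure once in every spawned worker and return only after each worker has reported back.
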