Diffstat (limited to 'lib/kernel/test')
-rw-r--r--  lib/kernel/test/Makefile | 36
-rw-r--r--  lib/kernel/test/application_SUITE.erl | 150
-rw-r--r--  lib/kernel/test/code_SUITE.erl | 384
-rw-r--r--  lib/kernel/test/code_SUITE_data/upgrade_client.erl | 94
-rw-r--r--  lib/kernel/test/code_SUITE_data/upgradee.erl | 12
-rw-r--r--  lib/kernel/test/disk_log_SUITE.erl | 247
-rw-r--r--  lib/kernel/test/erl_distribution_SUITE.erl | 470
-rw-r--r--  lib/kernel/test/erl_distribution_wb_SUITE.erl | 24
-rw-r--r--  lib/kernel/test/erl_prim_loader_SUITE.erl | 5
-rw-r--r--  lib/kernel/test/error_logger_SUITE.erl | 64
-rw-r--r--  lib/kernel/test/error_logger_warn_SUITE.erl | 16
-rw-r--r--  lib/kernel/test/file_SUITE.erl | 382
-rw-r--r--  lib/kernel/test/file_SUITE_data/realmen.html | 4
-rw-r--r--  lib/kernel/test/file_name_SUITE.erl | 43
-rw-r--r--  lib/kernel/test/gen_sctp_SUITE.erl | 18
-rw-r--r--  lib/kernel/test/gen_tcp_api_SUITE.erl | 14
-rw-r--r--  lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c | 3
-rw-r--r--  lib/kernel/test/gen_tcp_misc_SUITE.erl | 648
-rw-r--r--  lib/kernel/test/gen_udp_SUITE.erl | 432
-rw-r--r--  lib/kernel/test/global_SUITE.erl | 58
-rw-r--r--  lib/kernel/test/heart_SUITE.erl | 23
-rw-r--r--  lib/kernel/test/inet_SUITE.erl | 458
-rw-r--r--  lib/kernel/test/inet_res_SUITE.erl | 15
-rw-r--r--  lib/kernel/test/inet_sockopt_SUITE.erl | 13
-rw-r--r--  lib/kernel/test/init_SUITE.erl | 88
-rw-r--r--  lib/kernel/test/interactive_shell_SUITE.erl | 62
-rw-r--r--  lib/kernel/test/interactive_shell_SUITE_data/.gitignore | 1
-rw-r--r--  lib/kernel/test/interactive_shell_SUITE_data/io_columns.erl | 6
-rw-r--r--  lib/kernel/test/interactive_shell_SUITE_data/io_rows.erl | 6
-rw-r--r--  lib/kernel/test/kernel.spec | 1
-rw-r--r--  lib/kernel/test/kernel_SUITE.erl | 71
-rw-r--r--  lib/kernel/test/kernel_bench.spec | 3
-rw-r--r--  lib/kernel/test/kernel_config_SUITE.erl | 25
-rw-r--r--  lib/kernel/test/logger.cover | 17
-rw-r--r--  lib/kernel/test/logger.spec | 13
-rw-r--r--  lib/kernel/test/logger_SUITE.erl | 1390
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl | 1668
-rw-r--r--  lib/kernel/test/logger_env_var_SUITE.erl | 697
-rw-r--r--  lib/kernel/test/logger_filters_SUITE.erl | 227
-rw-r--r--  lib/kernel/test/logger_formatter_SUITE.erl | 886
-rw-r--r--  lib/kernel/test/logger_legacy_SUITE.erl | 288
-rw-r--r--  lib/kernel/test/logger_olp_SUITE.erl | 90
-rw-r--r--  lib/kernel/test/logger_proxy_SUITE.erl | 274
-rw-r--r--  lib/kernel/test/logger_simple_h_SUITE.erl | 209
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl | 2184
-rw-r--r--  lib/kernel/test/logger_stress_SUITE.erl | 550
-rw-r--r--  lib/kernel/test/logger_test_lib.erl | 88
-rw-r--r--  lib/kernel/test/multi_load_SUITE.erl | 10
-rw-r--r--  lib/kernel/test/os_SUITE.erl | 87
-rw-r--r--  lib/kernel/test/pdict_SUITE.erl | 94
-rw-r--r--  lib/kernel/test/pg2_SUITE.erl | 29
-rw-r--r--  lib/kernel/test/prim_file_SUITE.erl | 802
-rw-r--r--  lib/kernel/test/sendfile_SUITE.erl | 166
-rw-r--r--  lib/kernel/test/seq_trace_SUITE.erl | 199
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE.erl | 19
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl | 26
-rw-r--r--  lib/kernel/test/zlib_SUITE.erl | 892
-rw-r--r--  lib/kernel/test/zzz_SUITE.erl | 37
58 files changed, 12987 insertions, 1831 deletions
diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile
index b9942e899f..6763a04d9f 100644
--- a/lib/kernel/test/Makefile
+++ b/lib/kernel/test/Makefile
@@ -1,7 +1,7 @@
#
# %CopyrightBegin%
#
-# Copyright Ericsson AB 1997-2016. All Rights Reserved.
+# Copyright Ericsson AB 1997-2018. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -57,6 +57,7 @@ MODULES= \
prim_file_SUITE \
ram_file_SUITE \
gen_tcp_api_SUITE \
+ gen_tcp_dist \
gen_tcp_echo_SUITE \
gen_tcp_misc_SUITE \
gen_udp_SUITE \
@@ -70,6 +71,18 @@ MODULES= \
interactive_shell_SUITE \
init_SUITE \
kernel_config_SUITE \
+ logger_SUITE \
+ logger_disk_log_h_SUITE \
+ logger_env_var_SUITE \
+ logger_filters_SUITE \
+ logger_formatter_SUITE \
+ logger_legacy_SUITE \
+ logger_olp_SUITE \
+ logger_proxy_SUITE \
+ logger_simple_h_SUITE \
+ logger_std_h_SUITE \
+ logger_stress_SUITE \
+ logger_test_lib \
os_SUITE \
pg2_SUITE \
seq_trace_SUITE \
@@ -80,7 +93,8 @@ MODULES= \
loose_node \
sendfile_SUITE \
standard_error_SUITE \
- multi_load_SUITE
+ multi_load_SUITE \
+ zzz_SUITE
APP_FILES = \
appinc.app \
@@ -101,7 +115,7 @@ TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR))
INSTALL_PROGS= $(TARGET_FILES)
EMAKEFILE=Emakefile
-COVERFILE=kernel.cover
+COVERFILE=kernel.cover logger.cover
# ----------------------------------------------------
# Release directory specification
@@ -117,11 +131,17 @@ ERL_COMPILE_FLAGS +=
EBIN = .
+TARGETS = $(MODULES:%=$(EBIN)/%.$(EMULATOR))
+
+
# ----------------------------------------------------
# Targets
# ----------------------------------------------------
-make_emakefile:
+gen_tcp_dist.erl: ../examples/gen_tcp_dist/src/gen_tcp_dist.erl
+ cp $< $@
+
+make_emakefile: $(ERL_FILES)
$(ERL_TOP)/make/make_emakefile $(ERL_COMPILE_FLAGS) -o$(EBIN) '*_SUITE_make' \
> $(EMAKEFILE)
$(ERL_TOP)/make/make_emakefile $(ERL_COMPILE_FLAGS) -o$(EBIN) $(MODULES) \
@@ -137,6 +157,9 @@ clean:
docs:
+targets: $(TARGETS)
+
+
# ----------------------------------------------------
# Release Target
# ----------------------------------------------------
@@ -148,8 +171,9 @@ release_tests_spec: make_emakefile
$(INSTALL_DIR) "$(RELSYSDIR)"
$(INSTALL_DATA) $(ERL_FILES) "$(RELSYSDIR)"
$(INSTALL_DATA) $(APP_FILES) "$(RELSYSDIR)"
- $(INSTALL_DATA) kernel.spec kernel_smoke.spec $(EMAKEFILE)\
- $(COVERFILE) "$(RELSYSDIR)"
+ $(INSTALL_DATA) \
+ kernel.spec kernel_smoke.spec kernel_bench.spec logger.spec \
+ $(EMAKEFILE) $(COVERFILE) "$(RELSYSDIR)"
chmod -R u+w "$(RELSYSDIR)"
@tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -)
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index 81407e9d96..1ab554db7c 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -31,13 +31,15 @@
otp_3002/1, otp_3184/1, otp_4066/1, otp_4227/1, otp_5363/1,
otp_5606/1,
start_phases/1, get_key/1, get_env/1,
+ set_env/1, set_env_persistent/1, set_env_errors/1,
permit_false_start_local/1, permit_false_start_dist/1, script_start/1,
nodedown_start/1, init2973/0, loop2973/0, loop5606/1]).
-export([config_change/1, persistent_env/1,
distr_changed_tc1/1, distr_changed_tc2/1,
ensure_started/1, ensure_all_started/1,
- shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1]).
+ shutdown_func/1, do_shutdown/1, shutdown_timeout/1, shutdown_deadlock/1,
+ config_relative_paths/1]).
-define(TESTCASE, testcase_name).
-define(testcase, proplists:get_value(?TESTCASE, Config)).
@@ -54,8 +56,9 @@ all() ->
load_use_cache, ensure_started, {group, reported_bugs}, start_phases,
script_start, nodedown_start, permit_false_start_local,
permit_false_start_dist, get_key, get_env, ensure_all_started,
+ set_env, set_env_persistent, set_env_errors,
{group, distr_changed}, config_change, shutdown_func, shutdown_timeout,
- shutdown_deadlock,
+ shutdown_deadlock, config_relative_paths,
persistent_env].
groups() ->
@@ -1498,7 +1501,7 @@ otp_5363(Conf) when is_list(Conf) ->
%% Ticket: OTP-5606
%% Slogan: Problems with starting a distributed application
%%-----------------------------------------------------------------
-%% Test of several processes simultanously starting the same
+%% Test of several processes simultaneously starting the same
%% distributed application.
otp_5606(Conf) when is_list(Conf) ->
@@ -1568,7 +1571,8 @@ loop5606(Pid) ->
%% Tests get_env/* functions.
get_env(Conf) when is_list(Conf) ->
- {ok, _} = application:get_env(kernel, error_logger),
+ ok = application:set_env(kernel, new_var, new_val),
+ {ok, new_val} = application:get_env(kernel, new_var),
undefined = application:get_env(undefined_app, a),
undefined = application:get_env(kernel, error_logger_xyz),
default = application:get_env(kernel, error_logger_xyz, default),
@@ -1602,8 +1606,7 @@ get_key(Conf) when is_list(Conf) ->
{ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} =
rpc:call(Cp1, application, get_key, [appinc, start_phases]),
{ok, Env} = rpc:call(Cp1, application, get_key, [appinc ,env]),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, []} = rpc:call(Cp1, application, get_key, [appinc, modules]),
{ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} =
rpc:call(Cp1, application, get_key, [appinc, mod]),
@@ -1624,8 +1627,7 @@ get_key(Conf) when is_list(Conf) ->
{mod, {application_starter, [ch_sup, {appinc, 41, 43}] }},
{start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} =
rpc:call(Cp1, application, get_all_key, [appinc]),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, "Test of new app file, including appnew"} =
gen_server:call({global, {ch,41}}, {get_pid_key, description}),
@@ -1642,8 +1644,7 @@ get_key(Conf) when is_list(Conf) ->
{ok, [{init, [kalle]}, {takeover, []}, {go, [sune]}]} =
gen_server:call({global, {ch,41}}, {get_pid_key, start_phases}),
{ok, Env} = gen_server:call({global, {ch,41}}, {get_pid_key, env}),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
{ok, []} =
gen_server:call({global, {ch,41}}, {get_pid_key, modules}),
{ok, {application_starter, [ch_sup, {appinc, 41, 43}] }} =
@@ -1670,8 +1671,7 @@ get_key(Conf) when is_list(Conf) ->
{mod, {application_starter, [ch_sup, {appinc, 41, 43}] }},
{start_phases, [{init, [kalle]}, {takeover, []}, {go, [sune]}]}]} =
gen_server:call({global, {ch,41}}, get_pid_all_key),
- [{included_applications,[appinc1,appinc2]},
- {own2,val2},{own_env1,value1}] = lists:sort(Env),
+ [{own2,val2},{own_env1,value1}] = lists:sort(Env),
stop_node_nice(Cp1),
ok.
@@ -1946,6 +1946,94 @@ get_appls([_ | T], Res) ->
get_appls([], Res) ->
Res.
+%% Test set_env/1.
+set_env(Conf) when is_list(Conf) ->
+ ok = application:set_env([{appinc, [{own2, persist}, {not_in_app, persist}]},
+ {unknown_app, [{key, persist}]}]),
+
+ %% own_env1 and own2 are set in appinc
+ undefined = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, val2} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ %% On reload, values are lost
+ ok = application:unload(appinc),
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, val2} = application:get_env(appinc, own2),
+ undefined = application:get_env(appinc, not_in_app),
+
+ %% Clean up
+ ok = application:unload(appinc).
+
+%% Test set_env/2 with persistent true.
+set_env_persistent(Conf) when is_list(Conf) ->
+ Opts = [{persistent, true}],
+ ok = application:set_env([{appinc, [{own2, persist}, {not_in_app, persist}]},
+ {unknown_app, [{key, persist}]}], Opts),
+
+ %% own_env1 and own2 are set in appinc
+ undefined = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+ {ok, persist} = application:get_env(unknown_app, key),
+
+ %% On reload, values are not lost
+ ok = application:unload(appinc),
+ ok = application:load(appinc()),
+ {ok, value1} = application:get_env(appinc, own_env1),
+ {ok, persist} = application:get_env(appinc, own2),
+ {ok, persist} = application:get_env(appinc, not_in_app),
+
+ %% Clean up
+ ok = application:unload(appinc).
+
+set_env_errors(Conf) when is_list(Conf) ->
+ "application: 1; application name must be an atom" =
+ badarg_msg(fun() -> application:set_env([{1, []}]) end),
+
+ "application: foo; parameters must be a list" =
+ badarg_msg(fun() -> application:set_env([{foo, bar}]) end),
+
+ "invalid application config: foo_bar" =
+ badarg_msg(fun() -> application:set_env([foo_bar]) end),
+
+ "application: foo; invalid parameter name: 1" =
+ badarg_msg(fun() -> application:set_env([{foo, [{1, 2}]}]) end),
+
+ "application: foo; invalid parameter: config" =
+ badarg_msg(fun() -> application:set_env([{foo, [config]}]) end),
+
+ "application: kernel; erroneous parameter: distributed" =
+ badarg_msg(fun() -> application:set_env([{kernel, [{distributed, config}]}]) end),
+
+ "duplicate application config: foo" =
+ badarg_msg(fun() -> application:set_env([{foo, []}, {foo, []}]) end),
+
+ "application: foo; duplicate parameter: bar" =
+ badarg_msg(fun() -> application:set_env([{foo, [{bar, baz}, {bar, bat}]}]) end),
+
+ ok.
+
+badarg_msg(Fun) ->
+ try Fun() of
+ _ -> ct:fail(try_succeeded)
+ catch
+ error:{badarg, Msg} -> Msg
+ end.
%% Test set_env/4 and unset_env/3 with persistent true.
persistent_env(Conf) when is_list(Conf) ->
@@ -2078,6 +2166,42 @@ shutdown_deadlock(Config) when is_list(Config) ->
%%-----------------------------------------------------------------
+%% Relative paths in sys.config
+%%-----------------------------------------------------------------
+config_relative_paths(Config) ->
+ Dir = ?config(priv_dir,Config),
+ SubDir = filename:join(Dir,"subdir"),
+ Sys = filename:join(SubDir,"sys.config"),
+ ok = filelib:ensure_dir(Sys),
+ ok = file:write_file(Sys,"[\"../up.config\",\"current\"].\n"),
+
+ Up = filename:join(Dir,"up.config"),
+ ok = file:write_file(Up,"[{app1,[{key1,value}]}].\n"),
+
+ {ok,Cwd} = file:get_cwd(),
+ Current1 = filename:join(Cwd,"current.config"),
+ ok = file:write_file(Current1,"[{app1,[{key2,value1}]}].\n"),
+
+ N1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ {ok,Node1} = start_node(N1,filename:rootname(Sys)),
+ ok = rpc:call(Node1, application, load, [app1()]),
+ {ok, value} = rpc:call(Node1, application, get_env,[app1,key1]),
+ {ok, value1} = rpc:call(Node1, application, get_env,[app1,key2]),
+
+ Current2 = filename:join(SubDir,"current.config"),
+ ok = file:write_file(Current2,"[{app1,[{key2,value2}]}].\n"),
+
+ N2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_2"])),
+ {ok, Node2} = start_node(N2,filename:rootname(Sys)),
+ ok = rpc:call(Node2, application, load, [app1()]),
+ {ok, value} = rpc:call(Node2, application, get_env,[app1,key1]),
+ {ok, value2} = rpc:call(Node2, application, get_env,[app1,key2]),
+
+ stop_node_nice([Node1,Node2]),
+
+ ok.
+
+%%-----------------------------------------------------------------
%% Utility functions
%%-----------------------------------------------------------------
app0() ->
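
%% A minimal sketch of the bulk application:set_env API exercised by the new
%% set_env/1 and set_env_persistent/1 cases above, assuming a hypothetical,
%% not-yet-loaded application my_app (names here are illustrative only):

    ok = application:set_env([{my_app, [{key, val}]}]),
    {ok, val} = application:get_env(my_app, key),
    %% With {persistent, true} the values also survive an unload/load cycle:
    ok = application:set_env([{my_app, [{key, val}]}], [{persistent, true}]).
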
diff --git a/lib/kernel/test/code_SUITE.erl b/lib/kernel/test/code_SUITE.erl
index 62ad7b6a27..6b133f8d6b 100644
--- a/lib/kernel/test/code_SUITE.erl
+++ b/lib/kernel/test/code_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -25,8 +25,8 @@
-export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2]).
-export([set_path/1, get_path/1, add_path/1, add_paths/1, del_path/1,
replace_path/1, load_file/1, load_abs/1, ensure_loaded/1,
- delete/1, purge/1, purge_many_exits/1, soft_purge/1, is_loaded/1,
- all_loaded/1,
+ delete/1, purge/1, purge_many_exits/0, purge_many_exits/1,
+ soft_purge/1, is_loaded/1, all_loaded/1,
load_binary/1, dir_req/1, object_code/1, set_path_file/1,
upgrade/1,
sticky_dir/1, pa_pz_option/1, add_del_path/1,
@@ -35,11 +35,13 @@
purge_stacktrace/1, mult_lib_roots/1, bad_erl_libs/1,
code_archive/1, code_archive2/1, on_load/1, on_load_binary/1,
on_load_embedded/1, on_load_errors/1, on_load_update/1,
+ on_load_trace_on_load/1,
on_load_purge/1, on_load_self_call/1, on_load_pending/1,
on_load_deleted/1,
big_boot_embedded/1,
+ module_status/1,
native_early_modules/1, get_mode/1,
- normalized_paths/1]).
+ normalized_paths/1, mult_embedded_flags/1]).
-export([init_per_testcase/2, end_per_testcase/2,
init_per_suite/1, end_per_suite/1]).
@@ -53,7 +55,7 @@
suite() ->
[{ct_hooks,[ts_install_cth]},
- {timetrap,{minutes,5}}].
+ {timetrap,{seconds,30}}].
all() ->
[set_path, get_path, add_path, add_paths, del_path,
@@ -65,13 +67,17 @@ all() ->
ext_mod_dep, clash, where_is_file,
purge_stacktrace, mult_lib_roots,
bad_erl_libs, code_archive, code_archive2, on_load,
- on_load_binary, on_load_embedded, on_load_errors, on_load_update,
+ on_load_binary, on_load_embedded, on_load_errors,
+ {group, sequence},
on_load_purge, on_load_self_call, on_load_pending,
on_load_deleted,
- big_boot_embedded, native_early_modules, get_mode, normalized_paths].
+ module_status,
+ big_boot_embedded, native_early_modules, get_mode, normalized_paths,
+ mult_embedded_flags].
-groups() ->
- [].
+%% These need to run in order
+groups() -> [{sequence, [sequence], [on_load_update,
+ on_load_trace_on_load]}].
init_per_group(_GroupName, Config) ->
Config.
@@ -93,6 +99,11 @@ init_per_suite(Config) ->
end_per_suite(Config) ->
Config.
+-define(TESTMOD, test_dummy).
+-define(TESTMODSTR, "test_dummy").
+-define(TESTMODSRC, ?TESTMODSTR ".erl").
+-define(TESTMODOBJ, ?TESTMODSTR ".beam").
+
init_per_testcase(big_boot_embedded, Config) ->
case catch crypto:start() of
ok ->
@@ -100,16 +111,41 @@ init_per_testcase(big_boot_embedded, Config) ->
_Else ->
{skip, "Needs crypto!"}
end;
+init_per_testcase(on_load_embedded, Config0) ->
+ LibRoot = code:lib_dir(),
+ LinkName = filename:join(LibRoot, "on_load_app-1.0"),
+ Config = [{link_name,LinkName}|Config0],
+ init_per_testcase(Config);
init_per_testcase(_Func, Config) ->
+ init_per_testcase(Config).
+
+init_per_testcase(Config) ->
P = code:get_path(),
[{code_path, P}|Config].
+
+end_per_testcase(module_status, Config) ->
+ code:purge(?TESTMOD),
+ code:delete(?TESTMOD),
+ code:purge(?TESTMOD),
+ file:delete(?TESTMODOBJ),
+ file:delete(?TESTMODSRC),
+ end_per_testcase(Config);
end_per_testcase(TC, Config) when TC == mult_lib_roots;
TC == big_boot_embedded ->
{ok, HostName} = inet:gethostname(),
NodeName = list_to_atom(atom_to_list(TC)++"@"++HostName),
test_server:stop_node(NodeName),
end_per_testcase(Config);
+end_per_testcase(on_load_embedded, Config) ->
+ LinkName = proplists:get_value(link_name, Config),
+ _ = del_link(LinkName),
+ end_per_testcase(Config);
+end_per_testcase(upgrade, Config) ->
+ %% Make sure tracing is turned off even if the test times out.
+ erlang:trace_pattern({error_handler,undefined_function,3}, false, [global]),
+ erlang:trace(self(), false, [call]),
+ end_per_testcase(Config);
end_per_testcase(_Func, Config) ->
end_per_testcase(Config).
@@ -309,7 +345,7 @@ load_abs(Config) when is_list(Config) ->
{error, nofile} = code:load_abs(TestDir ++ "/duuuumy_mod"),
{error, badfile} = code:load_abs(TestDir ++ "/code_a_test"),
{'EXIT', _} = (catch code:load_abs({})),
- {'EXIT', _} = (catch code:load_abs("Non-latin-имя-файла")),
+ {error, nofile} = code:load_abs("Non-latin-имя-файла"),
{module, code_b_test} = code:load_abs(TestDir ++ "/code_b_test"),
code:stick_dir(TestDir),
{error, sticky_directory} = code:load_abs(TestDir ++ "/code_b_test"),
@@ -319,7 +355,7 @@ load_abs(Config) when is_list(Config) ->
ensure_loaded(Config) when is_list(Config) ->
{module, lists} = code:ensure_loaded(lists),
case init:get_argument(mode) of
- {ok, [["embedded"]]} ->
+ {ok, [["embedded"] | _]} ->
{error, embedded} = code:ensure_loaded(code_b_test),
{error, badarg} = code:ensure_loaded(34),
ok;
@@ -361,6 +397,9 @@ purge(Config) when is_list(Config) ->
process_flag(trap_exit, OldFlag),
ok.
+purge_many_exits() ->
+ [{timetrap, {minutes, 2}}].
+
purge_many_exits(Config) when is_list(Config) ->
OldFlag = process_flag(trap_exit, true),
@@ -483,23 +522,35 @@ load_binary(Config) when is_list(Config) ->
code:delete(code_b_test),
ok.
+
upgrade(Config) ->
DataDir = proplists:get_value(data_dir, Config),
- %%T = [beam, hipe],
- T = [beam],
-
- [upgrade_do(DataDir, Client, U1, U2, O1, O2)
- || Client<-T, U1<-T, U2<-T, O1<-T, O2<-T],
-
+ case erlang:system_info(hipe_architecture) of
+ undefined ->
+ upgrade_do(DataDir, beam, [beam]);
+
+ _ ->
+ T = [beam, hipe],
+ [upgrade_do(DataDir, Client, T) || Client <- T],
+
+ case hipe:erllvm_is_supported() of
+ false -> ok;
+ true ->
+ T2 = [beam, hipe_llvm],
+ [upgrade_do(DataDir, Client, T2) || Client <- T2]
+ end
+ end,
ok.
-upgrade_do(DataDir, Client, U1, U2, O1, O2) ->
+upgrade_do(DataDir, Client, T) ->
compile_load(upgrade_client, DataDir, undefined, Client),
- upgrade_client:run(DataDir, U1, U2, O1, O2),
+ [upgrade_client:run(DataDir, U1, U2, O1, O2)
+ || U1<-T, U2<-T, O1<-T, O2<-T],
ok.
compile_load(Mod, Dir, Ver, CodeType) ->
+ %%erlang:display({"{{{{{{{{{{{{{{{{Loading",Mod,Ver,CodeType}),
Version = case Ver of
undefined ->
io:format("Compiling '~p' as ~p\n", [Mod, CodeType]),
@@ -511,14 +562,21 @@ compile_load(Mod, Dir, Ver, CodeType) ->
end,
Target = case CodeType of
beam -> [];
- hipe -> [native]
+ hipe -> [native];
+ hipe_llvm -> [native,{hipe,[to_llvm]}]
end,
CompOpts = [binary, report] ++ Target ++ Version,
Src = filename:join(Dir, atom_to_list(Mod) ++ ".erl"),
+ T1 = erlang:now(),
{ok,Mod,Code} = compile:file(Src, CompOpts),
+ T2 = erlang:now(),
ObjFile = filename:basename(Src,".erl") ++ ".beam",
{module,Mod} = code:load_binary(Mod, ObjFile, Code),
+ T3 = erlang:now(),
+ io:format("Compile time ~p ms, Load time ~p ms\n",
+ [timer:now_diff(T2,T1) div 1000, timer:now_diff(T3,T2) div 1000]),
+ %%erlang:display({"}}}}}}}}}}}}}}}Loaded",Mod,Ver,CodeType}),
ok.
dir_req(Config) when is_list(Config) ->
@@ -818,8 +876,6 @@ check_funs({'$M_EXPR','$F_EXPR',_},
{code_server,start_link,1}]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',_},
[{erlang,spawn_link,1},{code_server,start_link,1}]) -> 0;
-check_funs({'$M_EXPR',module_info,1},
- [{hipe_unified_loader,patch_to_emu_step1,1} | _]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',2},
[{hipe_unified_loader,write_words,3} | _]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',2},
@@ -831,11 +887,7 @@ check_funs({'$M_EXPR','$F_EXPR',2},
{hipe_unified_loader,sort_and_write,5} | _]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',1},
[{lists,foreach,2},
- {hipe_unified_loader,patch_consts,3} | _]) -> 0;
-check_funs({'$M_EXPR','$F_EXPR',1},
- [{lists,foreach,2},
- {hipe_unified_loader,mark_referred_from,1},
- {hipe_unified_loader,get_refs_from,2}| _]) -> 0;
+ {hipe_unified_loader,patch_consts,4} | _]) -> 0;
check_funs({'$M_EXPR',warning_msg,2},
[{code_server,finish_on_load_report,2} | _]) -> 0;
check_funs({'$M_EXPR','$F_EXPR',1},
@@ -888,37 +940,34 @@ purge_stacktrace(Config) when is_list(Config) ->
code:purge(code_b_test),
try code_b_test:call(fun(b) -> ok end, a)
catch
- error:function_clause ->
+ error:function_clause:Stacktrace ->
code:load_file(code_b_test),
- case erlang:get_stacktrace() of
+ case Stacktrace of
[{?MODULE,_,[a],_},
{code_b_test,call,2,_},
{?MODULE,purge_stacktrace,1,_}|_] ->
- false = code:purge(code_b_test),
- [] = erlang:get_stacktrace()
+ false = code:purge(code_b_test)
end
end,
try code_b_test:call(nofun, 2)
catch
- error:function_clause ->
+ error:function_clause:Stacktrace2 ->
code:load_file(code_b_test),
- case erlang:get_stacktrace() of
+ case Stacktrace2 of
[{code_b_test,call,[nofun,2],_},
{?MODULE,purge_stacktrace,1,_}|_] ->
- false = code:purge(code_b_test),
- [] = erlang:get_stacktrace()
+ false = code:purge(code_b_test)
end
end,
Args = [erlang,error,[badarg]],
try code_b_test:call(erlang, error, [badarg,Args])
catch
- error:badarg ->
+ error:badarg:Stacktrace3 ->
code:load_file(code_b_test),
- case erlang:get_stacktrace() of
+ case Stacktrace3 of
[{code_b_test,call,Args,_},
{?MODULE,purge_stacktrace,1,_}|_] ->
- false = code:purge(code_b_test),
- [] = erlang:get_stacktrace()
+ false = code:purge(code_b_test)
end
end,
ok.
@@ -981,6 +1030,13 @@ mult_lib_remove_prefix([H|T1], [H|T2]) ->
mult_lib_remove_prefix([$/|T], []) -> T.
bad_erl_libs(Config) when is_list(Config) ->
+ %% Preserve ERL_LIBS if set.
+ BadLibs0 = "/no/such/dir",
+ BadLibs =
+ case os:getenv("ERL_LIBS") of
+ false -> BadLibs0;
+ Libs -> BadLibs0 ++ ":" ++ Libs
+ end,
{ok,Node} =
test_server:start_node(bad_erl_libs, slave, []),
Code = rpc:call(Node,code,get_path,[]),
@@ -988,10 +1044,9 @@ bad_erl_libs(Config) when is_list(Config) ->
{ok,Node2} =
test_server:start_node(bad_erl_libs, slave,
- [{args,"-env ERL_LIBS /no/such/dir"}]),
+ [{args,"-env ERL_LIBS " ++ BadLibs}]),
Code2 = rpc:call(Node,code,get_path,[]),
test_server:stop_node(Node2),
-
%% Test that code path is not affected by the faulty ERL_LIBS
Code = Code2,
@@ -1244,10 +1299,9 @@ on_load_embedded(Config) when is_list(Config) ->
on_load_embedded_1(Config) ->
DataDir = proplists:get_value(data_dir, Config),
+ LinkName = proplists:get_value(link_name, Config),
%% Link the on_load_app application into the lib directory.
- LibRoot = code:lib_dir(),
- LinkName = filename:join(LibRoot, "on_load_app-1.0"),
OnLoadApp = filename:join(DataDir, "on_load_app-1.0"),
del_link(LinkName),
io:format("LinkName :~p, OnLoadApp: ~p~n",[LinkName,OnLoadApp]),
@@ -1281,8 +1335,7 @@ on_load_embedded_1(Config) ->
ok = rpc:call(Node, on_load_embedded, status, []),
%% Clean up.
- stop_node(Node),
- ok = del_link(LinkName).
+ stop_node(Node).
del_link(LinkName) ->
case file:delete(LinkName) of
@@ -1333,9 +1386,8 @@ create_big_boot(Config) ->
%% corresponding beam file (if hipe is not enabled).
filter_app("hipe",_) -> false;
-%% Dialyzer and typer depends on hipe
+%% Dialyzer depends on hipe
filter_app("dialyzer",_) -> false;
-filter_app("typer",_) -> false;
%% Orber requires explicit configuration
filter_app("orber",_) -> false;
@@ -1456,7 +1508,7 @@ do_on_load_error(ReturnValue) ->
{undef,[{on_load_error,main,[],_}|_]} = Exit
end.
-on_load_update(_Config) ->
+on_load_update(Config) ->
{Mod,Code1} = on_load_update_code(1),
{module,Mod} = code:load_binary(Mod, "", Code1),
42 = Mod:a(),
@@ -1466,7 +1518,7 @@ on_load_update(_Config) ->
{Mod,Code2} = on_load_update_code(2),
{error,on_load_failure} = code:load_binary(Mod, "", Code2),
42 = Mod:a(),
- 100 = Mod:b(99),
+ 78 = Mod:b(77),
{'EXIT',{undef,_}} = (catch Mod:never()),
4 = erlang:trace_pattern({Mod,'_','_'}, false),
@@ -1477,6 +1529,9 @@ on_load_update(_Config) ->
{'EXIT',{undef,_}} = (catch Mod:b(10)),
{'EXIT',{undef,_}} = (catch Mod:never()),
+ code:purge(Mod),
+ code:delete(Mod),
+ code:purge(Mod),
ok.
on_load_update_code(Version) ->
@@ -1508,6 +1563,36 @@ on_load_update_code_1(3, Mod) ->
"f() -> ok.\n",
"c() -> 100.\n"]).
+%% Test -on_load while trace feature 'on_load' is enabled (OTP-14612)
+on_load_trace_on_load(Config) ->
+ %% 'on_load' enables tracing for all newly loaded modules, so we make a dry
+ %% run to ensure that ancillary modules like 'merl' won't be loaded during
+ %% the actual test.
+ on_load_update(Config),
+
+ Papa = self(),
+ Tracer = spawn_link(fun F() -> receive M -> Papa ! M end, F() end),
+ {tracer,[]} = erlang:trace_info(self(),tracer),
+ erlang:trace(self(), true, [call, {tracer, Tracer}]),
+ erlang:trace_pattern(on_load, true, []),
+ on_load_update(Config),
+ erlang:trace_pattern(on_load, false, []),
+ erlang:trace(self(), false, [call]),
+
+ Ms = flush(),
+ [{trace, Papa, call, {on_load_update_code, a, []}},
+ {trace, Papa, call, {on_load_update_code, b, [99]}},
+ {trace, Papa, call, {on_load_update_code, c, []}}] = Ms,
+
+ exit(Tracer, normal),
+ ok.
+
+flush() ->
+ receive M -> [M | flush()]
+ after 100 -> []
+ end.
+
+
on_load_purge(_Config) ->
Mod = ?FUNCTION_NAME,
register(Mod, self()),
@@ -1752,6 +1837,207 @@ do_normalized_paths([M|Ms]) ->
do_normalized_paths([]) ->
ok.
+%% Make sure that the extra -mode flags are ignored
+mult_embedded_flags(_Config) ->
+ Modes = [{" -mode embedded", embedded},
+ {" -mode interactive", interactive},
+ {" -mode invalid", interactive}],
+
+ [ begin
+ {ArgMode, ExpectedMode} = Mode,
+ {ok, Node} = start_node(mode_test, ArgMode),
+ ExpectedMode = rpc:call(Node, code, get_mode, []),
+ true = stop_node(Node)
+ end || Mode <- Modes],
+
+ [ begin
+ {ArgIgnoredMode, _} = IgnoredMode,
+ {ArgRelevantMode, ExpectedMode} = RelevantMode,
+ {ok, Node} = start_node(mode_test, ArgRelevantMode ++ ArgIgnoredMode),
+ ExpectedMode = rpc:call(Node, code, get_mode, []),
+ true = stop_node(Node)
+ end || IgnoredMode <- Modes, RelevantMode <- Modes],
+ ok.
+
+%% Test that module_status/1 behaves as expected
+module_status(_Config) ->
+ case test_server:is_cover() of
+ true ->
+ module_status();
+ false ->
+ %% Make sure that we terminate the cover server.
+ try
+ module_status()
+ after
+ cover:stop()
+ end
+ end.
+
+module_status() ->
+ %% basics
+ not_loaded = code:module_status(fubar), % nonexisting
+ {file, preloaded} = code:is_loaded(erlang),
+ loaded = code:module_status(erlang), % preloaded
+ loaded = code:module_status(?MODULE), % normal known loaded
+
+ non_existing = code:which(?TESTMOD), % verify dummy name not in path
+ code:purge(?TESTMOD), % ensure no previous version in memory
+ code:delete(?TESTMOD),
+ code:purge(?TESTMOD),
+
+ %% generated code is detected as such
+ {ok,?TESTMOD,Bin} = compile:forms(dummy_ast(), []),
+ {module,?TESTMOD} = code:load_binary(?TESTMOD,"",Bin), % no source file
+ ok = ?TESTMOD:f(),
+ "" = code:which(?TESTMOD), % verify empty string for source file
+ loaded = code:module_status(?TESTMOD),
+
+ %% deleting generated code
+ true = code:delete(?TESTMOD),
+ non_existing = code:which(?TESTMOD), % verify still not in path
+ not_loaded = code:module_status(?TESTMOD),
+
+ %% beam file exists but not loaded
+ make_source_file(<<"0">>),
+ compile_beam(0),
+ true = (non_existing =/= code:which(?TESTMOD)), % verify in path
+ not_loaded = code:module_status(?TESTMOD),
+
+ %% loading code from disk makes it loaded
+ load_code(),
+ loaded = code:module_status(?TESTMOD), % loaded
+
+ %% cover compiling a module
+ {ok,?TESTMOD} = cover:compile(?TESTMOD),
+ {file, cover_compiled} = code:is_loaded(?TESTMOD), % verify cover compiled
+ modified = code:module_status(?TESTMOD), % loaded cover code but file exists
+ remove_code(),
+ removed = code:module_status(?TESTMOD), % removed
+ compile_beam(0),
+ modified = code:module_status(?TESTMOD), % recreated
+ load_code(),
+ loaded = code:module_status(?TESTMOD), % loading removes cover status
+ code:purge(?TESTMOD),
+ true = code:delete(?TESTMOD),
+ not_loaded = code:module_status(?TESTMOD), % deleted
+
+ %% recompilation ignores timestamps, only md5 matters
+ load_code(),
+ compile_beam(1100),
+ loaded = code:module_status(?TESTMOD),
+
+ %% modifying module detects different md5
+ make_source_file(<<"1">>),
+ compile_beam(0),
+ modified = code:module_status(?TESTMOD),
+
+ %% loading the modified code from disk makes it loaded
+ load_code(),
+ loaded = code:module_status(?TESTMOD),
+
+ %% removing and recreating a module with same md5
+ remove_code(),
+ removed = code:module_status(?TESTMOD),
+ compile_beam(0),
+ loaded = code:module_status(?TESTMOD),
+
+ case erlang:system_info(hipe_architecture) of
+ undefined ->
+ %% no native support
+ ok;
+ _ ->
+ %% native chunk is ignored if beam code is already loaded
+ load_code(),
+ loaded = code:module_status(?TESTMOD),
+ false = has_native(?TESTMOD),
+ compile_native(0),
+ BeamMD5 = erlang:get_module_info(?TESTMOD, md5),
+ {ok,{?TESTMOD,BeamMD5}} = beam_lib:md5(?TESTMODOBJ), % beam md5 unchanged
+ loaded = code:module_status(?TESTMOD),
+
+ %% native code reported as loaded, though different md5 from beam
+ load_code(),
+ true = has_native(?TESTMOD),
+ NativeMD5 = erlang:get_module_info(?TESTMOD, md5),
+ true = (BeamMD5 =/= NativeMD5),
+ loaded = code:module_status(?TESTMOD),
+
+ %% recompilation ignores timestamps, only md5 matters
+ compile_native(1100), % later timestamp
+ loaded = code:module_status(?TESTMOD),
+
+ %% modifying native module detects different md5
+ make_source_file(<<"2">>),
+ compile_native(0),
+ modified = code:module_status(?TESTMOD),
+
+ %% loading the modified native code from disk makes it loaded
+ load_code(),
+ true = has_native(?TESTMOD),
+ NativeMD5_2 = erlang:get_module_info(?TESTMOD, md5),
+ true = (NativeMD5 =/= NativeMD5_2), % verify native md5 changed
+ {ok,{?TESTMOD,BeamMD5_2}} = beam_lib:md5(?TESTMODOBJ),
+ true = (BeamMD5_2 =/= NativeMD5_2), % verify md5 differs from beam
+ loaded = code:module_status(?TESTMOD),
+
+ %% removing and recreating a native module with same md5
+ remove_code(),
+ removed = code:module_status(?TESTMOD),
+ compile_native(0),
+ loaded = code:module_status(?TESTMOD),
+
+ %% purging/deleting native module
+ code:purge(?TESTMOD),
+ true = code:delete(?TESTMOD),
+ not_loaded = code:module_status(?TESTMOD)
+ end,
+ ok.
+
+compile_beam(Sleep) ->
+ compile(Sleep, []).
+
+compile_native(Sleep) ->
+ compile(Sleep, [native]).
+
+compile(Sleep, Opts) ->
+ timer:sleep(Sleep), % increment compilation timestamp
+ {ok,?TESTMOD} = compile:file(?TESTMODSRC, Opts).
+
+load_code() ->
+ code:purge(?TESTMOD),
+ {module,?TESTMOD} = code:load_file(?TESTMOD).
+
+remove_code() ->
+ ok = file:delete(?TESTMODOBJ).
+
+has_native(Module) ->
+ case erlang:get_module_info(Module, native_addresses) of
+ [] -> false;
+ [_|_] -> true
+ end.
+
+make_source_file(Body) ->
+ ok = file:write_file(?TESTMODSRC, dummy_source(Body)).
+
+dummy_source(Body) ->
+ [<<"-module(" ?TESTMODSTR ").\n"
+ "-export([f/0]).\n"
+ "f() -> ">>, Body, <<".\n">>].
+
+dummy_ast() ->
+ dummy_ast(?TESTMODSTR).
+
+dummy_ast(Mod) when is_atom(Mod) ->
+ dummy_ast(atom_to_list(Mod));
+dummy_ast(ModStr) ->
+ [scan_form("-module(" ++ ModStr ++ ")."),
+ scan_form("-export([f/0])."),
+ scan_form("f() -> ok.")].
+
+scan_form(String) ->
+ {ok,Ts,_} = erl_scan:string(String),
+ {ok,F} = erl_parse:parse_form(Ts),
+ F.
%%-----------------------------------------------------------------
%% error_logger handler.
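
%% A minimal sketch of the code:module_status/1 transitions checked by the
%% module_status case above, assuming a hypothetical module m whose m.beam
%% is on the code path but not yet loaded:

    not_loaded = code:module_status(m),   % beam file on disk, not loaded
    {module,m} = code:load_file(m),
    loaded     = code:module_status(m),   % loaded code matches the file
    %% after recompiling m.erl with a change (without reloading):
    modified   = code:module_status(m),
    %% after deleting m.beam while m is still loaded:
    removed    = code:module_status(m).
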
diff --git a/lib/kernel/test/code_SUITE_data/upgrade_client.erl b/lib/kernel/test/code_SUITE_data/upgrade_client.erl
index bb655e01d3..1c3c2def53 100644
--- a/lib/kernel/test/code_SUITE_data/upgrade_client.erl
+++ b/lib/kernel/test/code_SUITE_data/upgrade_client.erl
@@ -9,6 +9,8 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
%% Load version 1 of upgradee
code_SUITE:compile_load(upgradee, Dir, 1, Upgradee1),
+ Tracer = start_tracing(),
+
?line 1 = upgradee:exp1(),
?line 1 = upgradee:exp1exp2(),
?line 1 = upgradee:exp1loc2(),
@@ -56,6 +58,15 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc1),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc2),
+ Env1 = "Env1",
+ put(loc1_fun, upgradee:get_local_fun(Env1)),
+ ?line {1,Env1} = (get(loc1_fun))(),
+
+ put(exp1exp2_fun, upgradee:get_exp1exp2_fun()),
+ ?line 1 = (get(exp1exp2_fun))(),
+
+ ok = check_tracing(Tracer, 13),
+
%%
%% Load version 1 of other
%%
@@ -78,6 +89,8 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
?line {'EXIT',{undef,_}} = proxy_call(P, other, exp2),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc2),
+ ok = check_tracing(Tracer, 5),
+
%%
%% Load version 2 of upgradee
%%
@@ -130,6 +143,15 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
?line {'EXIT',{undef,_}} = proxy_call(P, other, exp2),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc2),
+ ?line {1,Env1} = (get(loc1_fun))(),
+ Env2 = "Env2",
+ put(loc2_fun, upgradee:get_local_fun(Env2)),
+ ?line {2,Env2} = (get(loc2_fun))(),
+
+ ?line 2 = (get(exp1exp2_fun))(),
+
+ ok = check_tracing(Tracer, 10),
+
%%
%% Load version 2 of other
%%
@@ -182,17 +204,26 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc1loc2),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc2),
+ ?line {1,Env1} = (get(loc1_fun))(),
+ ?line {2,Env2} = (get(loc2_fun))(),
+ ?line 2 = (get(exp1exp2_fun))(),
+
+ ok = check_tracing(Tracer, 10),
%%
%% Upgrade proxy to version 2
%%
P ! upgrade_order,
-
%%
- io:format("Delete version 2 of 'upgradee'\n",[]),
+ io:format("Purge version 1 of 'upgradee'\n",[]),
%%
+ put(loc1_fun,undefined),
code:purge(upgradee),
+
+ %%
+ io:format("Delete version 2 of 'upgradee'\n",[]),
+ %%
code:delete(upgradee),
?line {'EXIT',{undef,_}} = (catch upgradee:exp2()),
@@ -239,17 +270,24 @@ run(Dir, Upgradee1, Upgradee2, Other1, Other2) ->
?line {'EXIT',{undef,_}} = proxy_call(P, other, exp1loc2),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc1loc2),
?line {'EXIT',{undef,_}} = proxy_call(P, other, loc2),
+
+ ?line {'EXIT',{undef,_}} = (catch (get(exp1exp2_fun))()),
+ ok = check_tracing(Tracer, 14),
+
unlink(P),
exit(P, die_please),
io:format("Purge 'upgradee'\n",[]),
+ put(loc2_fun,undefined),
code:purge(upgradee),
io:format("Delete and purge 'other'\n",[]),
code:purge(other),
code:delete(other),
code:purge(other),
+
+ stop_tracing(Tracer),
ok.
proxy_call(Pid, CallType, Func) ->
@@ -257,3 +295,55 @@ proxy_call(Pid, CallType, Func) ->
receive
{Pid, call_result, Func, Ret} -> Ret
end.
+
+
+start_tracing() ->
+ Self = self(),
+ {Tracer,_} = spawn_opt(fun() -> tracer_loop(Self) end, [link,monitor]),
+ ?line 1 = erlang:trace_pattern({error_handler,undefined_function,3},
+ true, [global]),
+ ?line 1 = erlang:trace(Self, true, [call,{tracer,Tracer}]),
+ Tracer.
+
+
+tracer_loop(Receiver) ->
+ receive
+ die_please ->
+ ok;
+ {do_trace_delivered, Tracee} ->
+ _ = erlang:trace_delivered(Tracee),
+ tracer_loop(Receiver);
+
+ Msg ->
+ Receiver ! Msg,
+ tracer_loop(Receiver)
+ end.
+
+check_tracing(Tracer, Expected) ->
+ Tracer ! {do_trace_delivered, self()},
+ case check_tracing_loop(0,[]) of
+ {Expected,_} ->
+ ok;
+ {Got, MsgList} ->
+ io:format("Expected ~p trace msg, got ~p:\n~p\n",
+ [Expected, Got, lists:reverse(MsgList)]),
+ "Trace msg mismatch"
+ end.
+
+check_tracing_loop(N, MsgList) ->
+ Self = self(),
+ receive
+ {trace, _Pid, call, {_M, _F, _Args}} = Msg ->
+ check_tracing_loop(N+1, [Msg | MsgList]);
+ {trace_delivered, Self, _} ->
+ {N, MsgList}
+ end.
+
+
+stop_tracing(Tracer) ->
+ erlang:trace_pattern({error_handler,undefined_function,3}, false, [global]),
+ erlang:trace(self(), false, [call]),
+ Tracer ! die_please,
+ receive
+ {'DOWN', _, process, Tracer, _} -> ok
+ end.
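
%% A minimal sketch of the call-tracing pattern used by start_tracing/0 and
%% check_tracing/2 above: calls that fall through to
%% error_handler:undefined_function/3 (i.e. hit deleted or unloaded code) are
%% delivered to a tracer process. Tracer is assumed to be such a tracer pid:

    1 = erlang:trace_pattern({error_handler,undefined_function,3}, true, [global]),
    1 = erlang:trace(self(), true, [call, {tracer, Tracer}]),
    %% ... run the calls expected to end up in undefined_function/3 ...
    erlang:trace_pattern({error_handler,undefined_function,3}, false, [global]),
    erlang:trace(self(), false, [call]).
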
diff --git a/lib/kernel/test/code_SUITE_data/upgradee.erl b/lib/kernel/test/code_SUITE_data/upgradee.erl
index 62b1d95e30..8ca660c19c 100644
--- a/lib/kernel/test/code_SUITE_data/upgradee.erl
+++ b/lib/kernel/test/code_SUITE_data/upgradee.erl
@@ -8,6 +8,9 @@
-export([exp1/0]). % only exported in v1
-export([exp1loc2/0]). % exported in v1, local in v2
-export([exp1exp2/0]). % exported in v1 and v2
+-export([get_local_fun/1]).
+-export([get_exp1exp2_fun/0]).
+-export([exp1exp2_fun/0]).
exp1() -> ?VERSION.
loc1() -> ?VERSION.
@@ -20,6 +23,9 @@ loc1() -> ?VERSION.
-export([exp2/0]).
-export([loc1exp2/0]).
-export([exp1exp2/0]).
+-export([get_local_fun/1]).
+-export([get_exp1exp2_fun/0]).
+-export([exp1exp2_fun/0]).
exp2() -> ?VERSION.
loc2() -> ?VERSION.
@@ -31,6 +37,12 @@ exp1loc2() -> ?VERSION.
loc1exp2() -> ?VERSION.
loc1loc2() -> ?VERSION.
+get_local_fun(Env) -> fun() -> {?VERSION,Env} end.
+get_exp1exp2_fun() -> fun ?MODULE:exp1exp2_fun/0.
+
+exp1exp2_fun() ->
+ ?VERSION.
+
dispatch_loop() ->
receive
upgrade_order ->
diff --git a/lib/kernel/test/disk_log_SUITE.erl b/lib/kernel/test/disk_log_SUITE.erl
index f7ad9c0c04..9704c3b28c 100644
--- a/lib/kernel/test/disk_log_SUITE.erl
+++ b/lib/kernel/test/disk_log_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -89,8 +89,6 @@
dist_terminate/1, dist_accessible/1, dist_deadlock/1,
dist_open2/1, other_groups/1,
- evil/1,
-
otp_6278/1, otp_10131/1]).
-export([head_fun/1, hf/0, lserv/1,
@@ -123,7 +121,7 @@
[halt_int, wrap_int, halt_ext, wrap_ext, read_mode, head,
notif, new_idx_vsn, reopen, block, unblock, open, close,
error, chunk, truncate, many_users, info, change_size,
- change_attribute, distribution, evil, otp_6278, otp_10131]).
+ change_attribute, distribution, otp_6278, otp_10131]).
%% These test cases should be skipped if the VxWorks card is
%% configured without NFS cache.
@@ -149,7 +147,7 @@ all() ->
{group, open}, {group, close}, {group, error}, chunk,
truncate, many_users, {group, info},
{group, change_size}, change_attribute,
- {group, distribution}, evil, otp_6278, otp_10131].
+ {group, distribution}, otp_6278, otp_10131].
groups() ->
[{halt_int, [], [halt_int_inf, {group, halt_int_sz}]},
@@ -421,7 +419,7 @@ halt_ro_alog(Conf) when is_list(Conf) ->
halt_ro_alog_wait_notify(Log, T) ->
Term = term_to_binary(T),
receive
- {disk_log, _, Log,{read_only, Term}} ->
+ {disk_log, _, Log,{read_only, [Term]}} ->
ok;
Other ->
Other
@@ -449,7 +447,7 @@ halt_ro_balog(Conf) when is_list(Conf) ->
halt_ro_balog_wait_notify(Log, T) ->
Term = list_to_binary(T),
receive
- {disk_log, _, Log,{read_only, Term}} ->
+ {disk_log, _, Log,{read_only, [Term]}} ->
ok;
Other ->
Other
@@ -1385,15 +1383,15 @@ blocked_notif(Conf) when is_list(Conf) ->
"The requested operation" ++ _ = format_error(Error1),
ok = disk_log:blog(n, B),
ok = disk_log:alog(n, B),
- rec(1, {disk_log, node(), n, {format_external, term_to_binary(B)}}),
+ rec(1, {disk_log, node(), n, {format_external, [term_to_binary(B)]}}),
ok = disk_log:alog_terms(n, [B,B,B,B]),
rec(1, {disk_log, node(), n, {format_external,
lists:map(fun term_to_binary/1, [B,B,B,B])}}),
ok = disk_log:block(n, false),
ok = disk_log:alog(n, B),
- rec(1, {disk_log, node(), n, {blocked_log, term_to_binary(B)}}),
+ rec(1, {disk_log, node(), n, {blocked_log, [term_to_binary(B)]}}),
ok = disk_log:balog(n, B),
- rec(1, {disk_log, node(), n, {blocked_log, list_to_binary(B)}}),
+ rec(1, {disk_log, node(), n, {blocked_log, [list_to_binary(B)]}}),
ok = disk_log:balog_terms(n, [B,B,B,B]),
disk_log:close(n),
rec(1, {disk_log, node(), n, {blocked_log,
@@ -1752,7 +1750,7 @@ block_queue(Conf) when is_list(Conf) ->
true = [{1,a},{2,b},{3,c},{4,d},{5,e},{6,f},{7,g},{8,h}] == Terms,
del(File, 2),
Q = qlen(),
- true = (P0 == pps()),
+ check_pps(P0),
ok.
%% OTP-4880. Blocked processes did not get disk_log_stopped message.
@@ -1784,7 +1782,7 @@ block_queue2(Conf) when is_list(Conf) ->
{ok,<<>>} = file:read_file(File ++ ".1"),
del(File, No),
Q = qlen(),
- true = (P0 == pps()),
+ check_pps(P0),
ok.
@@ -2121,7 +2119,7 @@ close_block(Conf) when is_list(Conf) ->
0 = sync_do(Pid2, users),
sync_do(Pid2, terminate),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Users terminate (no link...).
Pid3 = spawn_link(?MODULE, lserv, [n]),
@@ -2139,7 +2137,7 @@ close_block(Conf) when is_list(Conf) ->
disk_log:close(n),
disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner terminates.
Pid5 = spawn_link(?MODULE, lserv, [n]),
@@ -2156,7 +2154,7 @@ close_block(Conf) when is_list(Conf) ->
1 = users(n),
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking user terminates.
Pid6 = spawn_link(?MODULE, lserv, [n]),
@@ -2176,7 +2174,7 @@ close_block(Conf) when is_list(Conf) ->
1 = users(n),
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner terminates.
Pid7 = spawn_link(?MODULE, lserv, [n]),
@@ -2194,7 +2192,7 @@ close_block(Conf) when is_list(Conf) ->
1 = users(n),
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Two owners, the blocking one terminates.
Pid8 = spawn_link(?MODULE, lserv, [n]),
@@ -2209,7 +2207,7 @@ close_block(Conf) when is_list(Conf) ->
0 = sync_do(Pid9, users),
sync_do(Pid9, terminate),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking user closes.
Pid10 = spawn_link(?MODULE, lserv, [n]),
@@ -2227,7 +2225,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
sync_do(Pid10, terminate),
{error, no_such_log} = disk_log:info(n),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking user unblocks and closes.
Pid11 = spawn_link(?MODULE, lserv, [n]),
@@ -2246,7 +2244,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
sync_do(Pid11, terminate),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner closes.
Pid12 = spawn_link(?MODULE, lserv, [n]),
@@ -2265,7 +2263,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
sync_do(Pid12, terminate),
- true = (P0 == pps()),
+ check_pps(P0),
%% Blocking owner unblocks and closes.
Pid13 = spawn_link(?MODULE, lserv, [n]),
@@ -2285,7 +2283,7 @@ close_block(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
{error, no_such_log} = disk_log:info(n),
sync_do(Pid13, terminate),
- true = (P0 == pps()),
+ check_pps(P0),
del(File, No), % cleanup
ok.
@@ -2489,10 +2487,11 @@ error_repair(Conf) when is_list(Conf) ->
P0 = pps(),
{error, {file_error, _, _}} =
disk_log:open([{name, n}, {file, File}, {type, wrap}, {size,{40,4}}]),
- true = (P0 == pps()),
+ check_pps(P0),
del(File, No),
ok = file:del_dir(Dir),
+ error_logger:add_report_handler(?MODULE, self()),
%% repair a file
P1 = pps(),
{ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
@@ -2507,8 +2506,10 @@ error_repair(Conf) when is_list(Conf) ->
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {40,No}}]),
ok = disk_log:close(n),
- true = (P1 == pps()),
+ check_pps(P1),
del(File, No),
+ receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
+ after 1000 -> ct:fail(failed) end,
%% yet another repair
P2 = pps(),
@@ -2523,8 +2524,10 @@ error_repair(Conf) when is_list(Conf) ->
disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, internal}, {size, {4000,No}}]),
ok = disk_log:close(n),
- true = (P2 == pps()),
+ check_pps(P2),
del(File, No),
+ receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
+ after 1000 -> ct:fail(failed) end,
%% Repair, large term
Big = term_to_binary(lists:duplicate(66000,$a)),
@@ -2540,6 +2543,8 @@ error_repair(Conf) when is_list(Conf) ->
ok = disk_log:close(n),
Got = Big,
del(File, No),
+ receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
+ after 1000 -> ct:fail(failed) end,
%% A term a little smaller than a chunk, then big terms.
BigSmall = mk_bytes(1024*64-8-12),
@@ -2560,6 +2565,8 @@ error_repair(Conf) when is_list(Conf) ->
{type, halt}, {format, internal}]),
ok = disk_log:close(n),
file:delete(File),
+ receive {info_msg, _, "disk_log: repairing" ++ _, _} -> ok
+ after 1000 -> ct:fail(failed) end,
%% The header is recovered.
{ok,n} =
@@ -2573,12 +2580,13 @@ error_repair(Conf) when is_list(Conf) ->
crash(File, 30),
{repaired,n,{recovered,3},{badbytes,16}} =
disk_log:open([{name, n}, {file, File}, {type, halt},
- {format, internal},{repair,true},
+ {format, internal},{repair,true}, {quiet, true},
{head_func, {?MODULE, head_fun, [{ok,"head"}]}}]),
["head",'of',terms] = get_all_terms(n),
ok = disk_log:close(n),
-
+ error_logger:delete_report_handler(?MODULE),
file:delete(File),
+ {messages, []} = process_info(self(), messages),
ok.
@@ -2625,7 +2633,7 @@ error_log(Conf) when is_list(Conf) ->
{ok, n} = disk_log:open([{name, n}, {file, File}, {type, wrap},
{format, external},{size, {100, No}}]),
{error, {file_error, _, _}} = disk_log:truncate(n),
- true = (P0 == pps()),
+ check_pps(P0),
del(File, No),
%% OTP-4880.
@@ -2633,7 +2641,7 @@ error_log(Conf) when is_list(Conf) ->
{ok, n} = disk_log:open([{name, n}, {file, File}, {type, halt},
{format, external},{size, 100000}]),
{error, {file_error, _, eisdir}} = disk_log:reopen(n, LDir),
- true = (P0 == pps()),
+ check_pps(P0),
file:delete(File),
B = mk_bytes(60),
@@ -2995,7 +3003,7 @@ error_index(Conf) when is_list(Conf) ->
{error, {invalid_index_file, _}} = disk_log:open(Args),
del(File, No),
- true = (P0 == pps()),
+ check_pps(P0),
true = (Q == qlen()),
ok.
@@ -4428,7 +4436,7 @@ dist_open2(Conf) when is_list(Conf) ->
timer:sleep(500),
file:delete(File),
- true = (P0 == pps()),
+ check_pps(P0),
%% This time the first process has a naughty head_func. This test
%% does not add very much. Perhaps it should be removed. However,
@@ -4474,7 +4482,7 @@ dist_open2(Conf) when is_list(Conf) ->
timer:sleep(100),
{error, no_such_log} = disk_log:close(Log),
file:delete(File),
- true = (P0 == pps()),
+ check_pps(P0),
No = 2,
Log2 = n2,
@@ -4503,7 +4511,7 @@ dist_open2(Conf) when is_list(Conf) ->
file:delete(File2),
del(File, No),
- true = (P0 == pps()),
+ check_pps(P0),
R.
@@ -4548,7 +4556,7 @@ dist_open2_1(Conf, Delay) ->
{error, no_such_log} = disk_log:info(Log),
file:delete(File),
- true = (P0 == pps()),
+ check_pps(P0),
ok.
@@ -4605,7 +4613,7 @@ dist_open2_2(Conf, Delay) ->
{[{Node1,{repaired,_,_,_}}],[]}} -> ok
end,
- true = (P0 == pps()),
+ check_pps(P0),
stop_node(Node1),
file:delete(File),
ok.
@@ -4666,119 +4674,6 @@ other_groups(Conf) when is_list(Conf) ->
ok.
--define(MAX, 16384). % MAX in disk_log_1.erl
-%% Evil cases such as closed file descriptor port.
-evil(Conf) when is_list(Conf) ->
- Dir = ?privdir(Conf),
- File = filename:join(Dir, "n.LOG"),
- Log = n,
-
- %% Not a very thorough test.
-
- ok = setup_evil_filled_cache_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:log(Log, apa),
- ok = disk_log:close(Log),
-
- ok = setup_evil_filled_cache_halt(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:truncate(Log, apa),
- ok = stop_evil(Log),
-
- %% White box test.
- file:delete(File),
- Ports0 = erlang:ports(),
- {ok, Log} = disk_log:open([{name,Log},{file,File},{type,halt},
- {size,?MAX+50},{format,external}]),
- [Fd] = erlang:ports() -- Ports0,
- {B,_} = x_mk_bytes(30),
- ok = disk_log:blog(Log, <<0:(?MAX+1)/unit:8>>),
- exit(Fd, kill),
- {error, {file_error,_,einval}} = disk_log:blog_terms(Log, [B,B]),
- ok= disk_log:close(Log),
- file:delete(File),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:close(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:log(Log, apa),
- ok = stop_evil(Log),
-
- ok = setup_evil_halt(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:log(Log, apa),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:reopen(Log, apa),
- {error, {file_error,_,einval}} = disk_log:reopen(Log, apa),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:reopen(Log, apa),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:inc_wrap_file(Log),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:chunk(Log, start),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:truncate(Log),
- ok = stop_evil(Log),
-
- ok = setup_evil_wrap(Log, Dir),
- {error, {file_error,_,einval}} = disk_log:chunk_step(Log, start, 1),
- ok = stop_evil(Log),
-
- io:format("messages: ~p~n", [erlang:process_info(self(), messages)]),
- del(File, 2),
- file:delete(File),
- ok.
-
-setup_evil_wrap(Log, Dir) ->
- setup_evil(Log, [{type,wrap},{size,{100,2}}], Dir).
-
-setup_evil_halt(Log, Dir) ->
- setup_evil(Log, [{type,halt},{size,10000}], Dir).
-
-setup_evil(Log, Args, Dir) ->
- File = filename:join(Dir, lists:concat([Log, ".LOG"])),
- file:delete(File),
- del(File, 2),
- ok = disk_log:start(),
- Ports0 = erlang:ports(),
- {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]),
- [Fd] = erlang:ports() -- Ports0,
- exit(Fd, kill),
- ok = disk_log:log_terms(n, [<<0:10/unit:8>>]),
- timer:sleep(2500), % TIMEOUT in disk_log_1.erl is 2000
- ok.
-
-stop_evil(Log) ->
- {error, _} = disk_log:close(Log),
- ok.
-
-setup_evil_filled_cache_wrap(Log, Dir) ->
- setup_evil_filled_cache(Log, [{type,wrap},{size,{?MAX,2}}], Dir).
-
-setup_evil_filled_cache_halt(Log, Dir) ->
- setup_evil_filled_cache(Log, [{type,halt},{size,infinity}], Dir).
-
-%% The cache is filled, and the file descriptor port gone.
-setup_evil_filled_cache(Log, Args, Dir) ->
- File = filename:join(Dir, lists:concat([Log, ".LOG"])),
- file:delete(File),
- del(File, 2),
- ok = disk_log:start(),
- Ports0 = erlang:ports(),
- {ok, Log} = disk_log:open([{name,Log},{file,File} | Args]),
- [Fd] = erlang:ports() -- Ports0,
- ok = disk_log:log_terms(n, [<<0:?MAX/unit:8>>]),
- exit(Fd, kill),
- ok.
-
%% OTP-6278. open/1 creates no status or crash report.
otp_6278(Conf) when is_list(Conf) ->
Dir = ?privdir(Conf),
@@ -4896,10 +4791,59 @@ log(Name, N) ->
format_error(E) ->
lists:flatten(disk_log:format_error(E)).
+check_pps({Ports0,Procs0} = P0) ->
+ case pps() of
+ P0 ->
+ ok;
+ _ ->
+ timer:sleep(500),
+ case pps() of
+ P0 ->
+ ok;
+ {Ports1,Procs1} = P1 ->
+ case {Ports1 -- Ports0, Procs1 -- Procs0} of
+ {[], []} -> ok;
+ {PortsDiff,ProcsDiff} ->
+ io:format("failure, got ~p~n, expected ~p\n", [P1, P0]),
+ show("Old port", Ports0 -- Ports1),
+ show("New port", PortsDiff),
+ show("Old proc", Procs0 -- Procs1),
+ show("New proc", ProcsDiff),
+ ct:fail(failed)
+ end
+ end
+ end.
+
+show(_S, []) ->
+ ok;
+show(S, [{Pid, Name, InitCall}|Pids]) when is_pid(Pid) ->
+ io:format("~s: ~w (~w), ~w: ~p~n",
+ [S, Pid, proc_reg_name(Name), InitCall,
+ erlang:process_info(Pid)]),
+ show(S, Pids);
+show(S, [{Port, _}|Ports]) when is_port(Port)->
+ io:format("~s: ~w: ~p~n", [S, Port, erlang:port_info(Port)]),
+ show(S, Ports).
+
pps() ->
timer:sleep(100),
- {erlang:ports(), lists:filter(fun(P) -> erlang:is_process_alive(P) end,
- processes())}.
+ {port_list(), process_list()}.
+
+port_list() ->
+ [{P,safe_second_element(erlang:port_info(P, name))} ||
+ P <- erlang:ports()].
+
+process_list() ->
+ [{P,process_info(P, registered_name),
+ safe_second_element(process_info(P, initial_call))} ||
+ P <- processes(), erlang:is_process_alive(P)].
+
+proc_reg_name({registered_name, Name}) -> Name;
+proc_reg_name([]) -> no_reg_name.
+
+safe_second_element({_,Info}) -> Info;
+safe_second_element(Other) -> Other.
+
qlen() ->
{_, {_, N}} = lists:keysearch(message_queue_len, 1, process_info(self())),
@@ -5007,6 +4951,9 @@ init(Tester) ->
handle_event({error_report, _GL, {Pid, crash_report, Report}}, Tester) ->
Tester ! {crash_report, Pid, Report},
{ok, Tester};
+handle_event({info_msg, _GL, {Pid, F,A}}, Tester) ->
+ Tester ! {info_msg, Pid, F, A},
+ {ok, Tester};
handle_event(_Event, State) ->
{ok, State}.
diff --git a/lib/kernel/test/erl_distribution_SUITE.erl b/lib/kernel/test/erl_distribution_SUITE.erl
index e43be77428..c3a022df0a 100644
--- a/lib/kernel/test/erl_distribution_SUITE.erl
+++ b/lib/kernel/test/erl_distribution_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -24,7 +24,10 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2]).
--export([tick/1, tick_change/1, illegal_nodenames/1, hidden_node/1,
+-export([tick/1, tick_change/1,
+ connect_node/1,
+ nodenames/1, hostnames/1,
+ illegal_nodenames/1, hidden_node/1,
setopts/1,
table_waste/1, net_setuptime/1,
inet_dist_options_options/1,
@@ -37,7 +40,8 @@
monitor_nodes_errors/1,
monitor_nodes_combinations/1,
monitor_nodes_cleanup/1,
- monitor_nodes_many/1]).
+ monitor_nodes_many/1,
+ dist_ctrl_proc_smoke/1]).
%% Performs the test at another node.
-export([get_socket_priorities/0,
@@ -49,11 +53,10 @@
-export([init_per_testcase/2, end_per_testcase/2]).
--export([start_node/2]).
+-export([dist_cntrlr_output_test/2]).
-export([pinger/1]).
-
-define(DUMMY_NODE,dummy@test01).
%%-----------------------------------------------------------------
@@ -65,11 +68,13 @@
suite() ->
[{ct_hooks,[ts_install_cth]},
- {timetrap,{minutes,4}}].
+ {timetrap,{minutes,12}}].
all() ->
- [tick, tick_change, illegal_nodenames, hidden_node,
- setopts,
+ [dist_ctrl_proc_smoke,
+ tick, tick_change, nodenames, hostnames, illegal_nodenames,
+ connect_node,
+ hidden_node, setopts,
table_waste, net_setuptime, inet_dist_options_options,
{group, monitor_nodes}].
@@ -86,6 +91,7 @@ init_per_suite(Config) ->
Config.
end_per_suite(_Config) ->
+ [slave:stop(N) || N <- nodes()],
ok.
init_per_group(_GroupName, Config) ->
@@ -94,18 +100,30 @@ init_per_group(_GroupName, Config) ->
end_per_group(_GroupName, Config) ->
Config.
-
+init_per_testcase(TC, Config) when TC == hostnames;
+ TC == nodenames ->
+ file:make_dir("hostnames_nodedir"),
+ file:write_file("hostnames_nodedir/ignore_core_files",""),
+ Config;
init_per_testcase(Func, Config) when is_atom(Func), is_list(Config) ->
Config.
end_per_testcase(_Func, _Config) ->
ok.
+connect_node(Config) when is_list(Config) ->
+ Connected = nodes(connected),
+ true = net_kernel:connect_node(node()),
+ Connected = nodes(connected),
+ ok.
+
tick(Config) when is_list(Config) ->
- PaDir = filename:dirname(code:which(erl_distribution_SUITE)),
+ run_dist_configs(fun tick/2, Config).
+tick(DCfg, _Config) ->
%% First check that the normal case is OK!
- {ok, Node} = start_node(dist_test, "-pa " ++ PaDir),
+ [Name1, Name2] = get_nodenames(2, dist_test),
+ {ok, Node} = start_node(DCfg, Name1),
rpc:call(Node, erl_distribution_SUITE, tick_cli_test, [node()]),
erlang:monitor_node(Node, true),
@@ -129,14 +147,12 @@ tick(Config) when is_list(Config) ->
%% Set the ticktime on the server node to 100 secs so the server
%% node doesn't tick the client node within the interval ...
- {ok, ServNode} = start_node(dist_test_server,
- "-kernel net_ticktime 100 "
- "-pa " ++ PaDir),
+ {ok, ServNode} = start_node(DCfg, Name2,
+ "-kernel net_ticktime 100"),
rpc:call(ServNode, erl_distribution_SUITE, tick_serv_test, [Node, node()]),
- {ok, _} = start_node(dist_test,
- "-kernel net_ticktime 12 "
- "-pa " ++ PaDir),
+ {ok, Node} = start_node(DCfg, Name1,
+ "-kernel net_ticktime 12"),
rpc:call(Node, erl_distribution_SUITE, tick_cli_test, [ServNode]),
spawn_link(erl_distribution_SUITE, keep_conn, [Node]),
@@ -166,6 +182,9 @@ tick(Config) when is_list(Config) ->
%% Checks that pinging nonexistent nodes does not waste space in the distribution table.
table_waste(Config) when is_list(Config) ->
+ run_dist_configs(fun table_waste/2, Config).
+
+table_waste(DCfg, _Config) ->
{ok, HName} = inet:gethostname(),
F = fun(0,_F) -> [];
(N,F) ->
@@ -175,21 +194,128 @@ table_waste(Config) when is_list(Config) ->
F(N-1,F)
end,
F(256,F),
- {ok, N} = start_node(erl_distribution_300,""),
+ {ok, N} = start_node(DCfg, erl_distribution_300),
stop_node(N),
ok.
+%% Test that starting nodes with different legal name parts works, and that
+%% illegal ones are filtered out
+nodenames(Config) when is_list(Config) ->
+ legal("a1@b"),
+ legal("a-1@b"),
+ legal("a_1@b"),
+
+    %% Test that giving two -sname options works as it should
+ test_node("a_1@b", false, long_or_short() ++ "a_0@b"),
+
+ illegal("cdé@a"),
+ illegal("te欢st@a").
+
+%% Test that starting nodes with different legal host parts works, and that
+%% illegal ones are filtered out
+hostnames(Config) when is_list(Config) ->
+ Host = gethostname(),
+ legal([$a,$@|atom_to_list(Host)]),
+ legal("1@b1"),
+ legal("b@b1-c"),
+ legal("c@b1_c"),
+ legal("d@b1#c"),
+ legal("f@::1"),
+ legal("g@1:bc3:4e3f:f20:0:1"),
+
+ case file:native_name_encoding() of
+ latin1 -> ignore;
+ _ -> legal("e@b1é")
+ end,
+ long_hostnames(net_kernel:longnames()),
+
+ illegal("h@testالع"),
+ illegal("i@языtest"),
+ illegal("j@te欢st").
+
+long_hostnames(true) ->
+ legal("[email protected]"),
+ legal("[email protected]"),
+ legal("[email protected]_c.d"),
+ legal("[email protected]"),
+ legal("[email protected]");
+long_hostnames(false) ->
+ illegal("[email protected]").
+
+legal(Name) ->
+ case test_node(Name) of
+ started ->
+ ok;
+ not_started ->
+ ct:fail("no ~p node started", [Name])
+ end.
+
+illegal(Name) ->
+ case test_node(Name, true) of
+ not_started ->
+ ok;
+ started ->
+ ct:fail("~p node started with illegal name", [Name])
+ end.
+test_node(Name) ->
+ test_node(Name, false).
+test_node(Name, Illigal) ->
+ test_node(Name, Illigal, "").
+test_node(Name, Illigal, ExtraArgs) ->
+ ProgName = ct:get_progname(),
+ Command = ProgName ++ " -noinput " ++ ExtraArgs ++
+ long_or_short() ++ Name ++
+ " -eval \"net_adm:ping('" ++ atom_to_list(node()) ++ "')\"" ++
+ case Illigal of
+ true ->
+ " -eval \"timer:sleep(10000),init:stop().\"";
+ false ->
+ ""
+ end,
+ net_kernel:monitor_nodes(true),
+ BinCommand = unicode:characters_to_binary(Command, utf8),
+ Prt = open_port({spawn, BinCommand}, [stream,{cd,"hostnames_nodedir"}]),
+ Node = list_to_atom(Name),
+ receive
+ {nodeup, Node} ->
+ net_kernel:monitor_nodes(false),
+ slave:stop(Node),
+ started
+ after 5000 ->
+ net_kernel:monitor_nodes(false),
+ not_started
+ end.
+
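%% Illustrative note (not from this patch): for a short-named node the command
%% assembled by test_node/3 above resembles the following, where the program
%% name comes from ct:get_progname/0 (typically "erl") and tester@myhost is a
%% placeholder for the node running this suite:
%%
%%   erl -noinput -sname a1@b -eval "net_adm:ping('tester@myhost')"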
+long_or_short() ->
+ case net_kernel:longnames() of
+ true -> " -name ";
+ false -> " -sname "
+ end.
+
+%% Get the local host's name, depending on the name policy in use
+gethostname() ->
+ Hostname = case net_kernel:longnames() of
+        true ->
+            net_adm:localhost();
+        _ ->
+            {ok, Name} = inet:gethostname(),
+ Name
+ end,
+ list_to_atom(Hostname).
%% Test that pinging an illegal nodename does not kill the node.
illegal_nodenames(Config) when is_list(Config) ->
- PaDir = filename:dirname(code:which(erl_distribution_SUITE)),
- {ok, Node}=start_node(illegal_nodenames, "-pa " ++ PaDir),
+ run_dist_configs(fun illegal_nodenames/2, Config).
+
+illegal_nodenames(DCfg, _Config) ->
+ {ok, Node}=start_node(DCfg, illegal_nodenames),
monitor_node(Node, true),
RPid=rpc:call(Node, erlang, spawn,
[?MODULE, pinger, [self()]]),
receive
{RPid, pinged} ->
+ monitor_node(Node, false),
ok;
{nodedown, Node} ->
ct:fail("Remote node died.")
@@ -206,22 +332,25 @@ pinger(Starter) ->
%% Test that you can set the net_setuptime properly.
net_setuptime(Config) when is_list(Config) ->
+ run_dist_configs(fun net_setuptime/2, Config).
+
+net_setuptime(DCfg, _Config) ->
+
%% In this test case, we reluctantly accept shorter times than the given
%% setup time, because the connection attempt can end in a
%% "Host unreachable" error before the timeout fires.
- Res0 = do_test_setuptime("2"),
+ Res0 = do_test_setuptime(DCfg, "2"),
io:format("Res0 = ~p", [Res0]),
true = (Res0 =< 4000),
- Res1 = do_test_setuptime("0.3"),
+ Res1 = do_test_setuptime(DCfg, "0.3"),
io:format("Res1 = ~p", [Res1]),
true = (Res1 =< 500),
ok.
-do_test_setuptime(Setuptime) when is_list(Setuptime) ->
- PaDir = filename:dirname(code:which(?MODULE)),
- {ok, Node} = start_node(dist_setuptime_test, "-pa " ++ PaDir ++
- " -kernel net_setuptime " ++ Setuptime),
+do_test_setuptime(DCfg, Setuptime) when is_list(Setuptime) ->
+ {ok, Node} = start_node(DCfg, dist_setuptime_test,
+ "-kernel net_setuptime " ++ Setuptime),
Res = rpc:call(Node,?MODULE,time_ping,[?DUMMY_NODE]),
stop_node(Node),
Res.
@@ -230,10 +359,10 @@ time_ping(Node) ->
T0 = erlang:monotonic_time(),
pang = net_adm:ping(Node),
T1 = erlang:monotonic_time(),
- erlang:convert_time_unit(T1 - T0, native, milli_seconds).
+ erlang:convert_time_unit(T1 - T0, native, millisecond).
%% Keep the connection with the client node up.
-%% This is neccessary as the client node runs with much shorter
+%% This is necessary as the client node runs with much shorter
%% tick time !!
keep_conn(Node) ->
sleep(1),
@@ -274,7 +403,7 @@ tick_cli_test1(Node) ->
receive
{whats_the_result, From} ->
Diff = erlang:convert_time_unit(T2-T1, native,
- milli_seconds),
+ millisecond),
case Diff of
T when T > 8000, T < 16000 ->
From ! {tick_test, T};
@@ -287,32 +416,36 @@ tick_cli_test1(Node) ->
end.
setopts(Config) when is_list(Config) ->
+ run_dist_configs(fun setopts/2, Config).
+
+setopts(DCfg, _Config) ->
register(setopts_regname, self()),
[N1,N2,N3,N4] = get_nodenames(4, setopts),
- {_N1F,Port1} = start_node_unconnected(N1, ?MODULE, run_remote_test,
+ {_N1F,Port1} = start_node_unconnected(DCfg, N1, ?MODULE, run_remote_test,
["setopts_do", atom_to_list(node()), "1", "ping"]),
0 = wait_for_port_exit(Port1),
- {_N2F,Port2} = start_node_unconnected(N2, ?MODULE, run_remote_test,
+ {_N2F,Port2} = start_node_unconnected(DCfg, N2, ?MODULE, run_remote_test,
["setopts_do", atom_to_list(node()), "2", "ping"]),
0 = wait_for_port_exit(Port2),
{ok, LSock} = gen_tcp:listen(0, [{packet,2}, {active,false}]),
{ok, LTcpPort} = inet:port(LSock),
- {N3F,Port3} = start_node_unconnected(N3, ?MODULE, run_remote_test,
+ {N3F,Port3} = start_node_unconnected(DCfg, N3, ?MODULE, run_remote_test,
["setopts_do", atom_to_list(node()),
"1", integer_to_list(LTcpPort)]),
wait_and_connect(LSock, N3F, Port3),
0 = wait_for_port_exit(Port3),
- {N4F,Port4} = start_node_unconnected(N4, ?MODULE, run_remote_test,
+ {N4F,Port4} = start_node_unconnected(DCfg, N4, ?MODULE, run_remote_test,
["setopts_do", atom_to_list(node()),
"2", integer_to_list(LTcpPort)]),
wait_and_connect(LSock, N4F, Port4),
0 = wait_for_port_exit(Port4),
+ unregister(setopts_regname),
ok.
wait_and_connect(LSock, NodeName, NodePort) ->
@@ -360,9 +493,9 @@ run_remote_test([FuncStr, TestNodeStr | Args]) ->
1
end
catch
- C:E ->
+ C:E:S ->
io:format("Node ~p got EXCEPTION ~p:~p\nat ~p\n",
- [node(), C, E, erlang:get_stacktrace()]),
+ [node(), C, E, S]),
2
end,
io:format("Node ~p doing halt(~p).\n",[node(), Status]),
@@ -406,9 +539,9 @@ opt_from_nr("2") -> {nodelay, false}.
change_val(true) -> false;
change_val(false) -> true.
-start_node_unconnected(Name, Mod, Func, Args) ->
+start_node_unconnected(DCfg, Name, Mod, Func, Args) ->
FullName = full_node_name(Name),
- CmdLine = mk_node_cmdline(Name,Mod,Func,Args),
+ CmdLine = mk_node_cmdline(DCfg, Name,Mod,Func,Args),
io:format("Starting node ~p: ~s~n", [FullName, CmdLine]),
case open_port({spawn, CmdLine}, [exit_status]) of
Port when is_port(Port) ->
@@ -422,7 +555,7 @@ full_node_name(PreName) ->
atom_to_list(node())),
list_to_atom(atom_to_list(PreName) ++ HostSuffix).
-mk_node_cmdline(Name,Mod,Func,Args) ->
+mk_node_cmdline(DCfg, Name,Mod,Func,Args) ->
Static = "-noinput",
Pa = filename:dirname(code:which(?MODULE)),
Prog = case catch init:get_argument(progname) of
@@ -439,6 +572,7 @@ mk_node_cmdline(Name,Mod,Func,Args) ->
Prog ++ " "
++ Static ++ " "
++ NameSw ++ " " ++ NameStr
+ ++ " " ++ DCfg
++ " -pa " ++ Pa
++ " -env ERL_CRASH_DUMP " ++ Pwd ++ "/erl_crash_dump." ++ NameStr
++ " -setcookie " ++ atom_to_list(erlang:get_cookie())
@@ -448,7 +582,9 @@ mk_node_cmdline(Name,Mod,Func,Args) ->
%% OTP-4255.
tick_change(Config) when is_list(Config) ->
- PaDir = filename:dirname(code:which(?MODULE)),
+ run_dist_configs(fun tick_change/2, Config).
+
+tick_change(DCfg, _Config) ->
[BN, CN] = get_nodenames(2, tick_change),
DefaultTT = net_kernel:get_net_ticktime(),
unchanged = net_kernel:set_net_ticktime(DefaultTT, 60),
@@ -465,14 +601,13 @@ tick_change(Config) when is_list(Config) ->
end,
wait_until(fun () -> 10 == net_kernel:get_net_ticktime() end),
- {ok, B} = start_node(BN, "-kernel net_ticktime 10 -pa " ++ PaDir),
- {ok, C} = start_node(CN, "-kernel net_ticktime 10 -hidden -pa "
- ++ PaDir),
+ {ok, B} = start_node(DCfg, BN, "-kernel net_ticktime 10"),
+ {ok, C} = start_node(DCfg, CN, "-kernel net_ticktime 10 -hidden"),
OTE = process_flag(trap_exit, true),
case catch begin
- run_tick_change_test(B, C, 10, 1, PaDir),
- run_tick_change_test(B, C, 1, 10, PaDir)
+ run_tick_change_test(DCfg, B, C, 10, 1),
+ run_tick_change_test(DCfg, B, C, 1, 10)
end of
{'EXIT', Reason} ->
stop_node(B),
@@ -514,7 +649,7 @@ wait_for_nodedowns(Tester, Ref) ->
end,
wait_for_nodedowns(Tester, Ref).
-run_tick_change_test(B, C, PrevTT, TT, PaDir) ->
+run_tick_change_test(DCfg, B, C, PrevTT, TT) ->
[DN, EN] = get_nodenames(2, tick_change),
Tester = self(),
@@ -528,8 +663,8 @@ run_tick_change_test(B, C, PrevTT, TT, PaDir) ->
wait_for_nodedowns(Tester, Ref)
end,
- {ok, D} = start_node(DN, "-kernel net_ticktime "
- ++ integer_to_list(PrevTT) ++ " -pa " ++ PaDir),
+ {ok, D} = start_node(DCfg, DN, "-kernel net_ticktime "
+ ++ integer_to_list(PrevTT)),
NMA = spawn_link(fun () -> MonitorNodes([B, C, D]) end),
NMB = spawn_link(B, fun () -> MonitorNodes([node(), C, D]) end),
@@ -562,8 +697,8 @@ run_tick_change_test(B, C, PrevTT, TT, PaDir) ->
sleep(7),
change_initiated = rpc:call(C,net_kernel,set_net_ticktime,[TT,10]),
- {ok, E} = start_node(EN, "-kernel net_ticktime "
- ++ integer_to_list(TT) ++ " -pa " ++ PaDir),
+ {ok, E} = start_node(DCfg, EN, "-kernel net_ticktime "
+ ++ integer_to_list(TT)),
NME = spawn_link(E, fun () -> MonitorNodes([node(), B, C, D]) end),
NMA2 = spawn_link(fun () -> MonitorNodes([E]) end),
NMB2 = spawn_link(B, fun () -> MonitorNodes([E]) end),
@@ -623,12 +758,13 @@ run_tick_change_test(B, C, PrevTT, TT, PaDir) ->
%%
%% Basic test of hidden node.
hidden_node(Config) when is_list(Config) ->
- PaDir = filename:dirname(code:which(?MODULE)),
- VArgs = "-pa " ++ PaDir,
- HArgs = "-hidden -pa " ++ PaDir,
- {ok, V} = start_node(visible_node, VArgs),
+ run_dist_configs(fun hidden_node/2, Config).
+
+hidden_node(DCfg, _Config) ->
+ HArgs = "-hidden",
+ {ok, V} = start_node(DCfg, visible_node),
VMN = start_monitor_nodes_proc(V),
- {ok, H} = start_node(hidden_node, HArgs),
+ {ok, H} = start_node(DCfg, hidden_node, HArgs),
%% Connect visible_node -> hidden_node
connect_nodes(V, H),
test_nodes(V, H),
@@ -636,9 +772,9 @@ hidden_node(Config) when is_list(Config) ->
sleep(5),
check_monitor_nodes_res(VMN, H),
stop_node(V),
- {ok, H} = start_node(hidden_node, HArgs),
+ {ok, H} = start_node(DCfg, hidden_node, HArgs),
HMN = start_monitor_nodes_proc(H),
- {ok, V} = start_node(visible_node, VArgs),
+ {ok, V} = start_node(DCfg, visible_node),
%% Connect hidden_node -> visible_node
connect_nodes(H, V),
test_nodes(V, H),
@@ -738,9 +874,9 @@ do_inet_dist_options_options(Prio) ->
"-kernel inet_dist_connect_options "++PriorityString++" "
"-kernel inet_dist_listen_options "++PriorityString,
{ok,Node1} =
- start_node(inet_dist_options_1, InetDistOptions),
+ start_node("", inet_dist_options_1, InetDistOptions),
{ok,Node2} =
- start_node(inet_dist_options_2, InetDistOptions),
+ start_node("", inet_dist_options_2, InetDistOptions),
%%
pong =
rpc:call(Node1, net_adm, ping, [Node2]),
@@ -773,6 +909,9 @@ get_socket_priorities() ->
%%
monitor_nodes_nodedown_reason(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_nodedown_reason/2, Config).
+
+monitor_nodes_nodedown_reason(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
ok = net_kernel:monitor_nodes(true),
ok = net_kernel:monitor_nodes(true, [nodedown_reason]),
@@ -780,10 +919,10 @@ monitor_nodes_nodedown_reason(Config) when is_list(Config) ->
Names = get_numbered_nodenames(5, node),
[NN1, NN2, NN3, NN4, NN5] = Names,
- {ok, N1} = start_node(NN1),
- {ok, N2} = start_node(NN2),
- {ok, N3} = start_node(NN3),
- {ok, N4} = start_node(NN4, "-hidden"),
+ {ok, N1} = start_node(DCfg, NN1),
+ {ok, N2} = start_node(DCfg, NN2),
+ {ok, N3} = start_node(DCfg, NN3),
+ {ok, N4} = start_node(DCfg, NN4, "-hidden"),
receive {nodeup, N1} -> ok end,
receive {nodeup, N2} -> ok end,
@@ -813,7 +952,7 @@ monitor_nodes_nodedown_reason(Config) when is_list(Config) ->
ok = net_kernel:monitor_nodes(false, [nodedown_reason]),
- {ok, N5} = start_node(NN5),
+ {ok, N5} = start_node(DCfg, NN5),
stop_node(N5),
receive {nodeup, N5} -> ok end,
@@ -826,11 +965,14 @@ monitor_nodes_nodedown_reason(Config) when is_list(Config) ->
monitor_nodes_complex_nodedown_reason(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_complex_nodedown_reason/2, Config).
+
+monitor_nodes_complex_nodedown_reason(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
Me = self(),
ok = net_kernel:monitor_nodes(true, [nodedown_reason]),
[Name] = get_nodenames(1, monitor_nodes_complex_nodedown_reason),
- {ok, Node} = start_node(Name, ""),
+ {ok, Node} = start_node(DCfg, Name, ""),
Pid = spawn(Node,
fun() ->
Me ! {stuff,
@@ -869,16 +1011,19 @@ monitor_nodes_complex_nodedown_reason(Config) when is_list(Config) ->
%%
monitor_nodes_node_type(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_node_type/2, Config).
+
+monitor_nodes_node_type(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
ok = net_kernel:monitor_nodes(true),
ok = net_kernel:monitor_nodes(true, [{node_type, all}]),
Names = get_numbered_nodenames(9, node),
[NN1, NN2, NN3, NN4, NN5, NN6, NN7, NN8, NN9] = Names,
- {ok, N1} = start_node(NN1),
- {ok, N2} = start_node(NN2),
- {ok, N3} = start_node(NN3, "-hidden"),
- {ok, N4} = start_node(NN4, "-hidden"),
+ {ok, N1} = start_node(DCfg, NN1),
+ {ok, N2} = start_node(DCfg, NN2),
+ {ok, N3} = start_node(DCfg, NN3, "-hidden"),
+ {ok, N4} = start_node(DCfg, NN4, "-hidden"),
receive {nodeup, N1} -> ok end,
receive {nodeup, N2} -> ok end,
@@ -902,15 +1047,15 @@ monitor_nodes_node_type(Config) when is_list(Config) ->
receive {nodedown, N4, [{node_type, hidden}]} -> ok end,
ok = net_kernel:monitor_nodes(false, [{node_type, all}]),
- {ok, N5} = start_node(NN5),
+ {ok, N5} = start_node(DCfg, NN5),
receive {nodeup, N5} -> ok end,
stop_node(N5),
receive {nodedown, N5} -> ok end,
ok = net_kernel:monitor_nodes(true, [{node_type, hidden}]),
- {ok, N6} = start_node(NN6),
- {ok, N7} = start_node(NN7, "-hidden"),
+ {ok, N6} = start_node(DCfg, NN6),
+ {ok, N7} = start_node(DCfg, NN7, "-hidden"),
receive {nodeup, N6} -> ok end,
@@ -925,8 +1070,8 @@ monitor_nodes_node_type(Config) when is_list(Config) ->
ok = net_kernel:monitor_nodes(false, [{node_type, hidden}]),
ok = net_kernel:monitor_nodes(false),
- {ok, N8} = start_node(NN8),
- {ok, N9} = start_node(NN9, "-hidden"),
+ {ok, N8} = start_node(DCfg, NN8),
+ {ok, N9} = start_node(DCfg, NN9, "-hidden"),
receive {nodeup, N8, [{node_type, visible}]} -> ok end,
stop_node(N8),
@@ -946,6 +1091,9 @@ monitor_nodes_node_type(Config) when is_list(Config) ->
%%
monitor_nodes_misc(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_misc/2, Config).
+
+monitor_nodes_misc(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
ok = net_kernel:monitor_nodes(true),
ok = net_kernel:monitor_nodes(true, [{node_type, all}, nodedown_reason]),
@@ -953,8 +1101,8 @@ monitor_nodes_misc(Config) when is_list(Config) ->
Names = get_numbered_nodenames(3, node),
[NN1, NN2, NN3] = Names,
- {ok, N1} = start_node(NN1),
- {ok, N2} = start_node(NN2, "-hidden"),
+ {ok, N1} = start_node(DCfg, NN1),
+ {ok, N2} = start_node(DCfg, NN2, "-hidden"),
receive {nodeup, N1} -> ok end,
@@ -980,7 +1128,7 @@ monitor_nodes_misc(Config) when is_list(Config) ->
ok = net_kernel:monitor_nodes(false, [{node_type, all}, nodedown_reason]),
- {ok, N3} = start_node(NN3),
+ {ok, N3} = start_node(DCfg, NN3),
receive {nodeup, N3} -> ok end,
stop_node(N3),
receive {nodedown, N3} -> ok end,
@@ -995,15 +1143,18 @@ monitor_nodes_misc(Config) when is_list(Config) ->
%% messages from Node and that {nodedown, Node} messages are
%% received after messages from Node.
monitor_nodes_otp_6481(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_otp_6481/2, Config).
+
+monitor_nodes_otp_6481(DCfg, Config) ->
io:format("Testing nodedown...~n"),
- monitor_nodes_otp_6481_test(Config, nodedown),
+ monitor_nodes_otp_6481_test(DCfg, Config, nodedown),
io:format("ok~n"),
io:format("Testing nodeup...~n"),
- monitor_nodes_otp_6481_test(Config, nodeup),
+ monitor_nodes_otp_6481_test(DCfg, Config, nodeup),
io:format("ok~n"),
ok.
-monitor_nodes_otp_6481_test(Config, TestType) when is_list(Config) ->
+monitor_nodes_otp_6481_test(DCfg, Config, TestType) when is_list(Config) ->
MonNodeState = monitor_node_state(),
NodeMsg = make_ref(),
Me = self(),
@@ -1041,25 +1192,24 @@ monitor_nodes_otp_6481_test(Config, TestType) when is_list(Config) ->
TestMonNodeState = monitor_node_state(),
%% io:format("~p~n", [TestMonNodeState]),
TestMonNodeState =
- MonNodeState
+ case TestType of
+ nodedown -> [];
+ nodeup -> [{self(), []}]
+ end
+ ++ lists:map(fun (_) -> {MN, []} end, Seq)
++ case TestType of
nodedown -> [{self(), []}];
nodeup -> []
end
- ++ lists:map(fun (_) -> {MN, []} end, Seq)
- ++ case TestType of
- nodedown -> [];
- nodeup -> [{self(), []}]
- end,
+ ++ MonNodeState,
-
- {ok, Node} = start_node(Name, "", this),
+ {ok, Node} = start_node(DCfg, Name, "", this),
receive {nodeup, Node} -> ok end,
RemotePid = spawn(Node,
fun () ->
receive after 1500 -> ok end,
- %% infinit loop of msgs
+ %% infinite loop of msgs
%% we want an endless stream of messages and then kill
%% the node mercilessly.
%% We then want to ensure that the nodedown message arrives
@@ -1138,17 +1288,20 @@ monitor_nodes_errors(Config) when is_list(Config) ->
ok.
monitor_nodes_combinations(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_combinations/2, Config).
+
+monitor_nodes_combinations(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
monitor_nodes_all_comb(true),
[VisibleName, HiddenName] = get_nodenames(2,
monitor_nodes_combinations),
- {ok, Visible} = start_node(VisibleName, ""),
+ {ok, Visible} = start_node(DCfg, VisibleName, ""),
receive_all_comb_nodeup_msgs(visible, Visible),
no_msgs(),
stop_node(Visible),
receive_all_comb_nodedown_msgs(visible, Visible, connection_closed),
no_msgs(),
- {ok, Hidden} = start_node(HiddenName, "-hidden"),
+ {ok, Hidden} = start_node(DCfg, HiddenName, "-hidden"),
receive_all_comb_nodeup_msgs(hidden, Hidden),
no_msgs(),
stop_node(Hidden),
@@ -1284,6 +1437,9 @@ monitor_nodes_cleanup(Config) when is_list(Config) ->
ok.
monitor_nodes_many(Config) when is_list(Config) ->
+ run_dist_configs(fun monitor_nodes_many/2, Config).
+
+monitor_nodes_many(DCfg, _Config) ->
MonNodeState = monitor_node_state(),
[Name] = get_nodenames(1, monitor_nodes_many),
%% We want to perform more than 2^16 net_kernel:monitor_nodes
@@ -1291,7 +1447,7 @@ monitor_nodes_many(Config) when is_list(Config) ->
No = (1 bsl 16) + 17,
repeat(fun () -> ok = net_kernel:monitor_nodes(true) end, No),
No = length(monitor_node_state()) - length(MonNodeState),
- {ok, Node} = start_node(Name),
+ {ok, Node} = start_node(DCfg, Name),
repeat(fun () -> receive {nodeup, Node} -> ok end end, No),
stop_node(Node),
repeat(fun () -> receive {nodedown, Node} -> ok end end, No),
@@ -1300,8 +1456,118 @@ monitor_nodes_many(Config) when is_list(Config) ->
MonNodeState = monitor_node_state(),
ok.
+dist_ctrl_proc_smoke(Config) when is_list(Config) ->
+ ThisNode = node(),
+ [Name1, Name2] = get_nodenames(2, dist_ctrl_proc_example_smoke),
+ GetSizeArg = " -gen_tcp_dist_output_loop "
+ ++ atom_to_list(?MODULE) ++ " "
+ ++ "dist_cntrlr_output_test",
+ {ok, Node1} = start_node("", Name1, "-proto_dist gen_tcp"),
+ {ok, Node2} = start_node("", Name2, "-proto_dist gen_tcp" ++ GetSizeArg),
+ pong = rpc:call(Node1, net_adm, ping, [Node2]),
+ NL1 = lists:sort([ThisNode, Node2]),
+ NL2 = lists:sort([ThisNode, Node1]),
+ NL1 = lists:sort(rpc:call(Node1, erlang, nodes, [])),
+ NL2 = lists:sort(rpc:call(Node2, erlang, nodes, [])),
+
+ %% Verify that we actually are executing the distribution
+ %% module we expect and also massage message passing over
+ %% it a bit...
+ Ps1 = rpc:call(Node1, erlang, processes, []),
+ try
+ lists:foreach(
+ fun (P) ->
+ case rpc:call(Node1, erlang, process_info, [P, current_stacktrace]) of
+ undefined ->
+ ok;
+ {current_stacktrace, StkTrace} ->
+ lists:foreach(fun ({gen_tcp_dist,
+ dist_cntrlr_output_loop,
+ 2, _}) ->
+ io:format("~p ~p~n", [P, StkTrace]),
+ throw(found_it);
+ (_) ->
+ ok
+ end, StkTrace)
+ end
+ end, Ps1),
+ exit({missing, dist_cntrlr_output_loop})
+ catch
+ throw:found_it -> ok
+ end,
+
+ Ps2 = rpc:call(Node2, erlang, processes, []),
+ try
+ lists:foreach(
+ fun (P) ->
+ case rpc:call(Node2, erlang, process_info, [P, current_stacktrace]) of
+ undefined ->
+ ok;
+ {current_stacktrace, StkTrace} ->
+ lists:foreach(fun ({erl_distribution_SUITE,
+ dist_cntrlr_output_loop,
+ 2, _}) ->
+ io:format("~p ~p~n", [P, StkTrace]),
+ throw(found_it);
+ (_) ->
+ ok
+ end, StkTrace)
+ end
+ end, Ps2),
+ exit({missing, dist_cntrlr_output_loop})
+ catch
+ throw:found_it -> ok
+ end,
+
+ stop_node(Node1),
+ stop_node(Node2),
+ ok.
+
%% Misc. functions
+run_dist_configs(Func, Config) ->
+ GetSizeArg = " -gen_tcp_dist_output_loop "
+ ++ atom_to_list(?MODULE) ++ " "
+ ++ "dist_cntrlr_output_test",
+ lists:map(fun ({DCfgName, DCfg}) ->
+ io:format("~n~n=== Running ~s configuration ===~n~n",
+ [DCfgName]),
+ Func(DCfg, Config)
+ end,
+ [{"default", ""},
+ {"gen_tcp_dist", "-proto_dist gen_tcp"},
+ {"gen_tcp_dist (get_size)", "-proto_dist gen_tcp" ++ GetSizeArg}]).
+
+dist_cntrlr_output_test(DHandle, Socket) ->
+ false = erlang:dist_ctrl_get_opt(DHandle, get_size),
+ false = erlang:dist_ctrl_set_opt(DHandle, get_size, true),
+ true = erlang:dist_ctrl_get_opt(DHandle, get_size),
+ true = erlang:dist_ctrl_set_opt(DHandle, get_size, false),
+ false = erlang:dist_ctrl_get_opt(DHandle, get_size),
+ false = erlang:dist_ctrl_set_opt(DHandle, get_size, true),
+ true = erlang:dist_ctrl_get_opt(DHandle, get_size),
+ dist_cntrlr_output_loop(DHandle, Socket).
+
+dist_cntrlr_send_data(DHandle, Socket) ->
+ case erlang:dist_ctrl_get_data(DHandle) of
+ none ->
+ erlang:dist_ctrl_get_data_notification(DHandle);
+ {Size, Data} ->
+ Size = erlang:iolist_size(Data),
+ ok = gen_tcp:send(Socket, Data),
+ dist_cntrlr_send_data(DHandle, Socket)
+ end.
+
+dist_cntrlr_output_loop(DHandle, Socket) ->
+ receive
+ dist_data ->
+ %% Outgoing data from this node...
+ dist_cntrlr_send_data(DHandle, Socket);
+ _ ->
+ ok %% Drop garbage message...
+ end,
+ dist_cntrlr_output_loop(DHandle, Socket).
+
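%% Illustrative note (not from this patch): enabling the get_size option via
%% erlang:dist_ctrl_set_opt(DHandle, get_size, true) makes
%% erlang:dist_ctrl_get_data/1 return {Size, Data} rather than just Data,
%% which is why dist_cntrlr_send_data/2 above can assert
%% Size = erlang:iolist_size(Data).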
monitor_node_state() ->
erts_debug:set_internal_state(available_internal_state, true),
MonitoringNodes = erts_debug:get_internal_state(monitoring_nodes),
@@ -1327,25 +1593,25 @@ print_my_messages() ->
sleep(T) -> receive after T * 1000 -> ok end.
-start_node(Name, Param, this) ->
+start_node(DCfg, Name, Param, this) ->
NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
test_server:start_node(Name, peer, [{args, NewParam}, {erl, [this]}]);
-start_node(Name, Param, "this") ->
- NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
+start_node(DCfg, Name, Param, "this") ->
+ NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)) ++ " " ++ DCfg,
test_server:start_node(Name, peer, [{args, NewParam}, {erl, [this]}]);
-start_node(Name, Param, Rel) when is_atom(Rel) ->
- NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
+start_node(DCfg, Name, Param, Rel) when is_atom(Rel) ->
+ NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)) ++ " " ++ DCfg,
test_server:start_node(Name, peer, [{args, NewParam}, {erl, [{release, atom_to_list(Rel)}]}]);
-start_node(Name, Param, Rel) when is_list(Rel) ->
- NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
+start_node(DCfg, Name, Param, Rel) when is_list(Rel) ->
+ NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)) ++ " " ++ DCfg,
test_server:start_node(Name, peer, [{args, NewParam}, {erl, [{release, Rel}]}]).
-start_node(Name, Param) ->
- NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)),
+start_node(DCfg, Name, Param) ->
+ NewParam = Param ++ " -pa " ++ filename:dirname(code:which(?MODULE)) ++ " " ++ DCfg,
test_server:start_node(Name, slave, [{args, NewParam}]).
-start_node(Name) ->
- start_node(Name, "").
+start_node(DCfg, Name) ->
+ start_node(DCfg, Name, "").
stop_node(Node) ->
test_server:stop_node(Node).
diff --git a/lib/kernel/test/erl_distribution_wb_SUITE.erl b/lib/kernel/test/erl_distribution_wb_SUITE.erl
index 6a23ad0d11..8256444bdc 100644
--- a/lib/kernel/test/erl_distribution_wb_SUITE.erl
+++ b/lib/kernel/test/erl_distribution_wb_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -30,7 +30,7 @@
%% 1)
%%
-%% Connections are now always set up symetrically with respect to
+%% Connections are now always set up symmetrically with respect to
%% publication. If the connecting node doesn't send DFLAG_PUBLISHED,
%% the other node won't send DFLAG_PUBLISHED. If the connecting
%% node sends DFLAG_PUBLISHED but the other node doesn't send
@@ -56,11 +56,18 @@
-define(DFLAG_HIDDEN_ATOM_CACHE,16#40).
-define(DFLAG_NEW_FUN_TAGS,16#80).
-define(DFLAG_EXTENDED_PIDS_PORTS,16#100).
+-define(DFLAG_UTF8_ATOMS, 16#10000).
%% From R9 and forward extended references is compulsory
%% From R10 and forward extended pids and ports are compulsory
--define(COMPULSORY_DFLAGS, (?DFLAG_EXTENDED_REFERENCES bor ?DFLAG_EXTENDED_PIDS_PORTS)).
+%% From R20 and forward UTF8 atoms are compulsory
+%% From R21 and forward NEW_FUN_TAGS is compulsory (no more tuple fallback {fun, ...})
+-define(COMPULSORY_DFLAGS, (?DFLAG_EXTENDED_REFERENCES bor
+ ?DFLAG_EXTENDED_PIDS_PORTS bor
+ ?DFLAG_UTF8_ATOMS bor
+ ?DFLAG_NEW_FUN_TAGS)).
+-define(PASS_THROUGH, $p).
-define(shutdown(X), exit(X)).
-define(int16(X), [((X) bsr 8) band 16#ff, (X) band 16#ff]).
@@ -670,15 +677,16 @@ build_rex_message(Cookie,OurName) ->
%% Receive a distribution message
recv_message(Socket) ->
case gen_tcp:recv(Socket, 0) of
+ {ok,[]} ->
+ recv_message(Socket); %% a tick, ignore
{ok,Data} ->
B0 = list_to_binary(Data),
- {_,B1} = erlang:split_binary(B0,1),
- Header = binary_to_term(B1),
- Siz = byte_size(term_to_binary(Header)),
- {_,B2} = erlang:split_binary(B1,Siz),
+ <<?PASS_THROUGH, B1/binary>> = B0,
+ {Header,Siz} = binary_to_term(B1,[used]),
+ <<_:Siz/binary,B2/binary>> = B1,
Message = case (catch binary_to_term(B2)) of
{'EXIT', _} ->
- could_not_digest_message;
+ {could_not_digest_message,B2};
Other ->
Other
end,
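%% Illustrative sketch (not from this patch): binary_to_term/2 with the 'used'
%% option returns both the decoded term and the number of bytes consumed,
%% which is what lets recv_message/1 above split the control header from the
%% payload without re-encoding the header:

split_header_example(<<?PASS_THROUGH, B1/binary>>) ->
    {Header, Used} = binary_to_term(B1, [used]),
    <<_:Used/binary, Payload/binary>> = B1,
    {Header, Payload}.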
diff --git a/lib/kernel/test/erl_prim_loader_SUITE.erl b/lib/kernel/test/erl_prim_loader_SUITE.erl
index b6417210b9..16a127aa3e 100644
--- a/lib/kernel/test/erl_prim_loader_SUITE.erl
+++ b/lib/kernel/test/erl_prim_loader_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -33,6 +33,7 @@
primary_archive/1, virtual_dir_in_archive/1,
get_modules/1]).
+-define(PRIM_FILE, prim_file).
%%-----------------------------------------------------------------
%% Test suite for erl_prim_loader. (Most code is run during system start/stop.)
@@ -461,7 +462,7 @@ primary_archive(Config) when is_list(Config) ->
%% Set primary archive
ExpectedEbins = [Archive, DictDir ++ "/ebin", DummyDir ++ "/ebin"],
io:format("ExpectedEbins: ~p\n", [ExpectedEbins]),
- {ok, FileInfo} = prim_file:read_file_info(Archive),
+ {ok, FileInfo} = ?PRIM_FILE:read_file_info(Archive),
{ok, Ebins} = rpc:call(Node, erl_prim_loader, set_primary_archive,
[Archive, ArchiveBin, FileInfo,
fun escript:parse_file/1]),
diff --git a/lib/kernel/test/error_logger_SUITE.erl b/lib/kernel/test/error_logger_SUITE.erl
index 2d26a7246c..eab72e58a7 100644
--- a/lib/kernel/test/error_logger_SUITE.erl
+++ b/lib/kernel/test/error_logger_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -32,7 +32,8 @@
init_per_group/2,end_per_group/2,
off_heap/1,
error_report/1, info_report/1, error/1, info/1,
- emulator/1, tty/1, logfile/1, add/1, delete/1]).
+ emulator/1, via_logger_process/1, other_node/1,
+ tty/1, logfile/1, add/1, delete/1, format_depth/1]).
-export([generate_error/2]).
@@ -46,16 +47,20 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [off_heap, error_report, info_report, error, info, emulator, tty,
- logfile, add, delete].
+ [off_heap, error_report, info_report, error, info, emulator,
+ via_logger_process, other_node, tty, logfile, add, delete,
+ format_depth].
groups() ->
[].
init_per_suite(Config) ->
+ logger:add_handler(error_logger,error_logger,
+ #{level=>info,filter_default=>log}),
Config.
end_per_suite(_Config) ->
+ logger:remove_handler(error_logger),
ok.
init_per_group(_GroupName, Config) ->
@@ -226,6 +231,40 @@ generate_error(Error, Stack) ->
erlang:raise(error, Error, Stack).
%%-----------------------------------------------------------------
+
+via_logger_process(Config) ->
+ case os:type() of
+ {win32,_} ->
+ {skip,"Skip on windows - cant change file mode"};
+ _ ->
+ error_logger:add_report_handler(?MODULE, self()),
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ Msg = "File operation error: eacces. Target: " ++
+ Dir ++ ". Function: list_dir. ",
+ ok = file:make_dir(Dir),
+ ok = file:change_mode(Dir,8#0222),
+ error = erl_prim_loader:list_dir(Dir),
+ ok = file:change_mode(Dir,8#0664),
+ _ = file:del_dir(Dir),
+ reported(error_report, std_error, Msg),
+ my_yes = error_logger:delete_report_handler(?MODULE),
+ ok
+ end.
+
+%%-----------------------------------------------------------------
+
+other_node(_Config) ->
+ error_logger:add_report_handler(?MODULE, self()),
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ ok = rpc:call(Node,logger,add_handler,[error_logger,error_logger,
+ #{level=>info,filter_default=>log}]),
+ rpc:call(Node,error_logger,error_report,[hi_from_remote]),
+ reported(error_report,std_error,hi_from_remote),
+ test_server:stop_node(Node),
+ ok.
+
+
+%%-----------------------------------------------------------------
%% We don't enables or disables tty error logging here. We do not
%% want to interact with the test run.
%%-----------------------------------------------------------------
@@ -271,6 +310,21 @@ delete(Config) when is_list(Config) ->
ok.
%%-----------------------------------------------------------------
+
+format_depth(_Config) ->
+ ok = application:set_env(kernel,error_logger_format_depth,30),
+ 30 = error_logger:get_format_depth(),
+ ok = application:set_env(kernel,error_logger_format_depth,3),
+ 10 = error_logger:get_format_depth(),
+ ok = application:set_env(kernel,error_logger_format_depth,11),
+ 11 = error_logger:get_format_depth(),
+ ok = application:set_env(kernel,error_logger_format_depth,unlimited),
+ unlimited = error_logger:get_format_depth(),
+ ok = application:unset_env(kernel,error_logger_format_depth),
+ unlimited = error_logger:get_format_depth(),
+ ok.
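%% Illustrative sketch (not from this patch): the set-3-get-10 step above
%% relies on the documented clamping of the configured depth to a minimum
%% of 10; conceptually:

effective_depth(unlimited) -> unlimited;
effective_depth(Depth) when is_integer(Depth) -> max(10, Depth).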
+
+%%-----------------------------------------------------------------
%% Check that the report has been received.
%%-----------------------------------------------------------------
reported(Tag, Type, Report) ->
@@ -279,7 +333,7 @@ reported(Tag, Type, Report) ->
test_server:messages_get(),
ok
after 1000 ->
- ct:fail(no_report_received)
+ ct:fail({no_report_received,test_server:messages_get()})
end.
%%-----------------------------------------------------------------
diff --git a/lib/kernel/test/error_logger_warn_SUITE.erl b/lib/kernel/test/error_logger_warn_SUITE.erl
index a8087e11f9..8f1eb2ba0a 100644
--- a/lib/kernel/test/error_logger_warn_SUITE.erl
+++ b/lib/kernel/test/error_logger_warn_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2003-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2003-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -480,9 +480,12 @@ rb_utc() ->
UtcLog=case application:get_env(sasl,utc_log) of
{ok,true} ->
true;
- _AllOthers ->
+ {ok,false} ->
application:set_env(sasl,utc_log,true),
- false
+ false;
+ undefined ->
+ application:set_env(sasl,utc_log,true),
+ undefined
end,
application:start(sasl),
rb:start([{report_dir, rd()}]),
@@ -494,7 +497,12 @@ rb_utc() ->
Sum=one_rb_findstr([],"UTC"),
rb:stop(),
application:stop(sasl),
- application:set_env(sasl,utc_log,UtcLog),
+ case UtcLog of
+ undefined ->
+ application:unset_env(sasl,utc_log);
+ _ ->
+ application:set_env(sasl,utc_log,UtcLog)
+ end,
stop_node(Node),
ok.
diff --git a/lib/kernel/test/file_SUITE.erl b/lib/kernel/test/file_SUITE.erl
index c37d114a58..3bc8e6e828 100644
--- a/lib/kernel/test/file_SUITE.erl
+++ b/lib/kernel/test/file_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -18,7 +18,7 @@
%% %CopyrightEnd%
%%
-%% This is a developement feature when developing a new file module,
+%% This is a development feature when developing a new file module,
%% ugly but practical.
-ifndef(FILE_MODULE).
-define(FILE_MODULE, file).
@@ -39,6 +39,8 @@
-define(FILE_FIN_PER_TESTCASE(Config), Config).
-endif.
+-define(PRIM_FILE, prim_file).
+
-module(?FILE_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -54,7 +56,8 @@
open1/1,
old_modes/1, new_modes/1, path_open/1, open_errors/1]).
-export([ file_info_basic_file/1, file_info_basic_directory/1,
- file_info_bad/1, file_info_times/1, file_write_file_info/1]).
+ file_info_bad/1, file_info_times/1, file_write_file_info/1,
+ file_wfi_helpers/1]).
-export([rename/1, access/1, truncate/1, datasync/1, sync/1,
read_write/1, pread_write/1, append/1, exclusive/1]).
-export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]).
@@ -97,6 +100,12 @@
-export([unicode_mode/1]).
+-export([volume_relative_paths/1,unc_paths/1]).
+
+-export([tiny_writes/1, tiny_writes_delayed/1,
+ large_writes/1, large_writes_delayed/1,
+ tiny_reads/1, tiny_reads_ahead/1]).
+
%% Debug exports
-export([create_file_slow/2, create_file/2, create_bin/2]).
-export([verify_file/2, verify_bin/3]).
@@ -107,6 +116,8 @@
-export([disc_free/1, memsize/0]).
-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+
-include_lib("kernel/include/file.hrl").
-define(THROW_ERROR(RES), throw({fail, ?LINE, RES})).
@@ -118,13 +129,13 @@ suite() ->
all() ->
[unicode, altname, read_write_file, {group, dirs},
- {group, files}, delete, rename, names, {group, errors},
- {group, compression}, {group, links}, copy,
+ {group, files}, delete, rename, names, volume_relative_paths, unc_paths,
+ {group, errors}, {group, compression}, {group, links}, copy,
delayed_write, read_ahead, segment_read, segment_write,
ipread, pid2name, interleaved_read_write, otp_5814, otp_10852,
large_file, large_write, read_line_1, read_line_2, read_line_3,
read_line_4, standard_io, old_io_protocol,
- unicode_mode
+ unicode_mode, {group, bench}
].
groups() ->
@@ -142,7 +153,8 @@ groups() ->
{pos, [], [pos1, pos2, pos3]},
{file_info, [],
[file_info_basic_file, file_info_basic_directory,
- file_info_bad, file_info_times, file_write_file_info]},
+ file_info_bad, file_info_times, file_write_file_info,
+ file_wfi_helpers]},
{consult, [], [consult1, path_consult]},
{eval, [], [eval1, path_eval]},
{script, [], [script1, path_script]},
@@ -154,11 +166,19 @@ groups() ->
write_compressed, compress_errors, catenated_gzips,
compress_async_crash]},
{links, [],
- [make_link, read_link_info_for_non_link, symlinks]}].
+ [make_link, read_link_info_for_non_link, symlinks]},
+ {bench, [],
+ [tiny_writes, tiny_writes_delayed,
+ large_writes, large_writes_delayed,
+ tiny_reads, tiny_reads_ahead]}].
init_per_group(_GroupName, Config) ->
Config.
+end_per_group(bench, Config) ->
+ ScratchDir = proplists:get_value(priv_dir, Config),
+ file:delete(filename:join(ScratchDir, "benchmark_scratch_file")),
+ Config;
end_per_group(_GroupName, Config) ->
Config.
@@ -381,11 +401,11 @@ read_write_0(Str, {Func, ReadFun}, Options) ->
io:format("~p:~p: ~p ERROR: ~ts vs~n ~w~n - ~p~n",
[?MODULE, Line, Func, Str, ReadBytes, Options]),
exit({error, ?LINE});
- error:What ->
+ error:What:Stacktrace ->
io:format("~p:??: ~p ERROR: ~p from~n ~w~n ~p~n",
[?MODULE, Func, What, Str, Options]),
- io:format("\t~p~n", [erlang:get_stacktrace()]),
+ io:format("\t~p~n", [Stacktrace]),
exit({error, ?LINE})
end.
@@ -473,7 +493,7 @@ um_check_unicode(_Utf8Bin, {ok, _ListOrBin}, _, _UTF8_) ->
um_filename(Bin, Dir, Options) when is_binary(Bin) ->
um_filename(binary_to_list(Bin), Dir, Options);
um_filename(Str = [_|_], Dir, Options) ->
- Name = hd(string:tokens(Str, ":")),
+ Name = hd(string:lexemes(Str, ":")),
Enc = atom_to_list(proplists:get_value(encoding, Options, latin1)),
File = case lists:member(binary, Options) of
true ->
@@ -638,6 +658,10 @@ cur_dir_0(Config) when is_list(Config) ->
{ok,NewDirFiles} = ?FILE_MODULE:list_dir("."),
true = lists:member(UncommonName,NewDirFiles),
+ %% Ensure that we get the same result with a trailing slash; the
+ %% APIs used on Windows will choke on them if passed directly.
+ {ok,NewDirFiles} = ?FILE_MODULE:list_dir("./"),
+
%% Delete the directory and return to the old current directory
%% and check that the created file isn't there (too!)
expect({error, einval}, {error, eacces},
@@ -690,10 +714,15 @@ win_cur_dir_1(_Config) ->
%% Get the drive letter from the current directory,
%% and try to get current directory for that drive.
- [Drive,$:|_] = BaseDir,
- {ok,BaseDir} = ?FILE_MODULE:get_cwd([Drive,$:]),
+ [CurDrive,$:|_] = BaseDir,
+ {ok,BaseDir} = ?FILE_MODULE:get_cwd([CurDrive,$:]),
io:format("BaseDir = ~s\n", [BaseDir]),
+ %% We should error out on non-existent drives. Any reasonable system will
+    %% have at least one unassigned drive letter.
+ CurDirs = [?FILE_MODULE:get_cwd([Drive,$:]) || Drive <- lists:seq($A, $Z)],
+ lists:member({error,eaccess}, CurDirs),
+
%% Unfortunately, there is no way to move away from the
%% current drive as we can't use the "subst" command from
%% a SSH connection. We can't test any more.
@@ -831,7 +860,7 @@ no_untranslatable_names() ->
end.
start_node(Name, Args) ->
- [_,Host] = string:tokens(atom_to_list(node()), "@"),
+ [_,Host] = string:lexemes(atom_to_list(node()), "@"),
ct:log("Trying to start ~w@~s~n", [Name,Host]),
case test_server:start_node(Name, peer, [{args,Args}]) of
{error,Reason} ->
@@ -1019,6 +1048,23 @@ close(Config) when is_list(Config) ->
Val = ?FILE_MODULE:close(Fd1),
io:format("Second close gave: ~p",[Val]),
+ %% All operations on a closed raw file should EINVAL, even if they're not
+ %% supported on the current platform.
+ {ok,Fd2} = ?FILE_MODULE:open(Name, [read, write, raw]),
+ ok = ?FILE_MODULE:close(Fd2),
+
+ {error, einval} = ?FILE_MODULE:advise(Fd2, 5, 5, normal),
+ {error, einval} = ?FILE_MODULE:allocate(Fd2, 5, 5),
+ {error, einval} = ?FILE_MODULE:close(Fd2),
+ {error, einval} = ?FILE_MODULE:datasync(Fd2),
+ {error, einval} = ?FILE_MODULE:position(Fd2, 5),
+ {error, einval} = ?FILE_MODULE:pread(Fd2, 5, 1),
+ {error, einval} = ?FILE_MODULE:pwrite(Fd2, 5, "einval please"),
+ {error, einval} = ?FILE_MODULE:read(Fd2, 1),
+ {error, einval} = ?FILE_MODULE:sync(Fd2),
+ {error, einval} = ?FILE_MODULE:truncate(Fd2),
+ {error, einval} = ?FILE_MODULE:write(Fd2, "einval please"),
+
[] = flush(),
ok.
@@ -1132,8 +1178,8 @@ pread_write_test(File, Data) ->
end,
I = Size + 17,
ok = ?FILE_MODULE:pwrite(File, 0, Data),
- Res = ?FILE_MODULE:pread(File, 0, I),
- {ok, Data} = Res,
+ {ok, Data} = ?FILE_MODULE:pread(File, 0, I),
+ {ok, [Data]} = ?FILE_MODULE:pread(File, [{0, I}]),
eof = ?FILE_MODULE:pread(File, I, 1),
ok = ?FILE_MODULE:pwrite(File, [{0, Data}, {I, Data}]),
{ok, [Data, eof, Data]} =
@@ -1321,6 +1367,10 @@ file_info_basic_file(Config) when is_list(Config) ->
io:put_chars(Fd1, "foo bar"),
ok = ?FILE_MODULE:close(Fd1),
+ %% Don't crash the file server when passing incorrect arguments.
+ {error,badarg} = ?FILE_MODULE:read_file_info(Name, [{time, gurka}]),
+ {error,badarg} = ?FILE_MODULE:read_file_info([#{} | gaffel]),
+
%% Test that the file has the expected attributes.
%% The times are tricky, so we will save them to a separate test case.
{ok,FileInfo} = ?FILE_MODULE:read_file_info(Name),
@@ -1564,6 +1614,39 @@ file_write_file_info(Config) when is_list(Config) ->
[] = flush(),
ok.
+file_wfi_helpers(Config) when is_list(Config) ->
+ RootDir = get_good_directory(Config),
+ io:format("RootDir = ~p", [RootDir]),
+
+ Name = filename:join(RootDir,
+ atom_to_list(?MODULE) ++ "_wfi_helpers"),
+
+ ok = ?FILE_MODULE:write_file(Name, "hello again"),
+ NewTime = {{1997, 02, 15}, {13, 18, 20}},
+ ok = ?FILE_MODULE:change_time(Name, NewTime, NewTime),
+
+ {ok, #file_info{atime=NewActAtime, mtime=NewTime}} =
+ ?FILE_MODULE:read_file_info(Name),
+
+ NewFilteredAtime = filter_atime(NewTime, Config),
+ NewFilteredAtime = filter_atime(NewActAtime, Config),
+
+ %% Make the file unwritable
+ ok = ?FILE_MODULE:change_mode(Name, 8#400),
+ {error, eacces} = ?FILE_MODULE:write_file(Name, "hello again"),
+
+ %% ... and writable again
+ ok = ?FILE_MODULE:change_mode(Name, 8#600),
+ ok = ?FILE_MODULE:write_file(Name, "hello again"),
+
+ %% We have no idea which users will work, so all we can do is to check
+ %% that it returns enoent instead of crashing.
+ {error, enoent} = ?FILE_MODULE:change_group("bogus file name", 0),
+ {error, enoent} = ?FILE_MODULE:change_owner("bogus file name", 0),
+
+ [] = flush(),
+ ok.
+
%% Returns a directory on a file system that has correct file times.
get_good_directory(Config) ->
@@ -2044,13 +2127,22 @@ names(Config) when is_list(Config) ->
ok = ?FILE_MODULE:close(Fd2),
{ok,Fd3} = ?FILE_MODULE:open(Name3,read),
ok = ?FILE_MODULE:close(Fd3),
+
+ %% Now try the same on raw files.
+ {ok,Fd4} = ?FILE_MODULE:open(Name2, [read, raw]),
+ ok = ?FILE_MODULE:close(Fd4),
+ {ok,Fd4f} = ?FILE_MODULE:open(lists:flatten(Name2), [read, raw]),
+ ok = ?FILE_MODULE:close(Fd4f),
+ {ok,Fd5} = ?FILE_MODULE:open(Name3, [read, raw]),
+ ok = ?FILE_MODULE:close(Fd5),
+
case length(Name1) > 255 of
true ->
io:format("Path too long for an atom:\n\n~p\n", [Name1]);
false ->
Name4 = list_to_atom(Name1),
- {ok,Fd4} = ?FILE_MODULE:open(Name4,read),
- ok = ?FILE_MODULE:close(Fd4)
+ {ok,Fd6} = ?FILE_MODULE:open(Name4,read),
+ ok = ?FILE_MODULE:close(Fd6)
end,
%% Try some path names
@@ -2074,6 +2166,49 @@ names(Config) when is_list(Config) ->
[] = flush(),
ok.
+volume_relative_paths(Config) when is_list(Config) ->
+ case os:type() of
+ {win32, _} ->
+ {ok, [Drive, $: | _]} = file:get_cwd(),
+ %% Relative to current device root.
+ {ok, RootInfo} = file:read_file_info([Drive, $:, $/]),
+ {ok, RootInfo} = file:read_file_info("/"),
+ %% Relative to current device directory.
+ {ok, DirContents} = file:list_dir([Drive, $:]),
+ {ok, DirContents} = file:list_dir("."),
+ [] = flush(),
+ ok;
+ _ ->
+ {skip, "This test is Windows-specific."}
+ end.
+
+unc_paths(Config) when is_list(Config) ->
+ case os:type() of
+ {win32, _} ->
+ %% We assume administrative shares are set up and reachable, and we
+ %% settle for testing presence as some of the returned data is
+ %% different.
+ {ok, _} = file:read_file_info("C:\\Windows\\explorer.exe"),
+ {ok, _} = file:read_file_info("\\\\localhost\\c$\\Windows\\explorer.exe"),
+
+ {ok, Files} = file:list_dir("C:\\Windows\\"),
+ {ok, Files} = file:list_dir("\\\\localhost\\c$\\Windows\\"),
+
+ {ok, Cwd} = file:get_cwd(),
+
+ try
+ ok = file:set_cwd("\\\\localhost\\c$\\Windows\\"),
+ {ok, _} = file:read_file_info("explorer.exe")
+ after
+ file:set_cwd(Cwd)
+ end,
+
+ [] = flush(),
+ ok;
+ _ ->
+ {skip, "This test is Windows-specific."}
+ end.
+
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -2102,13 +2237,14 @@ e_delete(Config) when is_list(Config) ->
case os:type() of
{win32, _} ->
%% Remove a character device.
- {error, eacces} = ?FILE_MODULE:delete("nul");
+ expect({error, eacces}, {error, einval},
+ ?FILE_MODULE:delete("nul"));
_ ->
?FILE_MODULE:write_file_info(
Base, #file_info {mode=0}),
{error, eacces} = ?FILE_MODULE:delete(Afile),
?FILE_MODULE:write_file_info(
- Base, #file_info {mode=8#600})
+ Base, #file_info {mode=8#700})
end,
[] = flush(),
@@ -2239,7 +2375,7 @@ e_make_dir(Config) when is_list(Config) ->
?FILE_MODULE:write_file_info(Base, #file_info {mode=0}),
{error, eacces} = ?FILE_MODULE:make_dir(filename:join(Base, "xxxx")),
?FILE_MODULE:write_file_info(
- Base, #file_info {mode=8#600})
+ Base, #file_info {mode=8#700})
end,
ok.
@@ -2285,7 +2421,7 @@ e_del_dir(Config) when is_list(Config) ->
ok = ?FILE_MODULE:make_dir(ADirectory),
?FILE_MODULE:write_file_info( Base, #file_info {mode=0}),
{error, eacces} = ?FILE_MODULE:del_dir(ADirectory),
- ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#600})
+ ?FILE_MODULE:write_file_info( Base, #file_info {mode=8#700})
end,
[] = flush(),
ok.
@@ -2641,8 +2777,8 @@ altname(Config) when is_list(Config) ->
{skipped, "Altname not supported on this platform"};
{ok, "LONGAL~1"} ->
{ok, "A_FILE~1"} = ?FILE_MODULE:altname(Name),
- {ok, "C:/"} = ?FILE_MODULE:altname("C:/"),
- {ok, "C:\\"} = ?FILE_MODULE:altname("C:\\"),
+ {ok, "c:/"} = ?FILE_MODULE:altname("C:/"),
+ {ok, "c:/"} = ?FILE_MODULE:altname("C:\\"),
{error,enoent} = ?FILE_MODULE:altname(NonexName),
{ok, "short"} = ?FILE_MODULE:altname(ShortName),
ok
@@ -2923,20 +3059,22 @@ delayed_write(Config) when is_list(Config) ->
%%
%% Test caching and normal close of non-raw file
{ok, Fd1} =
- ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 2000}]),
+ ?FILE_MODULE:open(File, [write, {delayed_write, Size+1, 400}]),
ok = ?FILE_MODULE:write(Fd1, Data1),
- timer:sleep(1000), % Just in case the file system is slow
+ %% Wait for a reasonable amount of time to check whether the write was
+ %% practically instantaneous or actually delayed.
+ timer:sleep(100),
{ok, Fd2} = ?FILE_MODULE:open(File, [read]),
eof = ?FILE_MODULE:read(Fd2, 1),
ok = ?FILE_MODULE:write(Fd1, Data1), % Data flush on size
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100),
{ok, Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 2*Size+1),
ok = ?FILE_MODULE:write(Fd1, Data1),
- timer:sleep(3000), % Wait until data flush on timeout
+ timer:sleep(500), % Wait until data flush on timeout
{ok, Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 3*Size+1),
ok = ?FILE_MODULE:write(Fd1, Data1),
ok = ?FILE_MODULE:close(Fd1), % Data flush on close
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100),
{ok, Data1Data1Data1Data1} = ?FILE_MODULE:pread(Fd2, bof, 4*Size+1),
ok = ?FILE_MODULE:close(Fd2),
%%
@@ -2970,7 +3108,7 @@ delayed_write(Config) when is_list(Config) ->
{'DOWN', Mref1, _, _, _} = Down1a ->
ct:fail(Down1a)
end,
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
{ok, Fd3} = ?FILE_MODULE:open(File, [read]),
eof = ?FILE_MODULE:read(Fd3, 1),
Child1 ! {Parent, continue, normal},
@@ -2980,7 +3118,7 @@ delayed_write(Config) when is_list(Config) ->
{'DOWN', Mref1, _, _, _} = Down1b ->
ct:fail(Down1b)
end,
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
{ok, Data1} = ?FILE_MODULE:pread(Fd3, bof, Size+1),
ok = ?FILE_MODULE:close(Fd3),
%%
@@ -2993,7 +3131,7 @@ delayed_write(Config) when is_list(Config) ->
{'DOWN', Mref2, _, _, _} = Down2a ->
ct:fail(Down2a)
end,
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
{ok, Fd4} = ?FILE_MODULE:open(File, [read]),
eof = ?FILE_MODULE:read(Fd4, 1),
Child2 ! {Parent, continue, kill},
@@ -3003,7 +3141,7 @@ delayed_write(Config) when is_list(Config) ->
{'DOWN', Mref2, _, _, _} = Down2b ->
ct:fail(Down2b)
end,
- timer:sleep(1000), % Just in case the file system is slow
+ timer:sleep(100), % Just in case the file system is slow
eof = ?FILE_MODULE:pread(Fd4, bof, 1),
ok = ?FILE_MODULE:close(Fd4),
%%
@@ -3095,6 +3233,16 @@ read_ahead(Config) when is_list(Config) ->
Data1Data2Data3 = Data1++Data2++Data3,
{ok, Data1Data2Data3} = ?FILE_MODULE:read(Fd5, 3*Size+1),
ok = ?FILE_MODULE:close(Fd5),
+
+ %% Ensure that a read that draws from both the buffer and the file won't
+ %% return anything wonky.
+ SplitData = << <<(I rem 256)>> || I <- lists:seq(1, 1024) >>,
+ file:write_file(File, SplitData),
+ {ok, Fd6} = ?FILE_MODULE:open(File, [raw, read, binary, {read_ahead, 256}]),
+ {ok, <<1>>} = file:read(Fd6, 1),
+ <<1, Shifted:512/binary, _Rest/binary>> = SplitData,
+ {ok, Shifted} = file:read(Fd6, 512),
+
%%
[] = flush(),
ok.
@@ -3596,19 +3744,33 @@ otp_10852(Config) when is_list(Config) ->
ok = rpc_call(Node, read_file, [B]),
ok = rpc_call(Node, make_link, [B,B]),
case rpc_call(Node, make_symlink, [B,B]) of
- ok -> ok;
- {error, E} when (E =:= enotsup) or (E =:= eperm) ->
- {win32,_} = os:type()
+ {error, eilseq} ->
+ %% Some versions of OS X refuse to create files with illegal names.
+ {unix,darwin} = os:type();
+ {error, eperm} ->
+ %% The test user might not have permission to create symlinks.
+ {win32,_} = os:type();
+ ok ->
+ ok
end,
ok = rpc_call(Node, delete, [B]),
- ok = rpc_call(Node, make_dir, [B]),
+ case rpc_call(Node, make_dir, [B]) of
+ {error, eilseq} ->
+ {unix,darwin} = os:type();
+ ok ->
+ ok
+ end,
ok = rpc_call(Node, del_dir, [B]),
- ok = rpc_call(Node, write_file, [B,B]),
- {ok, Fd} = rpc_call(Node, open, [B,[read]]),
- ok = rpc_call(Node, close, [Fd]),
- {ok,0} = rpc_call(Node, copy, [B,B]),
- {ok, Fd2, B} = rpc_call(Node, path_open, [["."], B, [read]]),
- ok = rpc_call(Node, close, [Fd2]),
+ case rpc_call(Node, write_file, [B,B]) of
+ {error, eilseq} ->
+ {unix,darwin} = os:type();
+ ok ->
+ {ok, Fd} = rpc_call(Node, open, [B,[read]]),
+ ok = rpc_call(Node, close, [Fd]),
+ {ok,0} = rpc_call(Node, copy, [B,B]),
+ {ok, Fd2, B} = rpc_call(Node, path_open, [["."], B, [read]]),
+ ok = rpc_call(Node, close, [Fd2])
+ end,
true = test_server:stop_node(Node),
ok.
@@ -3699,6 +3861,83 @@ do_large_write(Name) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%% Benchmarks
+%%
+%% Note that we only measure the time it takes to run the isolated file
+%% operations and that the actual test runtime can differ significantly,
+%% especially on the write side as the files need to be truncated before
+%% writing.
+
+large_writes(Config) when is_list(Config) ->
+ Modes = [raw, binary],
+ OpCount = 4096,
+ Data = <<0:(64 bsl 10)/unit:8>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+large_writes_delayed(Config) when is_list(Config) ->
+ %% Each write is exactly as large as the delay buffer, causing the writes
+ %% to pass through each time, giving us a decent idea of how much overhead
+ %% delayed_write adds.
+ Modes = [raw, binary, {delayed_write, 64 bsl 10, 2000}],
+ OpCount = 4096,
+ Data = <<0:(64 bsl 10)/unit:8>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+tiny_writes(Config) when is_list(Config) ->
+ Modes = [raw, binary],
+ OpCount = 512 bsl 10,
+ Data = <<0>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+tiny_writes_delayed(Config) when is_list(Config) ->
+ Modes = [raw, binary, {delayed_write, 512 bsl 10, 2000}],
+ OpCount = 512 bsl 10,
+ Data = <<0>>,
+ run_write_benchmark(Config, Modes, OpCount, Data).
+
+%% The read benchmarks assume that "benchmark_scratch_file" has been filled by
+%% the write benchmarks.
+
+tiny_reads(Config) when is_list(Config) ->
+ Modes = [raw, binary],
+ OpCount = 512 bsl 10,
+ run_read_benchmark(Config, Modes, OpCount, 1).
+
+tiny_reads_ahead(Config) when is_list(Config) ->
+ Modes = [raw, binary, {read_ahead, 512 bsl 10}],
+ OpCount = 512 bsl 10,
+ run_read_benchmark(Config, Modes, OpCount, 1).
+
+run_write_benchmark(Config, Modes, OpCount, Data) ->
+ run_benchmark(Config, [write | Modes], OpCount, fun file:write/2, Data).
+
+run_read_benchmark(Config, Modes, OpCount, OpSize) ->
+ run_benchmark(Config, [read | Modes], OpCount, fun file:read/2, OpSize).
+
+run_benchmark(Config, Modes, OpCount, Fun, Arg) ->
+ ScratchDir = proplists:get_value(priv_dir, Config),
+ Path = filename:join(ScratchDir, "benchmark_scratch_file"),
+ {ok, Fd} = file:open(Path, Modes),
+ submit_throughput_results(Fun, [Fd, Arg], OpCount).
+
+submit_throughput_results(Fun, Args, Times) ->
+ MSecs = measure_repeated_file_op(Fun, Args, Times, millisecond),
+ IOPS = trunc(Times * (1000 / MSecs)),
+ ct_event:notify(#event{ name = benchmark_data, data = [{value,IOPS}] }),
+ {comment, io_lib:format("~p IOPS, ~p ms", [IOPS, trunc(MSecs)])}.
+
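%% Illustrative note (not from this patch): with the formula above, e.g. 4096
%% operations completing in 256 ms give trunc(4096 * (1000 / 256)) = 16000
%% IOPS.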
+measure_repeated_file_op(Fun, Args, Times, Unit) ->
+ Start = os:perf_counter(Unit),
+ repeated_apply(Fun, Args, Times),
+ os:perf_counter(Unit) - Start.
+
+repeated_apply(_F, _Args, Times) when Times =< 0 ->
+ ok;
+repeated_apply(F, Args, Times) ->
+ erlang:apply(F, Args),
+ repeated_apply(F, Args, Times - 1).
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
response_analysis(Module, Function, Arguments) ->
@@ -3738,7 +3977,7 @@ response_analysis(Module, Function, Arguments) ->
{Result, Comment}.
micro_ts() ->
- erlang:monotonic_time(micro_seconds).
+ erlang:monotonic_time(microsecond).
response_stat(init, Ts) ->
{0, 0, Ts, 0, 0};
@@ -3934,7 +4173,7 @@ read_line_create_files(TestData) ->
read_line_remove_files(TestData) ->
[ file:delete(File) || {_Function,File,_,_} <- TestData ].
-%% read_line with prim_file.
+%% read_line with ?PRIM_FILE.
read_line_1(Config) when is_list(Config) ->
PrivDir = proplists:get_value(priv_dir, Config),
All = read_line_testdata(PrivDir),
@@ -4103,9 +4342,9 @@ read_line_create7(Filename) ->
file:close(F).
read_line_all(Filename) ->
- {ok,F} = prim_file:open(Filename,[read,binary]),
+ {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]),
X=read_rl_lines(F),
- prim_file:close(F),
+ ?PRIM_FILE:close(F),
Bin = list_to_binary([B || {ok,B} <- X]),
Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]),
"\r\n","\n",[global,{return,binary}]),
@@ -4138,7 +4377,7 @@ read_line_all4(Filename) ->
{length(X),Bin}.
read_rl_lines(F) ->
- case prim_file:read_line(F) of
+ case ?PRIM_FILE:read_line(F) of
eof ->
[];
{error,X} ->
@@ -4158,9 +4397,9 @@ read_rl_lines2(F) ->
end.
read_line_all_alternating(Filename) ->
- {ok,F} = prim_file:open(Filename,[read,binary]),
+ {ok,F} = ?PRIM_FILE:open(Filename,[read,binary]),
X=read_rl_lines(F,true),
- prim_file:close(F),
+ ?PRIM_FILE:close(F),
Bin = list_to_binary([B || {ok,B} <- X]),
Bin = re:replace(list_to_binary([element(2,file:read_file(Filename))]),
"\r\n","\n",[global,{return,binary}]),
@@ -4194,8 +4433,8 @@ read_line_all_alternating4(Filename) ->
read_rl_lines(F,Alternate) ->
case begin
case Alternate of
- true -> prim_file:read(F,1);
- false -> prim_file:read_line(F)
+ true -> ?PRIM_FILE:read(F,1);
+ false -> ?PRIM_FILE:read_line(F)
end
end of
eof ->
@@ -4275,15 +4514,18 @@ run_large_file_test(Config, Run, Name) ->
{{unix,sunos},OsVersion} when OsVersion < {5,5,1} ->
{skip,"Only supported on Win32, Unix or SunOS >= 5.5.1"};
{{unix,_},_} ->
- N = disc_free(proplists:get_value(priv_dir, Config)),
- io:format("Free disk: ~w KByte~n", [N]),
- if N < 5 * (1 bsl 20) ->
- %% Less than 5 GByte free
- {skip,"Less than 5 GByte free"};
- true ->
- do_run_large_file_test(Config, Run, Name)
- end;
- _ ->
+ case disc_free(proplists:get_value(priv_dir, Config)) of
+ error ->
+ {skip, "Failed to query disk space for priv_dir. "
+ "Is it on a remote file system?~n"};
+ N when N >= 5 * (1 bsl 20) ->
+ ct:pal("Free disk: ~w KByte~n", [N]),
+ do_run_large_file_test(Config, Run, Name);
+ N when N < 5 * (1 bsl 20) ->
+ ct:pal("Free disk: ~w KByte~n", [N]),
+ {skip,"Less than 5 GByte free"}
+ end;
+ _ ->
{skip,"Only supported on Win32, Unix or SunOS >= 5.5.1"}
end.
@@ -4317,12 +4559,18 @@ do_run_large_file_test(Config, Run, Name0) ->
disc_free(Path) ->
Data = disksup:get_disk_data(),
- {_,Tot,Perc} = hd(lists:filter(
- fun({P,_Size,_Full}) ->
- lists:prefix(filename:nativename(P),
- filename:nativename(Path))
- end, lists:reverse(lists:sort(Data)))),
- round(Tot * (1-(Perc/100))).
+
+ %% What partitions could Data be mounted on?
+ Partitions =
+ [D || {P, _Tot, _Perc}=D <- Data,
+ lists:prefix(filename:nativename(P), filename:nativename(Path))],
+
+ %% Sorting in descending order places the partition with the most specific
+ %% path first.
+ case lists:sort(fun erlang:'>='/2, Partitions) of
+ [{_,Tot, Perc} | _] -> round(Tot * (1-(Perc/100)));
+ [] -> error
+ end.
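%% Editor's note (illustrative, not part of the patch): with hypothetical
%% disksup data [{"/",1000000,60}, {"/usr",500000,10}] and a Path below
%% "/usr", both entries prefix-match the path; the descending sort places
%% the more specific {"/usr",500000,10} first, so the estimate becomes
%% round(500000 * (1 - 10/100)) = 450000 KByte free.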
memsize() ->
{Tot,_Used,_} = memsup:get_memory_data(),
diff --git a/lib/kernel/test/file_SUITE_data/realmen.html b/lib/kernel/test/file_SUITE_data/realmen.html
index c810a5d088..92e13f23b8 100644
--- a/lib/kernel/test/file_SUITE_data/realmen.html
+++ b/lib/kernel/test/file_SUITE_data/realmen.html
@@ -237,7 +237,7 @@ destroy most of the interesting uses for EQUIVALENCE, and make it
impossible to modify the operating system code with negative
subscripts. Worst of all, bounds checking is inefficient.
-<LI> Source code maintainance systems. A Real Programmer keeps his
+<LI> Source code maintenance systems. A Real Programmer keeps his
code locked up in a card file, because it implies that its owner
cannot leave his important programs unguarded [5].
@@ -396,7 +396,7 @@ double stuff Oreos for special occasions.
<LI> Underneath the Oreos is a flow-charting template, left there by
the previous occupant of the office. (Real Programmers write programs,
-not documentation. Leave that to the maintainence people.)
+not documentation. Leave that to the maintenance people.)
</UL> <P>
diff --git a/lib/kernel/test/file_name_SUITE.erl b/lib/kernel/test/file_name_SUITE.erl
index 10b6b105d0..26cfd187c7 100644
--- a/lib/kernel/test/file_name_SUITE.erl
+++ b/lib/kernel/test/file_name_SUITE.erl
@@ -2,7 +2,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -77,6 +77,7 @@
init_per_testcase/2, end_per_testcase/2]).
-export([normal/1,icky/1,very_icky/1,normalize/1,home_dir/1]).
+-define(PRIM_FILE, prim_file).
init_per_testcase(_Func, Config) ->
Config.
@@ -131,7 +132,7 @@ home_dir(Config) when is_list(Config) ->
os:putenv("HOME",NewHome),
{"HOME",Save};
_ ->
- rm_rf(prim_file,NewHome),
+ rm_rf(?PRIM_FILE,NewHome),
throw(unsupported_os)
end,
try
@@ -145,7 +146,7 @@ home_dir(Config) when is_list(Config) ->
_ ->
os:putenv(SaveOldName,SaveOldValue)
end,
- rm_rf(prim_file,NewHome)
+ rm_rf(?PRIM_FILE,NewHome)
end
catch
throw:need_unicode_mode ->
@@ -190,7 +191,7 @@ normal(Config) when is_list(Config) ->
try
Priv = proplists:get_value(priv_dir, Config),
file:set_cwd(Priv),
- ok = check_normal(prim_file),
+ ok = check_normal(?PRIM_FILE),
ok = check_normal(file),
%% If all is good, delete dir again (avoid hanging dir on windows)
rm_rf(file,"normal_dir"),
@@ -210,7 +211,7 @@ icky(Config) when is_list(Config) ->
try
Priv = proplists:get_value(priv_dir, Config),
file:set_cwd(Priv),
- ok = check_icky(prim_file),
+ ok = check_icky(?PRIM_FILE),
ok = check_icky(file),
%% If all is good, delete dir again (avoid hanging dir on windows)
rm_rf(file,"icky_dir"),
@@ -229,7 +230,7 @@ very_icky(Config) when is_list(Config) ->
try
Priv = proplists:get_value(priv_dir, Config),
file:set_cwd(Priv),
- case check_very_icky(prim_file) of
+ case check_very_icky(?PRIM_FILE) of
need_unicode_mode ->
{skipped,"VM needs to be started in Unicode filename mode"};
ok ->
@@ -292,17 +293,14 @@ check_normal(Mod) ->
ok
end,
[ begin
- {ok, FD} = Mod:open(Name,[read]),
- {ok, Content} = Mod:read(FD,1024),
- ok = file:close(FD)
- end || {regular,Name,Content} <- NormalDir ],
- [ begin
{ok, FD} = Mod:open(Name,[read,binary]),
BC = list_to_binary(Content),
{ok, BC} = Mod:read(FD,1024),
ok = file:close(FD)
end || {regular,Name,Content} <- NormalDir ],
+ {error, badarg} = Mod:rename("fil1\0tmp_fil2","tmp_fil1"),
Mod:rename("fil1","tmp_fil1"),
+ {error, badarg} = Mod:read_file("tmp_fil1\0.txt"),
{ok, <<"fil1">>} = Mod:read_file("tmp_fil1"),
{error,enoent} = Mod:read_file("fil1"),
Mod:rename("tmp_fil1","fil1"),
@@ -383,7 +381,7 @@ check_icky(Mod) ->
ok
end,
- _ = make_icky_dir(Mod, treat_icky(<<"åäö_dir">>)),
+ _ = make_icky_dir(Mod, treat_icky(<<"åäö_dir"/utf8>>)),
if
UniMode and (OS =/= win32) ->
{error,enoent} = Mod:set_cwd("åäö_dir");
@@ -410,11 +408,6 @@ check_icky(Mod) ->
ok
end,
[ begin
- {ok, FD} = Mod:open(Name,[read]),
- {ok, Content} = Mod:read(FD,1024),
- ok = file:close(FD)
- end || {regular,Name,Content} <- IckyDir ],
- [ begin
{ok, FD} = Mod:open(Name,[read,binary]),
BC = list_to_binary([Content]),
{ok, BC} = Mod:read(FD,1024),
@@ -519,11 +512,6 @@ check_very_icky(Mod) ->
ok
end,
[ begin
- {ok, FD} = Mod:open(Name,[read]),
- {ok, Content} = Mod:read(FD,1024),
- ok = file:close(FD)
- end || {regular,Name,Content} <- VeryIckyDir ],
- [ begin
{ok, FD} = Mod:open(Name,[read,binary]),
BC = list_to_binary([Content]),
{ok, BC} = Mod:read(FD,1024),
@@ -644,10 +632,13 @@ make_icky_dir(Mod, IckyDirName) ->
hopeless_darwin() ->
case {os:type(),os:version()} of
- {{unix,darwin},{Major,_,_}} when Major < 9 ->
- true;
- _ ->
- false
+ {{unix,darwin},{Major,_,_}} ->
+ %% icky file names worked between 10 and 17, but started returning
+ %% EILSEQ in 18. The check against 18 is exact in case newer
+ %% versions of Darwin support them again.
+ Major < 9 orelse Major =:= 18;
+ _ ->
+ false
end.
make_very_icky_dir(Mod, DirName) ->
diff --git a/lib/kernel/test/gen_sctp_SUITE.erl b/lib/kernel/test/gen_sctp_SUITE.erl
index 620ab235a0..e4c489bd10 100644
--- a/lib/kernel/test/gen_sctp_SUITE.erl
+++ b/lib/kernel/test/gen_sctp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -1038,8 +1038,7 @@ do_from_other_process(Fun) ->
Result ->
Parent ! {Ref,Result}
catch
- Class:Reason ->
- Stacktrace = erlang:get_stacktrace(),
+ Class:Reason:Stacktrace ->
Parent ! {Ref,Class,Reason,Stacktrace}
end
end),
@@ -1460,11 +1459,11 @@ do_open_and_connect(ServerAddresses, AddressToConnectTo) ->
do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun).
%%
do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun) ->
- ServerFamily = get_family_by_addrs(ServerAddresses),
+ {ServerFamily, ServerOpts} = get_family_by_addrs(ServerAddresses),
io:format("Serving ~p addresses: ~p~n",
[ServerFamily, ServerAddresses]),
S1 = ok(gen_sctp:open(0, [{ip,Addr} || Addr <- ServerAddresses] ++
- [ServerFamily])),
+ [ServerFamily|ServerOpts])),
ok = gen_sctp:listen(S1, true),
P1 = ok(inet:port(S1)),
ClientFamily = get_family_by_addr(AddressToConnectTo),
@@ -1494,9 +1493,9 @@ do_open_and_connect(ServerAddresses, AddressToConnectTo, Fun) ->
%% If at least one of the addresses is an ipv6 address, return inet6, else inet.
get_family_by_addrs(Addresses) ->
case lists:usort([get_family_by_addr(Addr) || Addr <- Addresses]) of
- [inet, inet6] -> inet6;
- [inet] -> inet;
- [inet6] -> inet6
+ [inet, inet6] -> {inet6, [{ipv6_v6only, false}]};
+ [inet] -> {inet, []};
+ [inet6] -> {inet6, []}
end.
get_family_by_addr(Addr) when tuple_size(Addr) =:= 4 -> inet;
@@ -1617,8 +1616,7 @@ s_start(Socket, Timeout, Parent) ->
try
s_loop(Socket, Timeout, Parent, Handler, gb_trees:empty())
catch
- Class:Reason ->
- Stacktrace = erlang:get_stacktrace(),
+ Class:Reason:Stacktrace ->
io:format(?MODULE_STRING":socket exception ~w:~w at~n"
"~p.~n", [Class,Reason,Stacktrace]),
erlang:raise(Class, Reason, Stacktrace)
diff --git a/lib/kernel/test/gen_tcp_api_SUITE.erl b/lib/kernel/test/gen_tcp_api_SUITE.erl
index 92a74465b7..1be016444f 100644
--- a/lib/kernel/test/gen_tcp_api_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_api_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -302,9 +302,9 @@ t_implicit_inet6(Config) when is_list(Config) ->
end.
t_implicit_inet6(Host, Addr) ->
- case gen_tcp:listen(0, [inet6]) of
+ Loopback = {0,0,0,0,0,0,0,1},
+ case gen_tcp:listen(0, [inet6, {ip,Loopback}]) of
{ok,S1} ->
- Loopback = {0,0,0,0,0,0,0,1},
io:format("~s ~p~n", ["::1",Loopback]),
implicit_inet6(S1, Loopback),
ok = gen_tcp:close(S1),
@@ -524,10 +524,10 @@ local_handshake(S, SAddr, C, CAddr) ->
t_accept_inet6_tclass(Config) when is_list(Config) ->
TClassOpt = {tclass,8#56 bsl 2}, % Expedited forwarding
- case gen_tcp:listen(0, [inet6,TClassOpt]) of
+ Loopback = {0,0,0,0,0,0,0,1},
+ case gen_tcp:listen(0, [inet6, {ip, Loopback}, TClassOpt]) of
{ok,L} ->
LPort = ok(inet:port(L)),
- Loopback = {0,0,0,0,0,0,0,1},
Sa = ok(gen_tcp:connect(Loopback, LPort, [])),
Sb = ok(gen_tcp:accept(L)),
[TClassOpt] = ok(inet:getopts(Sb, [tclass])),
@@ -605,9 +605,9 @@ ok({ok,V}) -> V;
ok(NotOk) ->
try throw(not_ok)
catch
- Thrown ->
+ throw:Thrown:Stacktrace ->
erlang:raise(
- error, {Thrown, NotOk}, tl(erlang:get_stacktrace()))
+ error, {Thrown, NotOk}, tl(Stacktrace))
end.
get_localaddr() ->
diff --git a/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c b/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
index 2990ddda41..96938f9071 100644
--- a/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
+++ b/lib/kernel/test/gen_tcp_api_SUITE_data/gen_tcp_api_SUITE.c
@@ -17,7 +17,7 @@
*
* %CopyrightEnd%
*/
-#include "erl_nif.h"
+#include <erl_nif.h>
#include <stdio.h>
#include <string.h>
@@ -30,6 +30,7 @@
#define sock_close(s) closesocket(s)
#else
#include <sys/socket.h>
+#include <unistd.h>
#define sock_close(s) close(s)
#endif
diff --git a/lib/kernel/test/gen_tcp_misc_SUITE.erl b/lib/kernel/test/gen_tcp_misc_SUITE.erl
index 6b64c83fc2..de87bd9472 100644
--- a/lib/kernel/test/gen_tcp_misc_SUITE.erl
+++ b/lib/kernel/test/gen_tcp_misc_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -25,6 +25,7 @@
init_per_group/2,end_per_group/2,
controlling_process/1, controlling_process_self/1,
no_accept/1, close_with_pending_output/1, active_n/1,
+ active_n_closed/1,
data_before_close/1,
iter_max_socks/0, iter_max_socks/1,
get_status/1,
@@ -36,11 +37,13 @@
show_econnreset_passive/1, econnreset_after_sync_send/1,
econnreset_after_async_send_active/1,
econnreset_after_async_send_active_once/1,
- econnreset_after_async_send_passive/1, linger_zero/1,
+ econnreset_after_async_send_passive/1,
+ linger_zero/1, linger_zero_sndbuf/1,
default_options/1, http_bad_packet/1,
busy_send/1, busy_disconnect_passive/1, busy_disconnect_active/1,
fill_sendq/1, partial_recv_and_close/1,
partial_recv_and_close_2/1,partial_recv_and_close_3/1,so_priority/1,
+ recvtos/1, recvttl/1, recvtosttl/1, recvtclass/1,
%% Accept tests
primitive_accept/1,multi_accept_close_listen/1,accept_timeout/1,
accept_timeouts_in_order/1,accept_timeouts_in_order2/1,
@@ -50,9 +53,9 @@
killing_acceptor/1,killing_multi_acceptors/1,killing_multi_acceptors2/1,
several_accepts_in_one_go/1, accept_system_limit/1,
active_once_closed/1, send_timeout/1, send_timeout_active/1,
- otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1,
- wrapping_oct/0, wrapping_oct/1,
- otp_9389/1]).
+ otp_7731/1, zombie_sockets/1, otp_7816/1, otp_8102/1,
+ wrapping_oct/0, wrapping_oct/1, otp_9389/1, otp_13939/1,
+ otp_12242/1, delay_send_error/1]).
%% Internal exports.
-export([sender/3, not_owner/1, passive_sockets_server/2, priority_server/1,
@@ -72,19 +75,21 @@ suite() ->
all() ->
[controlling_process, controlling_process_self, no_accept,
close_with_pending_output, data_before_close,
- iter_max_socks, passive_sockets, active_n,
+ iter_max_socks, passive_sockets, active_n, active_n_closed,
accept_closed_by_other_process, otp_3924, closed_socket,
shutdown_active, shutdown_passive, shutdown_pending,
show_econnreset_active, show_econnreset_active_once,
show_econnreset_passive, econnreset_after_sync_send,
econnreset_after_async_send_active,
econnreset_after_async_send_active_once,
- econnreset_after_async_send_passive, linger_zero,
+ econnreset_after_async_send_passive,
+ linger_zero, linger_zero_sndbuf,
default_options, http_bad_packet, busy_send,
busy_disconnect_passive, busy_disconnect_active,
fill_sendq, partial_recv_and_close,
partial_recv_and_close_2, partial_recv_and_close_3,
- so_priority, primitive_accept,
+ so_priority, recvtos, recvttl, recvtosttl,
+ recvtclass, primitive_accept,
multi_accept_close_listen, accept_timeout,
accept_timeouts_in_order, accept_timeouts_in_order2,
accept_timeouts_in_order3, accept_timeouts_in_order4,
@@ -94,7 +99,8 @@ all() ->
killing_multi_acceptors2, several_accepts_in_one_go, accept_system_limit,
active_once_closed, send_timeout, send_timeout_active, otp_7731,
wrapping_oct,
- zombie_sockets, otp_7816, otp_8102, otp_9389].
+ zombie_sockets, otp_7816, otp_8102, otp_9389,
+ otp_12242, delay_send_error].
groups() ->
[].
@@ -1353,7 +1359,42 @@ linger_zero(Config) when is_list(Config) ->
ok = gen_tcp:close(Client),
ok = ct:sleep(1),
undefined = erlang:port_info(Client, connected),
- {error, econnreset} = gen_tcp:recv(S, PayloadSize).
+ {error, econnreset} = gen_tcp:recv(S, PayloadSize),
+ ok.
+
+
+linger_zero_sndbuf(Config) when is_list(Config) ->
+ %% The econnreset tests above prove that {linger, {true, 0}} aborts
+ %% a connection when the driver queue is empty. Here we test
+ %% that it also works when the driver queue is not empty
+ %% and the linger zero option is set on the listen socket.
+ {OS, _} = os:type(),
+ {ok, Listen} =
+ gen_tcp:listen(0, [{active, false},
+ {recbuf, 4096},
+ {show_econnreset, true},
+ {linger, {true, 0}}]),
+ {ok, Port} = inet:port(Listen),
+ {ok, Client} =
+ gen_tcp:connect(localhost, Port,
+ [{active, false},
+ {sndbuf, 4096}]),
+ {ok, Server} = gen_tcp:accept(Listen),
+ ok = gen_tcp:close(Listen),
+ PayloadSize = 1024 * 1024,
+ Payload = binary:copy(<<"0123456789ABCDEF">>, 256 * 1024), % 4 MB
+ ok = gen_tcp:send(Server, Payload),
+ case erlang:port_info(Server, queue_size) of
+ {queue_size, N} when N > 0 -> ok;
+ {queue_size, 0} when OS =:= win32 -> ok;
+ {queue_size, 0} = T -> ct:fail(T)
+ end,
+ {ok, [{linger, {true, 0}}]} = inet:getopts(Server, [linger]),
+ ok = gen_tcp:close(Server),
+ ok = ct:sleep(1),
+ undefined = erlang:port_info(Server, connected),
+ {error, closed} = gen_tcp:recv(Client, PayloadSize),
+ ok.
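%% Editor's note (illustrative, not part of the patch): {linger, {true, 0}}
%% corresponds to SO_LINGER with a zero timeout, so closing the socket
%% discards any queued data and aborts the connection with a TCP RST rather
%% than a normal FIN shutdown, which is why the peer above observes the
%% reset (reported as econnreset or closed depending on show_econnreset).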
%% Thanks to Luke Gorrie. Tests for a very specific problem with
@@ -1573,52 +1614,56 @@ fill_sendq(Config) when is_list(Config) ->
Master = self(),
Server =
spawn_link(fun () ->
- {ok,L} = gen_tcp:listen
- (0, [{active,false},binary,
- {reuseaddr,true},{packet,0}]),
+ {ok,L} = gen_tcp:listen(0, [{active,false},binary,
+ {reuseaddr,true},{packet,0}]),
{ok,Port} = inet:port(L),
Master ! {self(),client,
fill_sendq_client(Port, Master)},
fill_sendq_srv(L, Master)
end),
io:format("~p Server~n", [Server]),
- receive {Server,client,Client} ->
- io:format("~p Client~n", [Client]),
- receive {Server,reader,Reader} ->
- io:format("~p Reader~n", [Reader]),
- fill_sendq_loop(Server, Client, Reader)
+ receive
+ {Server,client,Client} ->
+ io:format("~p Client~n", [Client]),
+ receive
+ {Server,reader,Reader} ->
+ io:format("~p Reader~n", [Reader]),
+ fill_sendq_loop(Server, Client, Reader)
end
end.
fill_sendq_loop(Server, Client, Reader) ->
%% Master
%%
- receive {Server,send} ->
+ receive
+ {Server,send} ->
fill_sendq_loop(Server, Client, Reader)
after 2000 ->
%% Send queue full, sender blocked -> close client.
io:format("Send timeout, closing Client...~n", []),
Client ! {self(),close},
- receive {Server,[{error,closed}]} ->
- io:format("Got server closed.~n"),
- receive {Reader,[{error,closed}]} ->
- io:format
- ("Got reader closed.~n"),
- ok
- after 3000 ->
- ct:fail({timeout,{closed,reader}})
- end;
- {Reader,[{error,closed}]} ->
- io:format("Got reader closed.~n"),
- receive {Server,[{error,closed}]} ->
- io:format("Got server closed~n"),
- ok
- after 3000 ->
- ct:fail({timeout,{closed,server}})
- end
- after 3000 ->
- ct:fail({timeout,{closed,[server,reader]}})
- end
+ receive
+ {Server,[{error,closed}]} ->
+ io:format("Got server closed.~n"),
+ receive
+ {Reader,[{error,closed}]} ->
+ io:format("Got reader closed.~n"),
+ ok
+ after 3000 ->
+ ct:fail({timeout,{closed,reader}})
+ end;
+ {Reader,[{error,closed}]} ->
+ io:format("Got reader closed.~n"),
+ receive
+ {Server,[{error,closed}]} ->
+ io:format("Got server closed~n"),
+ ok
+ after 3000 ->
+ ct:fail({timeout,{closed,server}})
+ end
+ after 3000 ->
+ ct:fail({timeout,{closed,[server,reader]}})
+ end
end.
fill_sendq_srv(L, Master) ->
@@ -1911,28 +1956,287 @@ so_priority(Config) when is_list(Config) ->
end
end.
+
+
+%% IP_RECVTOS and IP_RECVTCLASS for IP_PKTOPTIONS
+%% do not seem to be implemented in Linux until kernel 3.1
+%%
+%% It seems pktoptions does not return valid values
+%% for IPv4 connect sockets. On the accept socket
+%% we get valid values, but on the connect socket we get
+%% the default values for TOS and TTL.
+%%
+%% Hence the argument CheckConnect, which enables
+%% checking the returned values for the connect socket.
+%% It is only used for recvtclass, which is an IPv6 option;
+%% there we get valid values from both socket ends.
+
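%% Editor's note (illustrative, not part of the patch): each Spec entry is a
%% {RecvOpt, Opt, Value} triple that test_pktoptions/5 below decomposes.
%% For Spec = [{recvtos,tos,96}] the derived lists are
%%   RecvOpts      = [recvtos],
%%   TrueRecvOpts  = [{recvtos,true}],
%%   FalseRecvOpts = [{recvtos,false}],
%%   Opts          = [tos],
%%   OptsVals      = [{tos,96}].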
+recvtos(_Config) ->
+ test_pktoptions(
+ inet, [{recvtos,tos,96}],
+ fun recvtos_ok/2,
+ false).
+
+recvtosttl(_Config) ->
+ test_pktoptions(
+ inet, [{recvtos,tos,96},{recvttl,ttl,33}],
+ fun (OSType, OSVer) ->
+ recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer)
+ end,
+ false).
+
+recvttl(_Config) ->
+ test_pktoptions(
+ inet, [{recvttl,ttl,33}],
+ fun recvttl_ok/2,
+ false).
+
+recvtclass(_Config) ->
+ {ok,IFs} = inet:getifaddrs(),
+ case
+ [Name ||
+ {Name,Opts} <- IFs,
+ lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)]
+ of
+ [_] ->
+ test_pktoptions(
+ inet6, [{recvtclass,tclass,224}],
+ fun recvtclass_ok/2,
+ true);
+ [] ->
+ {skip,{ipv6_not_supported,IFs}}
+ end.
+
+%% These version numbers are above the highest ones noted
+%% in daily tests where the test fails for a plausible reason,
+%% so skip on platforms of lower version; i.e. they are future
+%% versions where it is possible that the test might not fail.
+%%
+%% When machines with newer versions get installed,
+%% if the test still fails for a plausible reason, these
+%% version numbers should simply be increased.
+%% Or maybe we should change to only test on known good
+%% platforms - change {unix,_} to false?
+
+%% pktoptions is not supported for IPv4
+recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,6,0});
+recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {12,1,0});
+recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Does not return any value - not implemented for pktoptions
+recvtos_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0});
+%%
+recvtos_ok({unix,_}, _) -> true;
+recvtos_ok(_, _) -> false.
+
+%% pktoptions is not supported for IPv4
+recvttl_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,6,0});
+recvttl_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+%% Using the option returns einval, so it is not implemented.
+recvttl_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {12,1,0});
+recvttl_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Does not return any value - not implemented for pktoptions
+recvttl_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,7,0});
+%%
+recvttl_ok({unix,_}, _) -> true;
+recvttl_ok(_, _) -> false.
+
+%% pktoptions is not supported for IPv6
+recvtclass_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,6,0});
+recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+recvtclass_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%% Using the option returns einval, so it is not implemented.
+recvtclass_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {12,1,0});
+%% Does not return any value - not implemented for pktoptions
+recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {3,1,0});
+%%
+recvtclass_ok({unix,_}, _) -> true;
+recvtclass_ok(_, _) -> false.
+
+semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) ->
+ if
+ X1 > X2 -> false;
+ X1 < X2 -> true;
+ Y1 > Y2 -> false;
+ Y1 < Y2 -> true;
+ Z1 > Z2 -> false;
+ Z1 < Z2 -> true;
+ true -> false
+ end;
+semver_lt(_, {_,_,_}) -> false.
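%% Editor's note (illustrative, not part of the patch): semver_lt/2 is a
%% strict lexicographic "less than" on {Major,Minor,Patch} tuples, e.g.
%%   semver_lt({3,0,0}, {3,1,0})   =:= true,
%%   semver_lt({5,12,0}, {5,12,0}) =:= false,
%%   semver_lt(unknown, {1,0,0})   =:= false,
%% so "not semver_lt(OSVer, Min)" in the clauses above reads as
%% "OSVer is at least Min".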
+
+test_pktoptions(Family, Spec, OSFilter, CheckConnect) ->
+ OSType = os:type(),
+ OSVer = os:version(),
+ case OSFilter(OSType, OSVer) of
+ true ->
+ io:format("Os: ~p, ~p~n", [OSType,OSVer]),
+ test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer);
+ false ->
+ {skip,{not_supported_for_os_version,{OSType,OSVer}}}
+ end.
+%%
+test_pktoptions(Family, Spec, CheckConnect, OSType, OSVer) ->
+ Timeout = 5000,
+ RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec],
+ TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec],
+ FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec],
+ Opts = [Opt || {_,Opt,_} <- Spec],
+ OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec],
+ Address =
+ case Family of
+ inet ->
+ {127,0,0,1};
+ inet6 ->
+ {0,0,0,0,0,0,0,1}
+ end,
+ %%
+ %% Set RecvOpts on listen socket
+ {ok,L} =
+ gen_tcp:listen(
+ 0,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts]),
+ {ok,P} = inet:port(L),
+ {ok,TrueRecvOpts} = inet:getopts(L, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(L, Opts),
+ %%
+ %% Set RecvOpts and Option values on connect socket
+ {ok,S2} =
+ gen_tcp:connect(
+ Address, P,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts ++ OptsVals],
+ Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts),
+ {ok,OptsVals} = inet:getopts(S2, Opts),
+ %%
+ %% Accept socket inherits the options from listen socket
+ {ok,S1} = gen_tcp:accept(L, Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(S1, Opts),
+%%% %%
+%%% %% Handshake
+%%% ok = gen_tcp:send(S1, <<"hello">>),
+%%% {ok,<<"hello">>} = gen_tcp:recv(S2, 5, Timeout),
+%%% ok = gen_tcp:send(S2, <<"hi">>),
+%%% {ok,<<"hi">>} = gen_tcp:recv(S1, 2, Timeout),
+ %%
+ %% Verify returned remote options
+ VerifyRemOpts =
+ fun(S, Role) ->
+ case inet:getopts(S, [pktoptions]) of
+ {ok, [{pktoptions, PktOpts1}]} ->
+ PktOpts1;
+ {ok, UnexpOK1} ->
+ io:format("Unexpected OK (~w): "
+ "~n ~p"
+ "~n", [Role, UnexpOK1]),
+ exit({unexpected_getopts_ok,
+ Role,
+ Spec,
+ TrueRecvOpts,
+ OptsVals,
+ OptsValsDefault,
+ UnexpOK1});
+ {error, UnexpERR1} ->
+ io:format("Unexpected ERROR (~w): "
+ "~n ~p"
+ "~n", [Role, UnexpERR1]),
+ exit({unexpected_getopts_failure,
+ Role,
+ Spec,
+ TrueRecvOpts,
+ OptsVals,
+ OptsValsDefault,
+ UnexpERR1})
+ end
+ end,
+ OptsVals1 = VerifyRemOpts(S1, dest),
+ OptsVals2 = VerifyRemOpts(S2, orig),
+ %% {ok,[{pktoptions,OptsVals1}]} = inet:getopts(S1, [pktoptions]),
+ %% {ok,[{pktoptions,OptsVals2}]} = inet:getopts(S2, [pktoptions]),
+ (Result1 = sets_eq(OptsVals1, OptsVals))
+ orelse io:format(
+ "Accept differs: ~p neq ~p~n", [OptsVals1,OptsVals]),
+ (Result2 = sets_eq(OptsVals2, OptsValsDefault))
+ orelse io:format(
+ "Connect differs: ~p neq ~p~n",
+ [OptsVals2,OptsValsDefault]),
+ %%
+ ok = gen_tcp:close(S2),
+ ok = gen_tcp:close(S1),
+ %%
+ %%
+ %% Clear RecvOpts on listen socket and set Option values
+ ok = inet:setopts(L, FalseRecvOpts ++ OptsVals),
+ {ok,FalseRecvOpts} = inet:getopts(L, RecvOpts),
+ {ok,OptsVals} = inet:getopts(L, Opts),
+ %%
+ %% Set RecvOpts on connecting socket
+ %%
+ {ok,S4} =
+ gen_tcp:connect(
+ Address, P,
+ [Family,binary,{active,false},{send_timeout,Timeout}
+ |TrueRecvOpts],
+ Timeout),
+ {ok,TrueRecvOpts} = inet:getopts(S4, RecvOpts),
+ {ok,OptsValsDefault} = inet:getopts(S4, Opts),
+ %%
+ %% Accept socket inherits the options from listen socket
+ {ok,S3} = gen_tcp:accept(L, Timeout),
+ {ok,FalseRecvOpts} = inet:getopts(S3, RecvOpts),
+ {ok,OptsVals} = inet:getopts(S3, Opts),
+ %%
+ %% Verify returned remote options
+ {ok,[{pktoptions,[]}]} = inet:getopts(S3, [pktoptions]),
+ {ok,[{pktoptions,OptsVals4}]} = inet:getopts(S4, [pktoptions]),
+ (Result3 = sets_eq(OptsVals4, OptsVals))
+ orelse io:format(
+ "Accept2 differs: ~p neq ~p~n", [OptsVals4,OptsVals]),
+ %%
+ ok = gen_tcp:close(S4),
+ ok = gen_tcp:close(S3),
+ ok = gen_tcp:close(L),
+ (Result1 and ((not CheckConnect) or (Result2 and Result3)))
+ orelse
+ exit({failed,
+ [{OptsVals1,OptsVals4,OptsVals},
+ {OptsVals2,OptsValsDefault}],
+ {OSType,OSVer}}),
+%% exit({{OSType,OSVer},success}), % In search for the truth
+ ok.
+
+sets_eq(L1, L2) ->
+ lists:sort(L1) == lists:sort(L2).
+
+
+
%% Accept test utilities (suites are below)
millis() ->
- erlang:monotonic_time(milli_seconds).
+ erlang:monotonic_time(millisecond).
collect_accepts(0,_) -> [];
collect_accepts(N,Tmo) ->
A = millis(),
receive
{accepted,P,Msg} ->
- [{P,Msg}] ++ collect_accepts(N-1,Tmo-(millis() - A))
+ NextN = if N =:= infinity -> N; true -> N - 1 end,
+ [{P,Msg}] ++ collect_accepts(NextN, Tmo - (millis()-A))
after Tmo ->
[]
end.
-define(EXPECT_ACCEPTS(Pattern,N,Timeout),
(fun() ->
- case collect_accepts(if N =:= infinity -> -1; true -> N end,Timeout) of
+ case collect_accepts((N), (Timeout)) of
Pattern ->
ok;
- Other ->
- {error,{unexpected,{Other,process_info(self(),messages)}}}
+ Other__ ->
+ {error,{unexpected,{Other__,process_info(self(),messages)}}}
end
end)()).
@@ -2202,7 +2506,7 @@ wait_until_accepting(Proc,0) ->
exit({timeout_waiting_for_accepting,Proc});
wait_until_accepting(Proc,N) ->
case process_info(Proc,current_function) of
- {current_function,{prim_inet,accept0,2}} ->
+ {current_function,{prim_inet,accept0,3}} ->
case process_info(Proc,status) of
{status,waiting} ->
ok;
@@ -2317,7 +2621,51 @@ active_once_closed(Config) when is_list(Config) ->
ok = inet:setopts(A,[{active,once}]),
ok = receive {tcp_closed, A} -> ok after 1000 -> error end
end)().
-
+
+%% Check that active n and tcp_close messages behave as expected.
+active_n_closed(Config) when is_list(Config) ->
+ {ok, L} = gen_tcp:listen(0, [binary, {active, false}]),
+
+ P = self(),
+
+ {ok,Port} = inet:port(L),
+
+ spawn_link(fun() ->
+ Payload = <<0:50000/unit:8>>,
+ Cnt = 10000,
+ P ! {size,Cnt * byte_size(Payload)},
+ {ok, S} = gen_tcp:connect("localhost", Port, [binary, {active, false}]),
+ _ = [gen_tcp:send(S, Payload) || _ <- lists:seq(1, Cnt)],
+ gen_tcp:close(S)
+ end),
+
+ receive {size,SendSize} -> SendSize end,
+ {ok, S} = gen_tcp:accept(L),
+ inet:setopts(S, [{active, 10}]),
+ RecvSize =
+ (fun Server(Size) ->
+ receive
+ {tcp, S, Bin} ->
+ Server(byte_size(Bin) + Size);
+ {tcp_closed, S} ->
+ Size;
+ {tcp_passive, S} ->
+ inet:setopts(S, [{active, 10}]),
+ Server(Size);
+ Msg ->
+ io:format("~p~n", [Msg]),
+ Server(Size)
+ end
+ end)(0),
+
+ gen_tcp:close(L),
+
+ if SendSize =:= RecvSize ->
+ ok;
+ true ->
+ ct:fail("Send and Recv size not equal: ~p ~p",[SendSize, RecvSize])
+ end.
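%% Editor's note (illustrative, not part of the patch): with {active, N},
%% inet delivers at most N messages as {tcp, Socket, Data}, then sends
%% {tcp_passive, Socket} and puts the socket into passive mode, which is why
%% the receive loop above re-arms with inet:setopts(S, [{active, 10}]).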
+
%% Test the send_timeout socket option.
send_timeout(Config) when is_list(Config) ->
Dir = filename:dirname(code:which(?MODULE)),
@@ -3014,3 +3362,211 @@ ok({ok,V}) -> V.
get_hostname(Name) ->
"@"++Host = lists:dropwhile(fun(C) -> C =/= $@ end, atom_to_list(Name)),
Host.
+
+otp_13939(doc) ->
+ ["Check that writing to a remotely closed socket doesn't block forever "
+ "when exit_on_close is false."];
+otp_13939(suite) ->
+ [];
+otp_13939(Config) when is_list(Config) ->
+ {Pid, Ref} = spawn_opt(
+ fun() ->
+ {ok, Listener} = gen_tcp:listen(0, [{exit_on_close, false}]),
+ {ok, Port} = inet:port(Listener),
+
+ spawn_link(
+ fun() ->
+ {ok, Client} = gen_tcp:connect("localhost", Port,
+ [{active, false}]),
+ ok = gen_tcp:close(Client)
+ end),
+
+ {ok, Accepted} = gen_tcp:accept(Listener),
+
+ ok = gen_tcp:send(Accepted, <<0:(10*1024*1024*8)>>),
+
+ %% The bug surfaces when there's a delay between the send
+ %% operations; inet:getstat is a red herring.
+ timer:sleep(100),
+
+ {error, Code} = gen_tcp:send(Accepted, <<0:(10*1024*1024*8)>>),
+ ct:pal("gen_tcp:send returned ~p~n", [Code])
+ end, [link, monitor]),
+
+ receive
+ {'DOWN', Ref, process, Pid, normal} ->
+ ok
+ after 1000 ->
+ demonitor(Ref, [flush]),
+ exit(Pid, normal),
+ ct:fail("Server process blocked on send.")
+ end.
+
+otp_12242(Config) when is_list(Config) ->
+ case os:type() of
+ {win32,_} ->
+ %% Even if we set sndbuf and recbuf to small sizes,
+ %% Windows happily accepts GBytes of data in no time,
+ %% so either the second send below, which is supposed
+ %% to time out, just succeeds, or the first send, which
+ %% is supposed to fill the inet_drv I/O queue and
+ %% start waiting until more data can be sent,
+ %% instead sends all data but suffers a send
+ %% failure that closes the socket.
+ {skipped,backpressure_broken_on_win32};
+ _ ->
+ %% Find the IPv4 address of an up and running interface
+ %% that is not loopback nor pointtopoint
+ {ok,IFList} = inet:getifaddrs(),
+ ct:pal("IFList ~p~n", [IFList]),
+ case
+ lists:flatten(
+ [lists:filtermap(
+ fun ({addr,Addr}) when tuple_size(Addr) =:= 4 ->
+ {true,Addr};
+ (_) ->
+ false
+ end, Opts)
+ || {_,Opts} <- IFList,
+ case lists:keyfind(flags, 1, Opts) of
+ {_,Flags} ->
+ lists:member(up, Flags)
+ andalso
+ lists:member(running, Flags)
+ andalso
+ not lists:member(loopback, Flags)
+ andalso
+ not lists:member(pointtopoint, Flags);
+ false ->
+ false
+ end])
+ of
+ [Addr|_] ->
+ otp_12242(Addr);
+ Other ->
+ {skipped,{no_external_address,Other}}
+ end
+ end;
+%%
+otp_12242(Addr) when tuple_size(Addr) =:= 4 ->
+ ct:timetrap(30000),
+ ct:pal("Using address ~p~n", [Addr]),
+ Bufsize = 16 * 1024,
+ Datasize = 128 * 1024 * 1024, % At least 1 s on GBit interface
+ Blob = binary:copy(<<$x>>, Datasize),
+ LOpts =
+ [{backlog,4},{reuseaddr,true},{ip,Addr},
+ binary,{active,false},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ COpts =
+ [binary,{active,false},{ip,Addr},
+ {linger,{true,1}}, % 1 s
+ {send_timeout,500},
+ {recbuf,Bufsize},{sndbuf,Bufsize},{buffer,Bufsize}],
+ Dir = filename:dirname(code:which(?MODULE)),
+ {ok,ListenerNode} =
+ test_server:start_node(
+ ?UNIQ_NODE_NAME, slave, [{args,"-pa " ++ Dir}]),
+ Tester = self(),
+ Listener =
+ spawn(
+ ListenerNode,
+ fun () ->
+ {ok,L} = gen_tcp:listen(0, LOpts),
+ {ok,LPort} = inet:port(L),
+ Tester ! {self(),port,LPort},
+ {ok,A} = gen_tcp:accept(L),
+ ok = gen_tcp:close(L),
+ receive
+ {Tester,stop} ->
+ ok = gen_tcp:close(A)
+ end
+ end),
+ ListenerMref = monitor(process, Listener),
+ LPort = receive {Listener,port,P} -> P end,
+ {ok,C} = gen_tcp:connect(Addr, LPort, COpts, infinity),
+ {ok,ReadCOpts} = inet:getopts(C, [recbuf,sndbuf,buffer]),
+ ct:pal("ReadCOpts ~p~n", [ReadCOpts]),
+ %%
+ %% Fill the buffers
+ ct:pal("Sending ~p bytes~n", [Datasize]),
+ ok = gen_tcp:send(C, Blob),
+ ct:pal("Sent ~p bytes~n", [Datasize]),
+ %% Spawn the Closer,
+ %% try to ensure that the close call is in progress
+ %% before the owner proceeds with sending
+ Owner = self(),
+ {_Closer,CloserMref} =
+ spawn_opt(
+ fun () ->
+ Owner ! {tref, erlang:start_timer(50, Owner, closing)},
+ ct:pal("Calling gen_tcp:close(C)~n"),
+ try gen_tcp:close(C) of
+ Result ->
+ ct:pal("gen_tcp:close(C) -> ~p~n", [Result]),
+ ok = Result
+ catch
+ Class:Reason:Stacktrace ->
+ ct:pal(
+ "gen_tcp:close(C) >< ~p:~p~n ~p~n",
+ [Class,Reason,Stacktrace]),
+ erlang:raise(Class, Reason, Stacktrace)
+ end
+ end, [link,monitor]),
+ receive
+ {tref,Tref} ->
+ receive {timeout,Tref,_} -> ok end,
+ ct:pal("Sending ~p bytes again~n", [Datasize]),
+ %% Now should the close be in progress...
+ %% All buffers are full, remote end is not reading,
+ %% and the send timeout is 1 s so this will timeout:
+ {error,timeout} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes again timed out~n", [Datasize]),
+ ok = inet:setopts(C, [{send_timeout,10000}]),
+ %% There is a hidden timeout here. Port close is sampled
+ %% every 5 s by prim_inet:send_recv_reply.
+ %% Linger is 1 s, so the Closer will have finished before this send returns:
+ ct:pal("Sending ~p bytes with 10 s timeout~n", [Datasize]),
+ {error,closed} = gen_tcp:send(C, Blob),
+ ct:pal("Sending ~p bytes with 10 s timeout was closed~n",
+ [Datasize]),
+ normal = wait(CloserMref),
+ ct:pal("The Closer has exited~n"),
+ Listener ! {Tester,stop},
+ receive {'DOWN',ListenerMref,_,_,_} -> ok end,
+ ct:pal("The Listener has exited~n"),
+ test_server:stop_node(ListenerNode),
+ ok
+ end.
+
+wait(Mref) ->
+ receive {'DOWN',Mref,_,_,Reason} -> Reason end.
+
+%% OTP-15536
+%% Test that send error works correctly for delay_send
+delay_send_error(_Config) ->
+ {ok, LS} = gen_tcp:listen(0, [{reuseaddr, true}, {packet, 1}, {active, false}]),
+ {ok,{{0,0,0,0},PortNum}}=inet:sockname(LS),
+ P = spawn_link(
+ fun() ->
+ {ok, S} = gen_tcp:accept(LS),
+ receive die -> gen_tcp:close(S) end
+ end),
+ erlang:monitor(process, P),
+ {ok, S} = gen_tcp:connect("localhost", PortNum,
+ [{packet, 1}, {active, false}, {delay_send, true}]),
+
+ %% Do a couple of sends first to see that it works
+ ok = gen_tcp:send(S, "hello"),
+ ok = gen_tcp:send(S, "hello"),
+ ok = gen_tcp:send(S, "hello"),
+
+ %% Make the receiver close
+ P ! die,
+ receive _Down -> ok end,
+
+ ok = gen_tcp:send(S, "hello"),
+ timer:sleep(500), %% Sleep in order for delay_send to have time to trigger
+
+ %% This used to result in a double free
+ {error, closed} = gen_tcp:send(S, "hello").
diff --git a/lib/kernel/test/gen_udp_SUITE.erl b/lib/kernel/test/gen_udp_SUITE.erl
index 1029d7ef0a..70d8caf478 100644
--- a/lib/kernel/test/gen_udp_SUITE.erl
+++ b/lib/kernel/test/gen_udp_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -34,8 +34,10 @@
-export([init_per_testcase/2, end_per_testcase/2]).
-export([send_to_closed/1, active_n/1,
- buffer_size/1, binary_passive_recv/1, bad_address/1,
+ buffer_size/1, binary_passive_recv/1, max_buffer_size/1, bad_address/1,
read_packets/1, open_fd/1, connect/1, implicit_inet6/1,
+ recvtos/1, recvtosttl/1, recvttl/1, recvtclass/1,
+ sendtos/1, sendtosttl/1, sendttl/1, sendtclass/1,
local_basic/1, local_unbound/1,
local_fdopen/1, local_fdopen_unbound/1, local_abstract/1]).
@@ -44,9 +46,11 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [send_to_closed, buffer_size, binary_passive_recv,
+ [send_to_closed, buffer_size, binary_passive_recv, max_buffer_size,
bad_address, read_packets, open_fd, connect,
implicit_inet6, active_n,
+ recvtos, recvtosttl, recvttl, recvtclass,
+ sendtos, sendtosttl, sendttl, sendtclass,
{group, local}].
groups() ->
@@ -237,6 +241,14 @@ buffer_size_server_recv(Socket, IP, Port, Cnt) ->
end.
+%%-------------------------------------------------------------
+%% OTP-15206: Keep buffer small for udp
+%%-------------------------------------------------------------
+max_buffer_size(Config) when is_list(Config) ->
+ {ok, Socket} = gen_udp:open(0, [binary]),
+ ok = inet:setopts(Socket,[{recbuf, 1 bsl 20}]),
+ {ok, [{buffer, 65536}]} = inet:getopts(Socket,[buffer]),
+ gen_udp:close(Socket).
%%-------------------------------------------------------------
%% OTP-3823 gen_udp:recv does not return address in binary mode
@@ -288,69 +300,56 @@ bad_address(Config) when is_list(Config) ->
%%
%% Starts a slave node that on command sends a bunch of messages
%% to our UDP port. The receiving process just receives and
-%% ignores the incoming messages, but counts them.
-%% A tracing process traces the receiving process for
-%% 'receive' and scheduling events. From the trace,
-%% message contents is verified; and, how many messages
-%% are received per in/out scheduling, which should be
-%% the same as the read_packets parameter.
-%%
-%% What happens on the SMP emulator remains to be seen...
-%%
+%% ignores the incoming messages.
+%% A tracing process traces the receiving port for
+%% 'send' and scheduling events. From the trace we check
+%% how many messages are received per in/out scheduling,
+%% which should never be more than the read_packets parameter.
%% OTP-6249 UDP option for number of packet reads.
read_packets(Config) when is_list(Config) ->
- case erlang:system_info(smp_support) of
- false ->
- read_packets_1();
- true ->
- %% We would need some new sort of tracing to test this
- %% option reliably in an SMP emulator.
- {skip,"SMP emulator"}
- end.
-
-read_packets_1() ->
N1 = 5,
- N2 = 7,
+ N2 = 1,
+ Msgs = 30000,
{ok,R} = gen_udp:open(0, [{read_packets,N1}]),
{ok,RP} = inet:port(R),
{ok,Node} = start_node(gen_udp_SUITE_read_packets),
- Die = make_ref(),
- Loop = erlang:spawn_link(fun () -> infinite_loop(Die) end),
%%
- Msgs1 = [erlang:integer_to_list(M) || M <- lists:seq(1, N1*3)],
- [V1|_] = read_packets_test(R, RP, Msgs1, Node),
+ {V1, Trace1} = read_packets_test(R, RP, Msgs, Node),
{ok,[{read_packets,N1}]} = inet:getopts(R, [read_packets]),
%%
ok = inet:setopts(R, [{read_packets,N2}]),
- Msgs2 = [erlang:integer_to_list(M) || M <- lists:seq(1, N2*3)],
- [V2|_] = read_packets_test(R, RP, Msgs2, Node),
+ {V2, Trace2} = read_packets_test(R, RP, Msgs, Node),
{ok,[{read_packets,N2}]} = inet:getopts(R, [read_packets]),
%%
stop_node(Node),
- Mref = erlang:monitor(process, Loop),
- Loop ! Die,
- receive
- {'DOWN',Mref,_,_, normal} ->
- case {V1,V2} of
- {N1,N2} ->
- ok;
- _ when V1 =/= N1, V2 =/= N2 ->
- ok
- end
+ ct:log("N1=~p, V1=~p vs N2=~p, V2=~p",[N1,V1,N2,V2]),
+
+ dump_terms(Config, "trace1.terms", Trace1),
+ dump_terms(Config, "trace2.terms", Trace2),
+
+ %% Because of the inherent raciness of the feature it is
+ %% hard to test that it behaves correctly.
+ %% Right now (OTP 21) a port task takes 5% of the
+ %% allotted port task reductions to execute, so
+ %% the max number of executions a port is allowed to
+ %% do before being re-scheduled is N * 20
+
+ if
+ V1 > (N1 * 20) ->
+ ct:fail("Got ~p msgs, max was ~p", [V1, N1]);
+ V2 > (N2 * 20) ->
+ ct:fail("Got ~p msgs, max was ~p", [V2, N2]);
+ true ->
+ ok
end.
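%% Editor's note (illustrative, not part of the patch): with the 5% figure
%% above, a port gets at most 20 executions per scheduling, and each
%% execution reads at most read_packets datagrams, so the bounds checked are
%% N1 * 20 = 5 * 20 = 100 and N2 * 20 = 1 * 20 = 20 messages per in/out
%% cycle; V1 and V2 are the observed maxima extracted from the traces.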
-infinite_loop(Die) ->
- receive
- Die ->
- ok
- after
- 0 ->
- infinite_loop(Die)
- end.
+dump_terms(Config, Name, Terms) ->
+ FName = filename:join(proplists:get_value(priv_dir, Config),Name),
+ file:write_file(FName, term_to_binary(Terms)),
+ ct:log("Logged terms to ~s",[FName]).
read_packets_test(R, RP, Msgs, Node) ->
- Len = length(Msgs),
Receiver = self(),
Tracer =
spawn_link(
@@ -375,24 +374,24 @@ read_packets_test(R, RP, Msgs, Node) ->
[link,{priority,high}]),
receive
{Sender,{port,SP}} ->
- erlang:trace(self(), true,
- [running,'receive',{tracer,Tracer}]),
+ erlang:trace(R, true,
+ [running_ports,'send',{tracer,Tracer}]),
erlang:yield(),
Sender ! {Receiver,go},
- read_packets_recv(Len),
- erlang:trace(self(), false, [all]),
+ read_packets_recv(Msgs),
+ erlang:trace(R, false, [all]),
Tracer ! {Receiver,get_trace},
receive
{Tracer,{trace,Trace}} ->
- read_packets_verify(R, SP, Msgs, Trace)
+ {read_packets_verify(R, SP, Trace), Trace}
end
end.
-read_packets_send(S, RP, [Msg|Msgs]) ->
- ok = gen_udp:send(S, localhost, RP, Msg),
- read_packets_send(S, RP, Msgs);
-read_packets_send(_S, _RP, []) ->
- ok.
+read_packets_send(_S, _RP, 0) ->
+ ok;
+read_packets_send(S, RP, Msgs) ->
+ ok = gen_udp:send(S, localhost, RP, "UDP FLOOOOOOD"),
+ read_packets_send(S, RP, Msgs - 1).
read_packets_recv(0) ->
ok;
@@ -404,23 +403,24 @@ read_packets_recv(N) ->
timeout
end.
-read_packets_verify(R, SP, Msg, Trace) ->
- lists:reverse(
- lists:sort(read_packets_verify(R, SP, Msg, Trace, 0))).
-
-read_packets_verify(R, SP, Msgs, [{trace,Self,OutIn,_}|Trace], M)
- when Self =:= self(), OutIn =:= out;
- Self =:= self(), OutIn =:= in ->
- push(M, read_packets_verify(R, SP, Msgs, Trace, 0));
-read_packets_verify(R, SP, [Msg|Msgs],
- [{trace,Self,'receive',{udp,R,{127,0,0,1},SP,Msg}}
- |Trace], M)
+read_packets_verify(R, SP, Trace) ->
+ [Max | _] = Pkts = lists:reverse(lists:sort(read_packets_verify(R, SP, Trace, 0))),
+ ct:pal("~p",[lists:sublist(Pkts,10)]),
+ Max.
+
+read_packets_verify(R, SP, [{trace,R,OutIn,_}|Trace], M)
+ when OutIn =:= out; OutIn =:= in ->
+ push(M, read_packets_verify(R, SP, Trace, 0));
+read_packets_verify(R, SP, [{trace, R,'receive',timeout}|Trace], M) ->
+ push(M, read_packets_verify(R, SP, Trace, 0));
+read_packets_verify(R, SP,
+ [{trace,R,'send',{udp,R,{127,0,0,1},SP,_Msg}, Self} | Trace], M)
when Self =:= self() ->
- read_packets_verify(R, SP, Msgs, Trace, M+1);
-read_packets_verify(_R, _SP, [], [], M) ->
+ read_packets_verify(R, SP, Trace, M+1);
+read_packets_verify(_R, _SP, [], M) ->
push(M, []);
-read_packets_verify(_R, _SP, Msgs, Trace, M) ->
- ct:fail({read_packets_verify,mismatch,Msgs,Trace,M}).
+read_packets_verify(_R, _SP, Trace, M) ->
+ ct:fail({read_packets_verify,mismatch,Trace,M}).
push(0, Vs) ->
Vs;
@@ -566,6 +566,277 @@ active_n(Config) when is_list(Config) ->
ok.
+
+recvtos(_Config) ->
+ test_recv_opts(
+ inet, [{recvtos,tos,96}], false,
+ fun recvtos_ok/2).
+
+recvtosttl(_Config) ->
+ test_recv_opts(
+ inet, [{recvtos,tos,96},{recvttl,ttl,33}], false,
+ fun (OSType, OSVer) ->
+ recvtos_ok(OSType, OSVer) andalso recvttl_ok(OSType, OSVer)
+ end).
+
+recvttl(_Config) ->
+ test_recv_opts(
+ inet, [{recvttl,ttl,33}], false,
+ fun recvttl_ok/2).
+
+recvtclass(_Config) ->
+ {ok,IFs} = inet:getifaddrs(),
+ case
+ [Name ||
+ {Name,Opts} <- IFs,
+ lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)]
+ of
+ [_] ->
+ test_recv_opts(
+ inet6, [{recvtclass,tclass,224}], false,
+ fun recvtclass_ok/2);
+ [] ->
+ {skip,ipv6_not_supported,IFs}
+ end.
+
+
+sendtos(_Config) ->
+ test_recv_opts(
+ inet, [{recvtos,tos,96}], true,
+ fun sendtos_ok/2).
+
+sendtosttl(_Config) ->
+ test_recv_opts(
+ inet, [{recvtos,tos,96},{recvttl,ttl,33}], true,
+ fun (OSType, OSVer) ->
+ sendtos_ok(OSType, OSVer) andalso sendttl_ok(OSType, OSVer)
+ end).
+
+sendttl(_Config) ->
+ test_recv_opts(
+ inet, [{recvttl,ttl,33}], true,
+ fun sendttl_ok/2).
+
+sendtclass(_Config) ->
+ {ok,IFs} = inet:getifaddrs(),
+ case
+ [Name ||
+ {Name,Opts} <- IFs,
+ lists:member({addr,{0,0,0,0,0,0,0,1}}, Opts)]
+ of
+ [_] ->
+ test_recv_opts(
+ inet6, [{recvtclass,tclass,224}], true,
+ fun sendtclass_ok/2);
+ [] ->
+ {skip,ipv6_not_supported,IFs}
+ end.
+
+%% These version numbers are just above the highest ones noted in daily tests
+%% where the test fails for a plausible reason, that is, the lowest versions
+%% where we can expect that the test might succeed, so
+%% skip on platforms lower than this.
+%%
+%% On newer versions it might be fixed, but we'll see about that
+%% when machines with newer versions get installed...
+%% If the test still fails for a plausible reason, these
+%% version numbers should simply be increased.
+%% Or maybe we should change to only test on known good platforms?
+
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {17,6,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,6,0});
+%% Using the option returns einval, so it is not implemented.
+recvtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%%
+recvtos_ok({unix,_}, _) -> true;
+recvtos_ok(_, _) -> false.
+
+%% Option has no effect
+recvttl_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%%
+recvttl_ok({unix,_}, _) -> true;
+recvttl_ok(_, _) -> false.
+
+%% Using the option returns einval, so it is not implemented.
+recvtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {9,9,0});
+recvtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,6,11});
+%% Option has no effect
+recvtclass_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%%
+recvtclass_ok({unix,_}, _) -> true;
+recvtclass_ok(_, _) -> false.
+
+
+%% To send ancillary data seems to require much higher version numbers
+%% than receiving it...
+%%
+
+%% Using the option returns einval, so it is not implemented.
+sendtos_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+sendtos_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,6,0});
+sendtos_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+sendtos_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {4,0,0});
+sendtos_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {12,1,0});
+%%
+sendtos_ok({unix,_}, _) -> true;
+sendtos_ok(_, _) -> false.
+
+%% Using the option returns einval, so it is not implemented.
+sendttl_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {19,0,0});
+sendttl_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {4,0,0});
+%% Using the option returns enoprotoopt, so it is not implemented.
+sendttl_ok({unix,freebsd}, OSVer) -> not semver_lt(OSVer, {12,1,0});
+%% Option has no effect
+sendttl_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+sendttl_ok({unix,openbsd}, OSVer) -> not semver_lt(OSVer, {6,6,0});
+%%
+sendttl_ok({unix,_}, _) -> true;
+sendttl_ok(_, _) -> false.
+
+%% Using the option returns einval, so it is not implemented.
+sendtclass_ok({unix,darwin}, OSVer) -> not semver_lt(OSVer, {9,9,0});
+sendtclass_ok({unix,linux}, OSVer) -> not semver_lt(OSVer, {2,6,11});
+%% Option has no effect
+sendtclass_ok({unix,sunos}, OSVer) -> not semver_lt(OSVer, {5,12,0});
+%%
+sendtclass_ok({unix,_}, _) -> true;
+sendtclass_ok(_, _) -> false.
+
+
+semver_lt({X1,Y1,Z1}, {X2,Y2,Z2}) ->
+ if
+ X1 > X2 -> false;
+ X1 < X2 -> true;
+ Y1 > Y2 -> false;
+ Y1 < Y2 -> true;
+ Z1 > Z2 -> false;
+ Z1 < Z2 -> true;
+ true -> false
+ end;
+semver_lt(_, {_,_,_}) -> false.
+
+test_recv_opts(Family, Spec, TestSend, OSFilter) ->
+ OSType = os:type(),
+ OSVer = os:version(),
+ case OSFilter(OSType, OSVer) of
+ true ->
+ io:format("Os: ~p, ~p~n", [OSType,OSVer]),
+ test_recv_opts(Family, Spec, TestSend, OSType, OSVer);
+ false ->
+ {skip,{not_supported_for_os_version,{OSType,OSVer}}}
+ end.
+%%
+test_recv_opts(Family, Spec, TestSend, _OSType, _OSVer) ->
+ Timeout = 5000,
+ RecvOpts = [RecvOpt || {RecvOpt,_,_} <- Spec],
+ TrueRecvOpts = [{RecvOpt,true} || {RecvOpt,_,_} <- Spec],
+ FalseRecvOpts = [{RecvOpt,false} || {RecvOpt,_,_} <- Spec],
+ Opts = [Opt || {_,Opt,_} <- Spec],
+ OptsVals = [{Opt,Val} || {_,Opt,Val} <- Spec],
+ TrueRecvOpts_OptsVals = TrueRecvOpts ++ OptsVals,
+ Addr =
+ case Family of
+ inet ->
+ {127,0,0,1};
+ inet6 ->
+ {0,0,0,0,0,0,0,1}
+ end,
+ %%
+ {ok,S1} =
+ gen_udp:open(0, [Family,binary,{active,false}|TrueRecvOpts]),
+ {ok,P1} = inet:port(S1),
+ {ok,TrueRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S1, FalseRecvOpts),
+ {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S1, TrueRecvOpts_OptsVals),
+ {ok,TrueRecvOpts_OptsVals} = inet:getopts(S1, RecvOpts ++ Opts),
+ %%
+ %% S1 now has true receive options and set option values
+ %%
+ {ok,S2} =
+ gen_udp:open(0, [Family,binary,{active,true}|FalseRecvOpts]),
+ {ok,P2} = inet:port(S2),
+ {ok,FalseRecvOpts_OptsVals2} = inet:getopts(S2, RecvOpts ++ Opts),
+ OptsVals2 = FalseRecvOpts_OptsVals2 -- FalseRecvOpts,
+ %%
+ %% S2 now has false receive options and default option values,
+ %% OptsVals2 contains the default option values
+ %%
+ ok = gen_udp:send(S2, {Addr,P1}, <<"abcde">>),
+ ok = gen_udp:send(S1, Addr, P2, <<"fghij">>),
+ TestSend andalso
+ begin
+ ok = gen_udp:send(S2, Addr, P1, OptsVals, <<"ABCDE">>),
+ ok = gen_udp:send(S2, {Addr,P1}, OptsVals, <<"12345">>)
+ end,
+ {ok,{_,P2,OptsVals3,<<"abcde">>}} = gen_udp:recv(S1, 0, Timeout),
+ verify_sets_eq(OptsVals3, OptsVals2),
+ TestSend andalso
+ begin
+ {ok,{_,P2,OptsVals0,<<"ABCDE">>}} = gen_udp:recv(S1, 0, Timeout),
+ {ok,{_,P2,OptsVals1,<<"12345">>}} = gen_udp:recv(S1, 0, Timeout),
+ verify_sets_eq(OptsVals0, OptsVals),
+ verify_sets_eq(OptsVals1, OptsVals)
+ end,
+ receive
+ {udp,S2,_,P1,<<"fghij">>} ->
+ ok;
+ Other1 ->
+ exit({unexpected,Other1})
+ after Timeout ->
+ exit(timeout)
+ end,
+ %%
+ ok = inet:setopts(S1, FalseRecvOpts),
+ {ok,FalseRecvOpts} = inet:getopts(S1, RecvOpts),
+ ok = inet:setopts(S2, TrueRecvOpts),
+ {ok,TrueRecvOpts} = inet:getopts(S2, RecvOpts),
+ %%
+ %% S1 now has false receive options and set option values
+ %%
+ %% S2 now has true receive options and default option values
+ %%
+ ok = gen_udp:send(S2, {Addr,P1}, [], <<"klmno">>),
+ ok = gen_udp:send(S1, {Family,{loopback,P2}}, <<"pqrst">>),
+ TestSend andalso
+ begin
+ ok = gen_udp:send(S1, {Family,{loopback,P2}}, OptsVals2, <<"PQRST">>)
+ end,
+ {ok,{_,P2,<<"klmno">>}} = gen_udp:recv(S1, 0, Timeout),
+ receive
+ {udp,S2,_,P1,OptsVals4,<<"pqrst">>} ->
+ verify_sets_eq(OptsVals4, OptsVals);
+ Other2 ->
+ exit({unexpected,Other2})
+ after Timeout ->
+ exit(timeout)
+ end,
+ TestSend andalso
+ receive
+ {udp,S2,_,P1,OptsVals5,<<"PQRST">>} ->
+ verify_sets_eq(OptsVals5, OptsVals2);
+ Other3 ->
+ exit({unexpected,Other3})
+ after Timeout ->
+ exit(timeout)
+ end,
+ ok = gen_udp:close(S1),
+ ok = gen_udp:close(S2),
+%%% exit({{_OSType,_OSVer},success}), % In search for the truth
+ ok.
+
+verify_sets_eq(L1, L2) ->
+ L = lists:sort(L1),
+ case lists:sort(L2) of
+ L ->
+ ok;
+ _ ->
+ exit({sets_neq,L1,L2})
+ end.
+
+
local_basic(_Config) ->
SFile = local_filename(server),
SAddr = {local,bin_filename(SFile)},
@@ -707,6 +978,10 @@ connect(Config) when is_list(Config) ->
implicit_inet6(Config) when is_list(Config) ->
Host = ok(inet:gethostname()),
case inet:getaddr(Host, inet6) of
+ {ok,{16#fe80,0,0,0,_,_,_,_} = Addr} ->
+ {skip,
+ "Got link local IPv6 address: "
+ ++inet:ntoa(Addr)};
{ok,Addr} ->
implicit_inet6(Host, Addr);
{error,Reason} ->
@@ -717,9 +992,9 @@ implicit_inet6(Config) when is_list(Config) ->
implicit_inet6(Host, Addr) ->
Active = {active,false},
- case gen_udp:open(0, [inet6,Active]) of
+ Loopback = {0,0,0,0,0,0,0,1},
+ case gen_udp:open(0, [inet6,Active,{ip, Loopback}]) of
{ok,S1} ->
- Loopback = {0,0,0,0,0,0,0,1},
io:format("~s ~p~n", ["::1",Loopback]),
implicit_inet6(S1, Active, Loopback),
ok = gen_udp:close(S1),
@@ -757,11 +1032,12 @@ ok({ok,V}) -> V;
ok(NotOk) ->
try throw(not_ok)
catch
- Thrown ->
- erlang:raise(
- error, {Thrown, NotOk}, tl(erlang:get_stacktrace()))
+ throw:not_ok:Stacktrace ->
+ raise_error({not_ok, NotOk}, tl(Stacktrace))
end.
+raise_error(Reason, Stacktrace) ->
+ erlang:raise(error, Reason, Stacktrace).
local_filename(Tag) ->
"/tmp/" ?MODULE_STRING "_" ++ os:getpid() ++ "_" ++ atom_to_list(Tag).
diff --git a/lib/kernel/test/global_SUITE.erl b/lib/kernel/test/global_SUITE.erl
index 0a7f73c344..5bff9cc292 100644
--- a/lib/kernel/test/global_SUITE.erl
+++ b/lib/kernel/test/global_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2019. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -1383,7 +1383,7 @@ ring(Config) when is_list(Config) ->
rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]),
%% sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -1466,7 +1466,7 @@ simple_ring(Config) when is_list(Config) ->
rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
%% sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -1542,7 +1542,7 @@ line(Config) when is_list(Config) ->
rpc_cast(Cp8, ?MODULE, single_node, [Time, Cp7, Config]),
%% Sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -1626,7 +1626,7 @@ simple_line(Config) when is_list(Config) ->
rpc_cast(Cp5, ?MODULE, single_node, [Time, Cp4, Config]),
%% sleep to make the partitioned net ready
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
pong = net_adm:ping(Cp0),
pong = net_adm:ping(Cp1),
@@ -2512,8 +2512,10 @@ re_register_name(Config) when is_list(Config) ->
Me = self(),
Pid1 = spawn(fun() -> proc(Me) end),
yes = global:register_name(name, Pid1),
+ wait_for_monitor(Pid1),
Pid2 = spawn(fun() -> proc(Me) end),
_ = global:re_register_name(name, Pid2),
+ wait_for_monitor(Pid2),
Pid2 ! die,
Pid1 ! die,
receive {Pid1, MonitoredBy1} -> [] = MonitoredBy1 end,
@@ -2522,6 +2524,15 @@ re_register_name(Config) when is_list(Config) ->
init_condition(Config),
ok.
+wait_for_monitor(Pid) ->
+ case process_info(Pid, monitored_by) of
+ {monitored_by, []} ->
+ timer:sleep(1),
+ wait_for_monitor(Pid);
+ {monitored_by, [_]} ->
+ ok
+ end.
+
proc(Parent) ->
receive die -> ok end,
{monitored_by, MonitoredBy} = process_info(self(), monitored_by),
@@ -3470,8 +3481,8 @@ start_procs(Parent, N1, N2, N3, Config) ->
Pid6 = rpc:call(N3, ?MODULE, start_proc3, [test4]),
assert_pid(Pid6),
yes = global:register_name(test1, Pid3),
- yes = global:register_name(test2, Pid4, {global, notify_all_name}),
- yes = global:register_name(test3, Pid5, {global, random_notify_name}),
+ yes = global:register_name(test2, Pid4, fun global:notify_all_name/3),
+ yes = global:register_name(test3, Pid5, fun global:random_notify_name/3),
Resolve = fun(Name, Pid1, Pid2) ->
Parent ! {resolve_called, Name, node()},
{Min, Max} = minmax(Pid1, Pid2),
@@ -3546,7 +3557,7 @@ start_proc_basic(Name) ->
end.
init_proc_basic(Parent, Name) ->
- X = global:register_name(Name, self(), {?MODULE, fix_basic_name}),
+ X = global:register_name(Name, self(), fun ?MODULE:fix_basic_name/3),
Parent ! {self(),X},
loop().
@@ -3555,7 +3566,7 @@ single_node(Time, Node, Config) ->
lists:foreach(fun(N) -> _ = erlang:disconnect_node(N) end, nodes()),
?UNTIL(get_known(node()) =:= [node()]),
spawn(?MODULE, init_2, []),
- ct:sleep(Time - msec()),
+ sleep(Time - msec()),
net_adm:ping(Node).
init_2() ->
@@ -3791,15 +3802,6 @@ stop() ->
test_server:stop_node(Node)
end, nodes()).
-dbg_logs(Name) -> dbg_logs(Name, ?NODES).
-
-dbg_logs(Name, Nodes) ->
- lists:foreach(fun(N) ->
- F = lists:concat([Name, ".log.", N, ".txt"]),
- ok = sys:log_to_file({global_name_server, N}, F)
- end, Nodes).
-
-
%% Tests that locally loaded nodes do not loose contact with other nodes.
global_lost_nodes(Config) when is_list(Config) ->
Timeout = 60,
@@ -4018,13 +4020,6 @@ collect_nodes(N, Max) ->
[Node | collect_nodes(N+1, Max)]
end.
-only_element(_E, []) ->
- true;
-only_element(E, [E|R]) ->
- only_element(E, R);
-only_element(_E, _) ->
- false.
-
exit_p(Pid) ->
Ref = erlang:monitor(process, Pid),
Pid ! die,
@@ -4047,6 +4042,11 @@ wait_for_exit_fast(Pid) ->
ok
end.
+sleep(Time) when Time > 0 ->
+ ct:sleep(Time);
+sleep(_Time) ->
+ ok.
+
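
The sleep/1 wrapper above only delegates to ct:sleep/1 for a positive remaining time, so a deadline that has already passed no longer breaks the test. A sketch of the behaviour:

    sleep(500),        %% positive remainder: delegates to ct:sleep(500)
    ok = sleep(-500).  %% deadline already passed: returns ok without sleeping
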
check_everywhere(Nodes, Name, Config) ->
?UNTIL(begin
case rpc:multicall(Nodes, global, whereis_name, [Name]) of
@@ -4171,10 +4171,10 @@ rpc_cast(Node, Module, Function, Args, File) ->
%% The emulator now ensures that the node has been removed from
%% nodes().
-rpc_disconnect_node(Node, DisconnectedNode, _Config) ->
- True = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]),
- False = lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, [])),
- {true, false} = {True, False}.
+rpc_disconnect_node(Node, DisconnectedNode, Config) ->
+ true = rpc:call(Node, erlang, disconnect_node, [DisconnectedNode]),
+ ?UNTIL
+ (not lists:member(DisconnectedNode, rpc:call(Node, erlang, nodes, []))).
%%%
%%% Utility
diff --git a/lib/kernel/test/heart_SUITE.erl b/lib/kernel/test/heart_SUITE.erl
index 45032faf6d..f5ca6d0e1d 100644
--- a/lib/kernel/test/heart_SUITE.erl
+++ b/lib/kernel/test/heart_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -168,7 +168,7 @@ reboot(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
rpc:call(Node, init, reboot, []),
receive
@@ -203,7 +203,7 @@ node_start_immediately_after_crash_test(Config) when is_list(Config) ->
[{"ERL_CRASH_DUMP_SECONDS", "0"}]),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
Mod = exhaust_atoms,
@@ -254,7 +254,7 @@ node_start_soon_after_crash_test(Config) when is_list(Config) ->
[{"ERL_CRASH_DUMP_SECONDS", "10"}]),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
Mod = exhaust_atoms,
@@ -309,7 +309,7 @@ set_cmd(Config) when is_list(Config) ->
clear_cmd(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
ok = rpc:call(Node, heart, set_cmd,
- [atom_to_list(lib:progname()) ++
+ [ct:get_progname() ++
" -noshell -heart " ++ name(Node) ++ "&"]),
rpc:call(Node, init, reboot, []),
receive
@@ -346,9 +346,16 @@ clear_cmd(Config) when is_list(Config) ->
get_cmd(Config) when is_list(Config) ->
{ok, Node} = start_check(slave, ?UNIQ_NODE_NAME),
- Cmd = "test",
- ok = rpc:call(Node, heart, set_cmd, [Cmd]),
- {ok, Cmd} = rpc:call(Node, heart, get_cmd, []),
+
+ ShortCmd = "test",
+ ok = rpc:call(Node, heart, set_cmd, [ShortCmd]),
+ {ok, ShortCmd} = rpc:call(Node, heart, get_cmd, []),
+
+ %% This would hang prior to OTP-15024 being fixed.
+ LongCmd = [$a || _ <- lists:seq(1, 160)],
+ ok = rpc:call(Node, heart, set_cmd, [LongCmd]),
+ {ok, LongCmd} = rpc:call(Node, heart, get_cmd, []),
+
stop_node(Node),
ok.
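
The 160-character command exercises the fix referenced in the comment above (OTP-15024), where heart:get_cmd/0 could hang for long commands. An equivalent way to build and round-trip the long command (Node as returned by start_check/2 above; a sketch, not part of the patch):

    LongCmd = lists:duplicate(160, $a),      %% same as [$a || _ <- lists:seq(1, 160)]
    ok = rpc:call(Node, heart, set_cmd, [LongCmd]),
    {ok, LongCmd} = rpc:call(Node, heart, get_cmd, []).
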
diff --git a/lib/kernel/test/inet_SUITE.erl b/lib/kernel/test/inet_SUITE.erl
index f60c13d2e3..44ec7e7076 100644
--- a/lib/kernel/test/inet_SUITE.erl
+++ b/lib/kernel/test/inet_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,6 +21,7 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/inet.hrl").
+-include_lib("kernel/src/inet_res.hrl").
-include_lib("kernel/src/inet_dns.hrl").
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
@@ -34,13 +35,15 @@
ipv4_to_ipv6/0, ipv4_to_ipv6/1,
host_and_addr/0, host_and_addr/1,
t_gethostnative/1,
- gethostnative_parallell/1, cname_loop/1,
+ gethostnative_parallell/1, cname_loop/1, missing_hosts_reload/1,
gethostnative_soft_restart/0, gethostnative_soft_restart/1,
gethostnative_debug_level/0, gethostnative_debug_level/1,
lookup_bad_search_option/1,
getif/1,
getif_ifr_name_overflow/1,getservbyname_overflow/1, getifaddrs/1,
- parse_strict_address/1, simple_netns/1, simple_netns_open/1]).
+ parse_strict_address/1, ipv4_mapped_ipv6_address/1,
+ simple_netns/1, simple_netns_open/1,
+ simple_bind_to_device/1, simple_bind_to_device_open/1]).
-export([get_hosts/1, get_ipv6_hosts/1, parse_hosts/1, parse_address/1,
kill_gethost/0, parallell_gethost/0, test_netns/0]).
@@ -54,11 +57,12 @@ all() ->
[t_gethostbyaddr, t_gethostbyname, t_getaddr,
t_gethostbyaddr_v6, t_gethostbyname_v6, t_getaddr_v6,
ipv4_to_ipv6, host_and_addr, {group, parse},
- t_gethostnative, gethostnative_parallell, cname_loop,
+ t_gethostnative, gethostnative_parallell, cname_loop, missing_hosts_reload,
gethostnative_debug_level, gethostnative_soft_restart,
lookup_bad_search_option,
getif, getif_ifr_name_overflow, getservbyname_overflow,
- getifaddrs, parse_strict_address, simple_netns, simple_netns_open].
+ getifaddrs, parse_strict_address, simple_netns, simple_netns_open,
+ simple_bind_to_device, simple_bind_to_device_open].
groups() ->
[{parse, [], [parse_hosts, parse_address]}].
@@ -94,7 +98,7 @@ end_per_group(_GroupName, Config) ->
init_per_testcase(lookup_bad_search_option, Config) ->
Db = inet_db,
Key = res_lookup,
- %% The bad option can not enter through inet_db:set_lookup/1,
+ %% The bad option cannot enter through inet_db:set_lookup/1,
%% but through e.g .inetrc.
Prev = ets:lookup(Db, Key),
ets:delete(Db, Key),
@@ -445,91 +449,125 @@ parse_hosts(Config) when is_list(Config) ->
inet_parse:resolv(ResolvErr1).
parse_address(Config) when is_list(Config) ->
- V4Strict =
+ V4Reversable =
[{{0,0,0,0},"0.0.0.0"},
- {{1,2,3,4},"1.2.3.4"},
+ {{1,2,3,4},"1.2.3.4"},
{{253,252,251,250},"253.252.251.250"},
{{1,2,255,254},"1.2.255.254"}],
- V6Strict =
+ V6Reversable =
[{{0,0,0,0,0,0,0,0},"::"},
+ {{0,0,0,0,0,0,0,1},"::1"},
+ {{0,0,0,0,0,0,0,2},"::0.0.0.2"},
{{15,0,0,0,0,0,0,2},"f::2"},
- {{15,16#f11,0,0,0,0,256,2},"f:f11::0100:2"},
- {{0,0,0,0,0,0,0,16#17},"::17"},
- {{16#700,0,0,0,0,0,0,0},"0700::"},
- {{0,0,0,0,0,0,2,1},"::2:1"},
+ {{15,16#f11,0,0,0,0,256,2},"f:f11::100:2"},
+ {{16#700,0,0,0,0,0,0,0},"700::"},
+ {{0,0,0,0,0,0,2,1},"::0.2.0.1"},
{{0,0,0,0,0,3,2,1},"::3:2:1"},
{{0,0,0,0,4,3,2,1},"::4:3:2:1"},
{{0,0,0,5,4,3,2,1},"::5:4:3:2:1"},
{{0,0,6,5,4,3,2,1},"::6:5:4:3:2:1"},
- {{0,7,6,5,4,3,2,1},"::7:6:5:4:3:2:1"},
+ {{0,7,6,5,4,3,2,1},"0:7:6:5:4:3:2:1"},
{{7,0,0,0,0,0,0,0},"7::"},
{{7,6,0,0,0,0,0,0},"7:6::"},
{{7,6,5,0,0,0,0,0},"7:6:5::"},
{{7,6,5,4,0,0,0,0},"7:6:5:4::"},
{{7,6,5,4,3,0,0,0},"7:6:5:4:3::"},
{{7,6,5,4,3,2,0,0},"7:6:5:4:3:2::"},
- {{7,6,5,4,3,2,1,0},"7:6:5:4:3:2:1::"},
+ {{7,6,5,4,3,2,1,0},"7:6:5:4:3:2:1:0"},
+ {{0,0,6,5,4,3,0,0},"::6:5:4:3:0:0"},
+ {{0,0,6,5,4,0,0,0},"0:0:6:5:4::"},
+ {{8,0,0,5,4,0,0,1},"8::5:4:0:0:1"},
+ {{8,0,0,5,0,0,0,1},"8:0:0:5::1"},
+ {{0,7,6,5,4,3,2,0},"0:7:6:5:4:3:2:0"},
+ {{0,0,6,5,4,3,0,0},"::6:5:4:3:0:0"},
+ {{0,0,0,5,4,0,0,0},"::5:4:0:0:0"},
+ {{0,0,0,0,4,0,0,0},"::4:0:0:0"},
+ {{0,0,0,5,0,0,0,0},"0:0:0:5::"},
{{16#c11,16#c22,16#5c33,16#c440,16#55c0,16#c66c,16#77,16#88},
- "c11:0c22:5c33:c440:55c0:c66c:77:0088"},
+ "c11:c22:5c33:c440:55c0:c66c:77:88"},
+ {{0,16#c22,16#5c33,16#c440,16#55c0,16#c66c,16#77,16#88},
+ "0:c22:5c33:c440:55c0:c66c:77:88"},
{{16#c11,0,16#5c33,16#c440,16#55c0,16#c66c,16#77,16#88},
- "c11::5c33:c440:55c0:c66c:77:0088"},
+ "c11:0:5c33:c440:55c0:c66c:77:88"},
{{16#c11,16#c22,0,16#c440,16#55c0,16#c66c,16#77,16#88},
- "c11:0c22::c440:55c0:c66c:77:0088"},
+ "c11:c22:0:c440:55c0:c66c:77:88"},
{{16#c11,16#c22,16#5c33,0,16#55c0,16#c66c,16#77,16#88},
- "c11:0c22:5c33::55c0:c66c:77:0088"},
+ "c11:c22:5c33:0:55c0:c66c:77:88"},
{{16#c11,16#c22,16#5c33,16#c440,0,16#c66c,16#77,16#88},
- "c11:0c22:5c33:c440::c66c:77:0088"},
+ "c11:c22:5c33:c440:0:c66c:77:88"},
{{16#c11,16#c22,16#5c33,16#c440,16#55c0,0,16#77,16#88},
- "c11:0c22:5c33:c440:55c0::77:0088"},
+ "c11:c22:5c33:c440:55c0:0:77:88"},
{{16#c11,16#c22,16#5c33,16#c440,16#55c0,16#c66c,0,16#88},
- "c11:0c22:5c33:c440:55c0:c66c::0088"},
+ "c11:c22:5c33:c440:55c0:c66c:0:88"},
+ {{16#c11,16#c22,16#5c33,16#c440,16#55c0,16#c66c,16#77,0},
+ "c11:c22:5c33:c440:55c0:c66c:77:0"},
+ {{0,0,16#5c33,16#c440,16#55c0,16#c66c,16#77,16#88},
+ "::5c33:c440:55c0:c66c:77:88"},
{{16#c11,0,0,16#c440,16#55c0,16#c66c,16#77,16#88},
- "c11::c440:55c0:c66c:77:0088"},
+ "c11::c440:55c0:c66c:77:88"},
{{16#c11,16#c22,0,0,16#55c0,16#c66c,16#77,16#88},
- "c11:0c22::55c0:c66c:77:0088"},
+ "c11:c22::55c0:c66c:77:88"},
{{16#c11,16#c22,16#5c33,0,0,16#c66c,16#77,16#88},
- "c11:0c22:5c33::c66c:77:0088"},
+ "c11:c22:5c33::c66c:77:88"},
{{16#c11,16#c22,16#5c33,16#c440,0,0,16#77,16#88},
- "c11:0c22:5c33:c440::77:0088"},
+ "c11:c22:5c33:c440::77:88"},
{{16#c11,16#c22,16#5c33,16#c440,16#55c0,0,0,16#88},
- "c11:0c22:5c33:c440:55c0::0088"},
+ "c11:c22:5c33:c440:55c0::88"},
+ {{16#c11,16#c22,16#5c33,16#c440,16#55c0,16#c66c,0,0},
+ "c11:c22:5c33:c440:55c0:c66c::"},
+ {{0,0,0,16#c440,16#55c0,16#c66c,16#77,16#88},
+ "::c440:55c0:c66c:77:88"},
{{16#c11,0,0,0,16#55c0,16#c66c,16#77,16#88},
- "c11::55c0:c66c:77:0088"},
+ "c11::55c0:c66c:77:88"},
{{16#c11,16#c22,0,0,0,16#c66c,16#77,16#88},
- "c11:0c22::c66c:77:0088"},
+ "c11:c22::c66c:77:88"},
{{16#c11,16#c22,16#5c33,0,0,0,16#77,16#88},
- "c11:0c22:5c33::77:0088"},
+ "c11:c22:5c33::77:88"},
{{16#c11,16#c22,16#5c33,16#c440,0,0,0,16#88},
- "c11:0c22:5c33:c440::0088"},
+ "c11:c22:5c33:c440::88"},
+ {{16#c11,16#c22,16#5c33,16#c440,16#55c0,0,0,0},
+ "c11:c22:5c33:c440:55c0::"},
+ {{0,0,0,0,16#55c0,16#c66c,16#77,16#88},
+ "::55c0:c66c:77:88"},
{{16#c11,0,0,0,0,16#c66c,16#77,16#88},
- "c11::c66c:77:0088"},
+ "c11::c66c:77:88"},
{{16#c11,16#c22,0,0,0,0,16#77,16#88},
- "c11:0c22::77:0088"},
+ "c11:c22::77:88"},
{{16#c11,16#c22,16#5c33,0,0,0,0,16#88},
- "c11:0c22:5c33::0088"},
+ "c11:c22:5c33::88"},
+ {{16#c11,16#c22,16#5c33,16#c440,0,0,0,0},
+ "c11:c22:5c33:c440::"},
+ {{0,0,0,0,0,16#c66c,16#77,16#88},
+ "::c66c:77:88"},
{{16#c11,0,0,0,0,0,16#77,16#88},
- "c11::77:0088"},
+ "c11::77:88"},
{{16#c11,16#c22,0,0,0,0,0,16#88},
- "c11:0c22::0088"},
- {{0,0,0,0,0,65535,258,65534},"::FFFF:1.2.255.254"},
+ "c11:c22::88"},
+ {{16#c11,16#c22,16#5c33,0,0,0,0,0},
+ "c11:c22:5c33::"},
+ {{0,0,0,0,0,65535,258,65534},"::ffff:1.2.255.254"},
+ {{16#fe80,12345,0,0,0,0,0,16#12},"fe80::12%012345"},
{{16#ffff,16#ffff,16#ffff,16#ffff,16#ffff,16#ffff,16#ffff,16#ffff},
"ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"}
- |[{{D2,0,0,0,0,P,(D1 bsl 8) bor D2,(D3 bsl 8) bor D4},
- erlang:integer_to_list(D2, 16)++"::"++Q++S}
- || {{D1,D2,D3,D4},S} <- V4Strict,
- {P,Q} <- [{0,""},{16#17,"17:"},{16#ff0,"0ff0:"}]]],
+ |[{list_to_tuple(P++[(D1 bsl 8) bor D2,(D3 bsl 8) bor D4]),
+ Q++S}
+ || {{D1,D2,D3,D4},S} <-
+ tl(V4Reversable),
+ {P,Q} <-
+ [{[0,0,0,0,0,16#ffff],"::ffff:"},
+ {[0,0,0,0,0,0],"::"}]]],
V4Sloppy =
[{{10,1,16#98,16#76},"10.0x019876"},
{{8#12,1,8#130,8#321},"012.01.054321"},
- {{255,255,255,255},"255.255.255.0377"},
- {{255,255,255,255},"0Xff.000000000377.0x0000ff.255"},
- {{255,255,255,255},"255.255.65535"},
- {{255,255,255,255},"255.0xFF.0177777"},
- {{255,255,255,255},"255.16777215"},
- {{255,255,255,255},"00377.0XFFFFFF"},
- {{255,255,255,255},"4294967295"},
- {{255,255,255,255},"0xffffffff"},
- {{255,255,255,255},"00000000000037777777777"},
+ {{252,253,254,255},"252.253.254.0377"},
+ {{252,253,254,255},"0Xfc.000000000375.0x0000fe.255"},
+ {{252,253,254,255},"252.253.65279"},
+ {{252,253,254,255},"252.0xFD.0177377"},
+ {{252,253,254,255},"252.16645887"},
+ {{252,253,254,255},"00374.0XFDFEFF"},
+ {{252,253,254,255},"4244504319"},
+ {{252,253,254,255},"0xfcfdfeff"},
+ {{252,253,254,255},"00000000000037477377377"},
{{16#12,16#34,16#56,16#78},"0x12345678"},
{{16#12,16#34,16#56,16#78},"0x12.0x345678"},
{{16#12,16#34,16#56,16#78},"0x12.0X34.0x5678"},
@@ -541,8 +579,14 @@ parse_address(Config) when is_list(Config) ->
{{0,0,0,0},"0.00.0.0"},
{{0,0,0,0},"0.0.000000000000.0"}],
V6Sloppy =
- [{{0,0,0,0,0,65535,(D1 bsl 8) bor D2,(D3 bsl 8) bor D4},S}
- || {{D1,D2,D3,D4},S} <- V4Strict++V4Sloppy],
+ [{{16#a,16#b,16#c,16#0,16#0,16#d,16#e,16#f},"A:B:C::d:e:f"},
+ {{16#fe80,0,0,0,0,0,0,16#12},"fe80::12%XXXXXXX"}]
+ ++
+ [{{P,0,0,0,0,D2,(D1 bsl 8) bor D2,(D3 bsl 8) bor D4},
+ Q++erlang:integer_to_list(D2, 16)++":"++S}
+ || {{D1,D2,D3,D4},S} <- V4Reversable,
+ {P,Q} <-
+ [{16#2001,"2001::"},{16#177,"177::"},{16#ff0,"Ff0::"}]],
V4Err =
["0.256.0.1",
"1.2.3.4.5",
@@ -586,28 +630,36 @@ parse_address(Config) when is_list(Config) ->
"fec0::fFfF:127.0.0.1."],
t_parse_address
(parse_ipv6_address,
- V6Strict++V6Sloppy++V6Err++V4Err),
+ false,
+ V6Reversable++V6Sloppy++V6Err++V4Err),
t_parse_address
(parse_ipv6strict_address,
- V6Strict++V6Err++V4Err++[S || {_,S} <- V6Sloppy]),
+ true,
+ V6Reversable++V6Err++V4Err),
t_parse_address
(parse_ipv4_address,
- V4Strict++V4Sloppy++V4Err++V6Err++[S || {_,S} <- V6Strict]),
+ false,
+ V4Reversable++V4Sloppy++V4Err++V6Err++[S || {_,S} <- V6Reversable]),
t_parse_address
(parse_ipv4strict_address,
- V4Strict++V4Err++V6Err++[S || {_,S} <- V4Sloppy++V6Strict]).
+ true,
+ V4Reversable++V4Err++V6Err++[S || {_,S} <- V4Sloppy++V6Reversable]).
-t_parse_address(Func, []) ->
+t_parse_address(Func, _Reversable, []) ->
io:format("~p done.~n", [Func]),
ok;
-t_parse_address(Func, [{Addr,String}|L]) ->
+t_parse_address(Func, Reversable, [{Addr,String}|L]) ->
io:format("~p = ~p.~n", [Addr,String]),
{ok,Addr} = inet:Func(String),
- t_parse_address(Func, L);
-t_parse_address(Func, [String|L]) ->
+ case Reversable of
+ true ->String = inet:ntoa(Addr);
+ false -> ok
+ end,
+ t_parse_address(Func, Reversable, L);
+t_parse_address(Func, Reversable, [String|L]) ->
io:format("~p.~n", [String]),
{error,einval} = inet:Func(String),
- t_parse_address(Func, L).
+ t_parse_address(Func, Reversable, L).
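
In the reworked test above, "reversable" means that parsing the string and then printing the resulting address with inet:ntoa/1 gives back exactly the same shortened, lower-case text. Using one of the pairs from V6Reversable, the round trip looks like this in a shell (expected output per the pairs above):

    1> inet:parse_ipv6strict_address("::ffff:1.2.255.254").
    {ok,{0,0,0,0,0,65535,258,65534}}
    2> inet:ntoa({0,0,0,0,0,65535,258,65534}).
    "::ffff:1.2.255.254"
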
parse_strict_address(Config) when is_list(Config) ->
{ok, {127,0,0,1}} =
@@ -617,6 +669,26 @@ parse_strict_address(Config) when is_list(Config) ->
{ok, {3089,3106,23603,50240,0,0,119,136}} =
inet:parse_strict_address("c11:0c22:5c33:c440::077:0088").
+ipv4_mapped_ipv6_address(Config) when is_list(Config) ->
+ {D1,D2,D3,D4} = IPv4Address =
+ {rand:uniform(256) - 1,
+ rand:uniform(256) - 1,
+ rand:uniform(256) - 1,
+ rand:uniform(256) - 1},
+ E7 = (D1 bsl 8) bor D2,
+ E8 = (D3 bsl 8) bor D4,
+ io:format("IPv4Address: ~p.~n", [IPv4Address]),
+ {0,0,0,0,0,65535,E7,E8} = inet:ipv4_mapped_ipv6_address(IPv4Address),
+ IPv6Address =
+ {rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1,
+ rand:uniform(65536) - 1, E7, E8},
+ IPv4Address = inet:ipv4_mapped_ipv6_address(IPv6Address),
+ ok.
+
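
The test above checks the packing both ways: the four IPv4 bytes end up in the last two 16-bit words after a 16#ffff word, with E7 = (D1 bsl 8) bor D2 and E8 = (D3 bsl 8) bor D4. With an arbitrary address such as {192,168,0,1}: E7 = 192*256 + 168 = 49320 and E8 = 0*256 + 1 = 1, so:

    1> inet:ipv4_mapped_ipv6_address({192,168,0,1}).
    {0,0,0,0,0,65535,49320,1}
    2> inet:ipv4_mapped_ipv6_address({0,0,0,0,0,65535,49320,1}).
    {192,168,0,1}
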
t_gethostnative(Config) when is_list(Config) ->
%% this will result in 26 bytes sent which causes problem in Windows
%% if the port-program has not assured stdin to be read in BINARY mode
@@ -769,6 +841,32 @@ cname_loop(Config) when is_list(Config) ->
ok.
+%% Test that the hosts file gets reloaded correctly in the case where it
+%% was missing during the initial startup.
+missing_hosts_reload(Config) when is_list(Config) ->
+ RootDir = proplists:get_value(priv_dir,Config),
+ HostsFile = filename:join(RootDir, atom_to_list(?MODULE) ++ ".hosts"),
+ InetRc = filename:join(RootDir, "inetrc"),
+ ok = file:write_file(InetRc, "{hosts_file, \"" ++ HostsFile ++ "\"}.\n"),
+ {error, enoent} = file:read_file_info(HostsFile),
+ % start a node
+ Pa = filename:dirname(code:which(?MODULE)),
+ {ok, TestNode} = test_server:start_node(?MODULE, slave,
+ [{args, "-pa " ++ Pa ++ " -kernel inetrc '\"" ++ InetRc ++ "\"'"}]),
+ % ensure it has our RC
+ Rc = rpc:call(TestNode, inet_db, get_rc, []),
+ {hosts_file, HostsFile} = lists:keyfind(hosts_file, 1, Rc),
+ % ensure it does not resolve
+ {error, nxdomain} = rpc:call(TestNode, inet_hosts, gethostbyname, ["somehost"]),
+ % write hosts file
+ ok = file:write_file(HostsFile, "1.2.3.4 somehost"),
+ % wait for cached timestamp to expire
+ timer:sleep(?RES_FILE_UPDATE_TM * 1000 + 100),
+ % ensure it DOES resolve
+ {ok,{hostent,"somehost",[],inet,4,[{1,2,3,4}]}} =
+ rpc:call(TestNode, inet_hosts, gethostbyname, ["somehost"]),
+ % cleanup
+ true = test_server:stop_node(TestNode).
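
The sleep converts ?RES_FILE_UPDATE_TM (defined in inet_res.hrl, in seconds) to milliseconds and waits slightly longer, so inet_db's cached timestamp for the hosts file has expired; this is also why the -include_lib("kernel/src/inet_res.hrl") line was added at the top of the suite. For reference, the two files the test writes look roughly like this (paths are placeholders):

    %% inetrc (one Erlang term, passed via -kernel inetrc):
    {hosts_file, "/path/to/priv_dir/inet_SUITE.hosts"}.

    %% inet_SUITE.hosts (written only after the node is up):
    %% 1.2.3.4 somehost
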
%% These must be run in the whole suite since they need
%% the host list and require inet_gethost_native to be started.
@@ -989,28 +1087,26 @@ getservbyname_overflow(Config) when is_list(Config) ->
getifaddrs(Config) when is_list (Config) ->
{ok,IfAddrs} = inet:getifaddrs(),
io:format("IfAddrs = ~p.~n", [IfAddrs]),
- case
- {os:type(),
- [If ||
- {If,Opts} <- IfAddrs,
- lists:keymember(hwaddr, 1, Opts)]} of
- {{unix,sunos},[]} -> ok;
- {OT,[]} ->
- ct:fail({should_have_hwaddr,OT});
- _ -> ok
+ case [If || {If,Opts} <- IfAddrs, lists:keymember(hwaddr, 1, Opts)] of
+ [] ->
+ case os:type() of
+ {unix,sunos} -> ok;
+ OT ->
+ ct:fail({should_have_hwaddr,OT})
+ end;
+ [_|_] -> ok
end,
- Addrs =
- [element(1, A) || A <- ifaddrs(IfAddrs)],
+ Addrs = ifaddrs(IfAddrs),
io:format("Addrs = ~p.~n", [Addrs]),
[check_addr(Addr) || Addr <- Addrs],
ok.
-check_addr({addr,Addr})
+check_addr(Addr)
when tuple_size(Addr) =:= 8,
element(1, Addr) band 16#FFC0 =:= 16#FE80 ->
io:format("Addr: ~p link local; SKIPPED!~n", [Addr]),
ok;
-check_addr({addr,Addr}) ->
+check_addr(Addr) ->
io:format("Addr: ~p.~n", [Addr]),
Ping = "ping",
Pong = "pong",
@@ -1026,80 +1122,86 @@ check_addr({addr,Addr}) ->
ok = gen_tcp:close(S2),
ok = gen_tcp:close(L).
--record(ifopts, {name,flags,addrs=[],hwaddr}).
-
-ifaddrs([]) -> [];
-ifaddrs([{If,Opts}|IOs]) ->
- #ifopts{flags=F} = Ifopts = check_ifopts(Opts, #ifopts{name=If}),
- case F of
- {flags,Flags} ->
- case lists:member(up, Flags) of
- true ->
- Ifopts#ifopts.addrs;
- false ->
- []
- end ++ ifaddrs(IOs);
- undefined ->
- ifaddrs(IOs)
+ifaddrs(IfOpts) ->
+ IfMap = collect_ifopts(IfOpts),
+ ChkFun =
+ fun Self({{_,Flags} = Key, Opts}, ok) ->
+ Broadcast = lists:member(broadcast, Flags),
+ P2P = lists:member(pointtopoint, Flags),
+ case Opts of
+ [{addr,_},{netmask,_},{broadaddr,_}|Os]
+ when Broadcast ->
+ Self({Key, Os}, ok);
+ [{addr,_},{netmask,_},{dstaddr,_}|Os]
+ when P2P ->
+ Self({Key, Os}, ok);
+ [{addr,_},{netmask,_}|Os] ->
+ Self({Key, Os}, ok);
+ [{hwaddr,_}|Os] ->
+ Self({Key, Os}, ok);
+ [] ->
+ ok
+ end
+ end,
+ fold_ifopts(ChkFun, ok, IfMap),
+ AddrsFun =
+ fun ({{_,Flags}, Opts}, Acc) ->
+ case
+ lists:member(running, Flags)
+ andalso (not lists:member(pointtopoint, Flags))
+ of
+ true ->
+ lists:reverse(
+ [Addr || {addr,Addr} <- Opts],
+ Acc);
+ false ->
+ Acc
+ end
+ end,
+ fold_ifopts(AddrsFun, [], IfMap).
+
+collect_ifopts(IfOpts) ->
+ collect_ifopts(IfOpts, #{}).
+%%
+collect_ifopts(IfOpts, IfMap) ->
+ case IfOpts of
+ [{If,[{flags,Flags}|Opts]}|IfOs] ->
+ Key = {If,Flags},
+ case maps:is_key(Key, IfMap) of
+ true ->
+ ct:fail({unexpected_ifopts,IfOpts,IfMap});
+ false ->
+ collect_ifopts(IfOs, IfMap, Opts, Key, [])
+ end;
+ [] ->
+ IfMap;
+ _ ->
+ ct:fail({unexpected_ifopts,IfOpts,IfMap})
+ end.
+%%
+collect_ifopts(IfOpts, IfMap, Opts, Key, R) ->
+ case Opts of
+ [{flags,_}|_] ->
+ {If,_} = Key,
+ collect_ifopts(
+ [{If,Opts}|IfOpts], maps:put(Key, lists:reverse(R), IfMap));
+ [OptVal|Os] ->
+ collect_ifopts(IfOpts, IfMap, Os, Key, [OptVal|R]);
+ [] ->
+ collect_ifopts(IfOpts, maps:put(Key, lists:reverse(R), IfMap))
end.
-check_ifopts([], #ifopts{flags=F,addrs=Raddrs}=Ifopts) ->
- Addrs = lists:reverse(Raddrs),
- R = Ifopts#ifopts{addrs=Addrs},
- io:format("~p.~n", [R]),
- %% See how we did...
- {flags,Flags} = F,
- case lists:member(broadcast, Flags) of
- true ->
- [case A of
- {{addr,_},{netmask,_},{broadaddr,_}} ->
- A;
- {{addr,T},{netmask,_}} when tuple_size(T) =:= 8 ->
- A
- end || A <- Addrs];
- false ->
- case lists:member(pointtopoint, Flags) of
- true ->
- [case A of
- {{addr,_},{netmask,_},{dstaddr,_}} ->
- A
- end || A <- Addrs];
- false ->
- [case A of
- {{addr,_},{netmask,_}} ->
- A
- end || A <- Addrs]
- end
- end,
- R;
-check_ifopts([{flags,_}=F|Opts], #ifopts{flags=undefined}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{flags=F});
-check_ifopts([{flags,_}=F|Opts], #ifopts{flags=Flags}=Ifopts) ->
- case F of
- Flags ->
- check_ifopts(Opts, Ifopts);
- _ ->
- ct:fail({multiple_flags,F,Ifopts})
- end;
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N,{dstaddr,_}=D|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,D}|Addrs]});
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N,{broadaddr,_}=B|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N,B}|Addrs]});
-check_ifopts(
- [{addr,_}=A,{netmask,_}=N|Opts],
- #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A,N}|Addrs]});
-check_ifopts([{addr,_}=A|Opts], #ifopts{addrs=Addrs}=Ifopts) ->
- check_ifopts(Opts, Ifopts#ifopts{addrs=[{A}|Addrs]});
-check_ifopts([{hwaddr,Hwaddr}=H|Opts], #ifopts{hwaddr=undefined}=Ifopts)
- when is_list(Hwaddr) ->
- check_ifopts(Opts, Ifopts#ifopts{hwaddr=H});
-check_ifopts([{hwaddr,_}=H|_], #ifopts{}=Ifopts) ->
- ct:fail({multiple_hwaddrs,H,Ifopts}).
+fold_ifopts(Fun, Acc, IfMap) ->
+ fold_ifopts(Fun, Acc, IfMap, maps:keys(IfMap)).
+%%
+fold_ifopts(Fun, Acc, IfMap, Keys) ->
+ case Keys of
+ [Key|Ks] ->
+ Opts = maps:get(Key, IfMap),
+ fold_ifopts(Fun, Fun({Key,Opts}, Acc), IfMap, Ks);
+ [] ->
+ Acc
+ end.
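
collect_ifopts/1 and fold_ifopts/3 above rebuild the per-interface option lists keyed by {Name, Flags}. For orientation, inet:getifaddrs/0 returns data of roughly this shape (machine-dependent and abridged, shown here only as an illustration):

    {ok, IfAddrs} = inet:getifaddrs(),
    %% IfAddrs = [{"lo", [{flags,[up,loopback,running]},
    %%                    {addr,{127,0,0,1}}, {netmask,{255,0,0,0}},
    %%                    {addr,{0,0,0,0,0,0,0,1}},
    %%                    {netmask,{65535,65535,65535,65535,65535,65535,65535,65535}},
    %%                    {hwaddr,[0,0,0,0,0,0]}]},
    %%            ...]
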
%% Works just like lists:member/2, except that any {127,_,_,_} tuple
%% matches any other {127,_,_,_}. We do this to handle Linux systems
@@ -1247,3 +1349,67 @@ cmd(CmdString) ->
io:put_chars(["# ",CmdString,io_lib:nl()]),
io:put_chars([os:cmd(CmdString++" ; echo ' =>' $?")]),
ok.
+
+-define(CAP_NET_RAW, 13). %% from /usr/include/linux/capability.h
+
+can_bind_to_device({unix, linux}, {Major, _, _})
+ when Major > 2 ->
+ Status = os:cmd("cat /proc/self/status | grep CapEff"),
+ [_, CapEffStr] = string:tokens(Status, [$\n, $\t]),
+ CapEff = list_to_integer(CapEffStr, 16),
+ if CapEff band (1 bsl ?CAP_NET_RAW) =/= 0 ->
+ ok;
+ true ->
+ {skip,"insufficient capabilities, CAP_NET_RAW not granted"}
+ end;
+can_bind_to_device(_OS, _Version) ->
+ {skip,"socket option bind_to_device not supported on this OS or version"}.
+
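
CapEff in /proc/self/status is the effective capability set as a hexadecimal bitmask, so the check above boils down to testing bit ?CAP_NET_RAW (13). For example, with a fully privileged CapEff value such as 0000003fffffffff:

    1> CapEff = list_to_integer("0000003fffffffff", 16).
    274877906943
    2> CapEff band (1 bsl 13) =/= 0.
    true
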
+simple_bind_to_device(Config) when is_list(Config) ->
+ case can_bind_to_device(os:type(), os:version()) of
+ ok ->
+ {ok,U} = gen_udp:open(0),
+ jog_bind_to_device_opt(U),
+ ok = gen_udp:close(U),
+ %%
+ {ok,L} = gen_tcp:listen(0, []),
+ jog_bind_to_device_opt(L),
+ ok = gen_tcp:close(L),
+ %%
+ case gen_sctp:open() of
+ {ok,S} ->
+ jog_bind_to_device_opt(S),
+ ok = gen_sctp:close(S);
+ {error,eprotonosupport} ->
+ ok
+ end;
+ Other ->
+ Other
+ end.
+
+%% Smoke test bind_to_device support.
+simple_bind_to_device_open(Config) when is_list(Config) ->
+ case can_bind_to_device(os:type(), os:version()) of
+ ok ->
+ {ok,U} = gen_udp:open(0, [binary,{bind_to_device,<<"lo">>},inet]),
+ ok = gen_udp:close(U),
+ {ok,T} = gen_tcp:listen(0, [binary,{bind_to_device,<<"lo">>},inet]),
+ ok = gen_tcp:close(T),
+
+ case gen_sctp:open(0, [binary,{bind_to_device,<<"lo">>},inet]) of
+ {ok,S} ->
+ ok = gen_sctp:close(S);
+ {error,eprotonosupport} ->
+ ok
+ end;
+ Other ->
+ Other
+ end.
+
+jog_bind_to_device_opt(S) ->
+ %% This is just jogging the option mechanics
+ ok = inet:setopts(S, [{bind_to_device,<<>>}]),
+ {ok,[{bind_to_device,<<>>}]} = inet:getopts(S, [bind_to_device]),
+ ok = inet:setopts(S, [{bind_to_device,<<"lo">>}]),
+ {ok,[{bind_to_device,<<"lo">>}]} = inet:getopts(S, [bind_to_device]),
+ ok.
diff --git a/lib/kernel/test/inet_res_SUITE.erl b/lib/kernel/test/inet_res_SUITE.erl
index 6691ad9c06..6b545fa414 100644
--- a/lib/kernel/test/inet_res_SUITE.erl
+++ b/lib/kernel/test/inet_res_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2009-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2009-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -46,15 +46,16 @@
%% a temporary local nameserver BIND 8 or 9 that must be installed
%% on your machine.
%%
-%% For example, on Ubuntu 14.04, as root:
+%% For example, on Ubuntu 16.04 / 18.04, as root:
%% apt-get install bind9
%% Now, that is not enough since Apparmor will not allow
%% the nameserver daemon /usr/sbin/named to read from the test directory.
%% Assuming that you run tests in /ldisk/daily_build, and still on
-%% Ubuntu 14.04, make /usr/apparmor.d/local/usr.sbin.named contain:
+%% Ubuntu 16.04 / 18.04, make /etc/apparmor.d/local/usr.sbin.named contain:
%% /ldisk/daily_build/** r,
%% And yes; the trailing comma must be there...
-
+%% And yes; create the file if it does not exist.
+%% And yes; restart the apparmor daemon using "service apparmor restart"
suite() ->
@@ -217,10 +218,10 @@ proxy_start(TC, {NS,P}) ->
spawn_link(
fun () ->
try proxy_start(TC, NS, P, Parent, Tag)
- catch C:X ->
+ catch C:X:Stacktrace ->
io:format(
"~w: ~w:~p ~p~n",
- [self(),C,X,erlang:get_stacktrace()])
+ [self(),C,X,Stacktrace])
end
end),
receive {started,Tag,Port} ->
@@ -531,7 +532,7 @@ edns0(Config) when is_list(Config) ->
case os:version() of
{M,V,_} when M < 5; M == 5, V =< 8 ->
%% In our test park only known platform
- %% with an DNS resolver that can not do
+%% with a DNS resolver that cannot do
%% EDNS0.
{comment,"No EDNS0"}
end
diff --git a/lib/kernel/test/inet_sockopt_SUITE.erl b/lib/kernel/test/inet_sockopt_SUITE.erl
index 322b9f30fe..27ff74e309 100644
--- a/lib/kernel/test/inet_sockopt_SUITE.erl
+++ b/lib/kernel/test/inet_sockopt_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2007-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2007-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -110,9 +110,14 @@ simple(Config) when is_list(Config) ->
{S1,S2} = create_socketpair(Opt, Opt),
{ok,Opt} = inet:getopts(S1,OptTags),
{ok,Opt} = inet:getopts(S2,OptTags),
- COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- Opt],
+ NoPushOpt = case os:type() of
+ {unix, Osname} when Osname =:= linux; Osname =:= freebsd -> {nopush, true};
+ {_,_} -> {nopush, false}
+ end,
+ COpt = [{X,case X of nodelay -> false;_ -> Y end} || {X,Y} <- [NoPushOpt|Opt]],
+ COptTags = [X || {X,_} <- COpt],
inet:setopts(S1,COpt),
- {ok,COpt} = inet:getopts(S1,OptTags),
+ {ok,COpt} = inet:getopts(S1,COptTags),
{ok,Opt} = inet:getopts(S2,OptTags),
gen_tcp:close(S1),
gen_tcp:close(S2),
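
The OS-dependent NoPushOpt above reflects that nopush is only expected to be toggled where the underlying TCP option exists; on Linux it is generally backed by TCP_CORK and on FreeBSD by TCP_NOPUSH (a hedged description, not stated in the patch). A usage sketch, where Port is a hypothetical listener port:

    {ok, S} = gen_tcp:connect("localhost", Port, [binary, {active, false}]),
    ok = inet:setopts(S, [{nopush, true}]),   %% hold back partial segments
    ok = gen_tcp:send(S, <<"head">>),
    ok = gen_tcp:send(S, <<"body">>),
    ok = inet:setopts(S, [{nopush, false}]),  %% let the buffered data go out
    ok = gen_tcp:close(S).
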
@@ -620,7 +625,7 @@ ipv6_v6only_close(Module, Socket) ->
%% Test using socket option ipv6_v6only for UDP.
use_ipv6_v6only_udp(Config) when is_list(Config) ->
- case gen_udp:open(0, [inet6,{ipv6_v6only,true}]) of
+ case gen_udp:open(0, [inet6,{ip,{0,0,0,0,0,0,0,1}}, {ipv6_v6only,true}]) of
{ok,S6} ->
case inet:getopts(S6, [ipv6_v6only]) of
{ok,[{ipv6_v6only,true}]} ->
diff --git a/lib/kernel/test/init_SUITE.erl b/lib/kernel/test/init_SUITE.erl
index 2b59eb2bfe..a0154b2694 100644
--- a/lib/kernel/test/init_SUITE.erl
+++ b/lib/kernel/test/init_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -295,11 +295,11 @@ is_real_system(KernelVsn, StdlibVsn) ->
%% before restart.
%% ------------------------------------------------
many_restarts() ->
- [{timetrap,{minutes,8}}].
+ [{timetrap,{minutes,16}}].
many_restarts(Config) when is_list(Config) ->
{ok, Node} = loose_node:start(init_test, "", ?DEFAULT_TIMEOUT_SEC),
- loop_restart(50,Node,rpc:call(Node,erlang,whereis,[error_logger])),
+ loop_restart(50,Node,rpc:call(Node,erlang,whereis,[logger])),
loose_node:stop(Node),
ok.
@@ -315,14 +315,14 @@ loop_restart(N,Node,EHPid) ->
loose_node:stop(Node),
ct:fail(not_stopping)
end,
- ok = wait_for(30, Node, EHPid),
- loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[error_logger])).
+ ok = wait_for(60, Node, EHPid),
+ loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[logger])).
wait_for(0,Node,_) ->
loose_node:stop(Node),
error;
wait_for(N,Node,EHPid) ->
- case rpc:call(Node, erlang, whereis, [error_logger]) of
+ case rpc:call(Node, erlang, whereis, [logger]) of
Pid when is_pid(Pid), Pid =/= EHPid ->
%% erlang:display(ok),
ok;
@@ -365,7 +365,10 @@ restart(Config) when is_list(Config) ->
%% Ok, the node is up, now the real test test begins.
erlang:monitor_node(Node, true),
SysProcs0 = rpc:call(Node, ?MODULE, find_system_processes, []),
- [InitPid, PurgerPid, LitCollectorPid, DirtyCodePid] = SysProcs0,
+ io:format("SysProcs0=~p~n", [SysProcs0]),
+ [InitPid, PurgerPid, LitCollectorPid,
+ DirtySigNPid, DirtySigHPid, DirtySigMPid,
+ PrimFilePid] = SysProcs0,
InitPid = rpc:call(Node, erlang, whereis, [init]),
PurgerPid = rpc:call(Node, erlang, whereis, [erts_code_purger]),
Procs = rpc:call(Node, erlang, processes, []),
@@ -381,7 +384,10 @@ restart(Config) when is_list(Config) ->
ok = wait_restart(30, Node),
SysProcs1 = rpc:call(Node, ?MODULE, find_system_processes, []),
- [InitPid1, PurgerPid1, LitCollectorPid1, DirtyCodePid1] = SysProcs1,
+ io:format("SysProcs1=~p~n", [SysProcs1]),
+ [InitPid1, PurgerPid1, LitCollectorPid1,
+ DirtySigNPid1, DirtySigHPid1, DirtySigMPid1,
+ PrimFilePid1] = SysProcs1,
%% Still the same init process!
InitPid1 = rpc:call(Node, erlang, whereis, [init]),
@@ -394,20 +400,22 @@ restart(Config) when is_list(Config) ->
PurgerP = pid_to_list(PurgerPid1),
%% and same literal area collector process!
- case LitCollectorPid of
- undefined -> undefined = LitCollectorPid1;
- _ ->
- LitCollectorP = pid_to_list(LitCollectorPid),
- LitCollectorP = pid_to_list(LitCollectorPid1)
- end,
-
- %% and same dirty process code checker process!
- case DirtyCodePid of
- undefined -> undefined = DirtyCodePid1;
- _ ->
- DirtyCodeP = pid_to_list(DirtyCodePid),
- DirtyCodeP = pid_to_list(DirtyCodePid1)
- end,
+ LitCollectorP = pid_to_list(LitCollectorPid),
+ LitCollectorP = pid_to_list(LitCollectorPid1),
+
+ %% and same normal dirty signal handler process!
+ DirtySigNP = pid_to_list(DirtySigNPid),
+ DirtySigNP = pid_to_list(DirtySigNPid1),
+ %% and same high dirty signal handler process!
+ DirtySigHP = pid_to_list(DirtySigHPid),
+ DirtySigHP = pid_to_list(DirtySigHPid1),
+ %% and same max dirty signal handler process!
+ DirtySigMP = pid_to_list(DirtySigMPid),
+ DirtySigMP = pid_to_list(DirtySigMPid1),
+
+ %% and same prim_file helper process!
+ PrimFileP = pid_to_list(PrimFilePid),
+ PrimFileP = pid_to_list(PrimFilePid1),
NewProcs0 = rpc:call(Node, erlang, processes, []),
NewProcs = NewProcs0 -- SysProcs1,
@@ -433,7 +441,10 @@ restart(Config) when is_list(Config) ->
-record(sys_procs, {init,
code_purger,
literal_collector,
- dirty_proc_checker}).
+ dirty_sig_handler_normal,
+ dirty_sig_handler_high,
+ dirty_sig_handler_max,
+ prim_file}).
find_system_processes() ->
find_system_procs(processes(), #sys_procs{}).
@@ -442,21 +453,36 @@ find_system_procs([], SysProcs) ->
[SysProcs#sys_procs.init,
SysProcs#sys_procs.code_purger,
SysProcs#sys_procs.literal_collector,
- SysProcs#sys_procs.dirty_proc_checker];
+ SysProcs#sys_procs.dirty_sig_handler_normal,
+ SysProcs#sys_procs.dirty_sig_handler_high,
+ SysProcs#sys_procs.dirty_sig_handler_max,
+ SysProcs#sys_procs.prim_file];
find_system_procs([P|Ps], SysProcs) ->
- case process_info(P, initial_call) of
- {initial_call,{otp_ring0,start,2}} ->
+ case process_info(P, [initial_call, priority]) of
+ [{initial_call,{erl_init,start,2}},_] ->
undefined = SysProcs#sys_procs.init,
find_system_procs(Ps, SysProcs#sys_procs{init = P});
- {initial_call,{erts_code_purger,start,0}} ->
+ [{initial_call,{erts_code_purger,start,0}},_] ->
undefined = SysProcs#sys_procs.code_purger,
find_system_procs(Ps, SysProcs#sys_procs{code_purger = P});
- {initial_call,{erts_literal_area_collector,start,0}} ->
+ [{initial_call,{erts_literal_area_collector,start,0}},_] ->
undefined = SysProcs#sys_procs.literal_collector,
find_system_procs(Ps, SysProcs#sys_procs{literal_collector = P});
- {initial_call,{erts_dirty_process_code_checker,start,0}} ->
- undefined = SysProcs#sys_procs.dirty_proc_checker,
- find_system_procs(Ps, SysProcs#sys_procs{dirty_proc_checker = P});
+ [{initial_call,{erts_dirty_process_signal_handler,start,0}},
+ {priority,normal}] ->
+ undefined = SysProcs#sys_procs.dirty_sig_handler_normal,
+ find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_normal = P});
+ [{initial_call,{erts_dirty_process_signal_handler,start,0}},
+ {priority,high}] ->
+ undefined = SysProcs#sys_procs.dirty_sig_handler_high,
+ find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_high = P});
+ [{initial_call,{erts_dirty_process_signal_handler,start,0}},
+ {priority,max}] ->
+ undefined = SysProcs#sys_procs.dirty_sig_handler_max,
+ find_system_procs(Ps, SysProcs#sys_procs{dirty_sig_handler_max = P});
+ [{initial_call,{prim_file,start,0}},_] ->
+ undefined = SysProcs#sys_procs.prim_file,
+ find_system_procs(Ps, SysProcs#sys_procs{prim_file = P});
_ ->
find_system_procs(Ps, SysProcs)
end.
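
The matching now uses both initial_call and priority, since the three dirty signal handler processes share the same initial call and differ only in priority. A quick way to eyeball the same information on a live node (output is node-dependent):

    %% List every process with the two keys this suite matches on.
    [{P, process_info(P, [initial_call, priority])} || P <- processes()].
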
diff --git a/lib/kernel/test/interactive_shell_SUITE.erl b/lib/kernel/test/interactive_shell_SUITE.erl
index fc3706ba1e..173e25c520 100644
--- a/lib/kernel/test/interactive_shell_SUITE.erl
+++ b/lib/kernel/test/interactive_shell_SUITE.erl
@@ -23,7 +23,8 @@
init_per_group/2,end_per_group/2,
get_columns_and_rows/1, exit_initial/1, job_control_local/1,
job_control_remote/1,
- job_control_remote_noshell/1,ctrl_keys/1]).
+ job_control_remote_noshell/1,ctrl_keys/1,
+ get_columns_and_rows_escript/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
%% For spawn
@@ -40,7 +41,8 @@ suite() ->
{timetrap,{minutes,3}}].
all() ->
- [get_columns_and_rows, exit_initial, job_control_local,
+ [get_columns_and_rows_escript,get_columns_and_rows,
+ exit_initial, job_control_local,
job_control_remote, job_control_remote_noshell,
ctrl_keys].
@@ -72,6 +74,60 @@ end_per_group(_GroupName, Config) ->
-define(dbg(Data),noop).
-endif.
+string_to_term(Str) ->
+ {ok,Tokens,_EndLine} = erl_scan:string(Str ++ "."),
+ {ok,AbsForm} = erl_parse:parse_exprs(Tokens),
+ {value,Value,_Bs} = erl_eval:exprs(AbsForm, erl_eval:new_bindings()),
+ Value.
+
+run_unbuffer_escript(Rows, Columns, EScript, NoTermStdIn, NoTermStdOut) ->
+ DataDir = filename:join(filename:dirname(code:which(?MODULE)), "interactive_shell_SUITE_data"),
+ TmpFile = filename:join(DataDir, "tmp"),
+ ok = file:write_file(TmpFile, <<>>),
+ CommandModifier =
+ case {NoTermStdIn, NoTermStdOut} of
+ {false, false} -> "";
+ {true, false} -> io_lib:format(" < ~s", [TmpFile]);
+ {false, true} -> io_lib:format(" > ~s ; cat ~s", [TmpFile, TmpFile]);
+ {true, true} -> io_lib:format(" > ~s < ~s ; cat ~s", [TmpFile, TmpFile, TmpFile])
+ end,
+ Command = io_lib:format("unbuffer -p bash -c \"stty rows ~p; stty columns ~p; escript ~s ~s\"",
+ [Rows, Columns, EScript, CommandModifier]),
+ %% io:format("Command: ~s ~n", [Command]),
+ Out = os:cmd(Command),
+ %% io:format("Out: ~p ~n", [Out]),
+ string_to_term(Out).
+
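
string_to_term/1 above turns the term printed by the escript back into an Erlang term. With the output of io_columns.erl under a 42-column pseudo-terminal, the steps are (shell sketch):

    1> {ok, Tokens, _} = erl_scan:string("{ok,42}" ++ ".").
    2> {ok, Exprs} = erl_parse:parse_exprs(Tokens).
    3> {value, {ok,42}, _} = erl_eval:exprs(Exprs, erl_eval:new_bindings()).
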
+get_columns_and_rows_escript(Config) when is_list(Config) ->
+ ExpectUnbufferInstalled =
+ try
+ "79" = string:trim(os:cmd("unbuffer -p bash -c \"stty columns 79 ; tput cols\"")),
+ true
+ catch
+ _:_ -> false
+ end,
+ case ExpectUnbufferInstalled of
+ false ->
+ {skip,
+ "The unbuffer tool (https://core.tcl-lang.org/expect/index) does not seem to be installed.~n"
+ "On Ubuntu/Debian: \"sudo apt-get install expect\""};
+ true ->
+ DataDir = filename:join(filename:dirname(code:which(?MODULE)), "interactive_shell_SUITE_data"),
+ IoColumnsErl = filename:join(DataDir, "io_columns.erl"),
+ IoRowsErl = filename:join(DataDir, "io_rows.erl"),
+ [
+ begin
+ {ok, 42} = run_unbuffer_escript(99, 42, IoColumnsErl, NoTermStdIn, NoTermStdOut),
+ {ok, 99} = run_unbuffer_escript(99, 42, IoRowsErl, NoTermStdIn, NoTermStdOut)
+ end
+ ||
+ {NoTermStdIn, NoTermStdOut} <- [{false, false}, {true, false}, {false, true}]
+ ],
+ {error,enotsup} = run_unbuffer_escript(99, 42, IoRowsErl, true, true),
+ {error,enotsup} = run_unbuffer_escript(99, 42, IoColumnsErl, true, true),
+ ok
+ end.
+
%% Test that the shell can access columns and rows.
get_columns_and_rows(Config) when is_list(Config) ->
case proplists:get_value(default_shell,Config) of
@@ -711,7 +767,7 @@ toerl_loop(Port,Acc) ->
end.
millistamp() ->
- erlang:monotonic_time(milli_seconds).
+ erlang:monotonic_time(millisecond).
get_data_within(Port, X, Acc) when X =< 0 ->
?dbg({get_data_within, X, Acc, ?LINE}),
diff --git a/lib/kernel/test/interactive_shell_SUITE_data/.gitignore b/lib/kernel/test/interactive_shell_SUITE_data/.gitignore
new file mode 100644
index 0000000000..1c2f433de1
--- /dev/null
+++ b/lib/kernel/test/interactive_shell_SUITE_data/.gitignore
@@ -0,0 +1 @@
+tmp
\ No newline at end of file
diff --git a/lib/kernel/test/interactive_shell_SUITE_data/io_columns.erl b/lib/kernel/test/interactive_shell_SUITE_data/io_columns.erl
new file mode 100644
index 0000000000..32d0cf25df
--- /dev/null
+++ b/lib/kernel/test/interactive_shell_SUITE_data/io_columns.erl
@@ -0,0 +1,6 @@
+-module(io_columns).
+
+-export([main/1]).
+
+main(_) ->
+ io:format("~p",[io:columns()]).
diff --git a/lib/kernel/test/interactive_shell_SUITE_data/io_rows.erl b/lib/kernel/test/interactive_shell_SUITE_data/io_rows.erl
new file mode 100644
index 0000000000..53ceb464b0
--- /dev/null
+++ b/lib/kernel/test/interactive_shell_SUITE_data/io_rows.erl
@@ -0,0 +1,6 @@
+-module(io_rows).
+
+-export([main/1]).
+
+main(_) ->
+ io:format("~p",[io:rows()]).
diff --git a/lib/kernel/test/kernel.spec b/lib/kernel/test/kernel.spec
index 62afc9f97b..eaa17f3a59 100644
--- a/lib/kernel/test/kernel.spec
+++ b/lib/kernel/test/kernel.spec
@@ -2,3 +2,4 @@
{config, "../test_server/ts.unix.config"}.
{suites,"../kernel_test", all}.
+{skip_suites,"../kernel_test",[logger_stress_SUITE],"Benchmarks only"}.
diff --git a/lib/kernel/test/kernel_SUITE.erl b/lib/kernel/test/kernel_SUITE.erl
index da56359294..3e5ed855b5 100644
--- a/lib/kernel/test/kernel_SUITE.erl
+++ b/lib/kernel/test/kernel_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -30,14 +30,14 @@
-export([init_per_testcase/2, end_per_testcase/2]).
%% Test cases must be exported.
--export([app_test/1, appup_test/1]).
+-export([app_test/1, appup_test/1, refc/1]).
suite() ->
[{ct_hooks,[ts_install_cth]},
{timetrap,{minutes,2}}].
all() ->
- [app_test, appup_test].
+ [app_test, appup_test, refc].
groups() ->
[].
@@ -163,3 +163,68 @@ check_appup([Vsn|Vsns],Instrs,Expected) ->
end;
check_appup([],_,_) ->
ok.
+
+%%% Check that refc module handles the counters as expected
+refc(_Config) ->
+ Enable = fun(Enable) -> erlang:system_flag(scheduler_wall_time, Enable) end,
+ IsOn = fun() -> undefined /= erlang:statistics(scheduler_wall_time) end,
+ Tester = self(),
+ Loop = fun Loop() ->
+ receive
+ die -> normal;
+ {apply, Bool} ->
+ Res = Enable(Bool),
+ Tester ! {self(), Res},
+ Loop()
+ end
+ end,
+
+ %% Counter should be 0
+ false = Enable(false),
+
+ false = Enable(true),
+ true = Enable(true),
+ true = Enable(false),
+ true = Enable(false),
+
+ %% Counter should be 0
+ false = IsOn(),
+
+ P1 = spawn_link(Loop),
+ P1 ! {apply, true},
+ receive {P1, R1} -> false = R1 end,
+
+ %% P1 has turned it on counter should be one
+ true = IsOn(),
+ true = Enable(true),
+ true = Enable(false),
+ true = IsOn(),
+
+ P1 ! {apply, false},
+ receive {P1, R2} -> true = R2 end,
+ false = IsOn(),
+
+ P1 ! {apply, true},
+ receive {P1, R3} -> false = R3 end,
+ true = IsOn(),
+ true = Enable(false),
+
+
+ P1 ! die,
+ timer:sleep(100),
+ false = IsOn(),
+ false = Enable(false),
+
+ P2 = spawn_link(Loop),
+ P2 ! {apply, true},
+ receive {P2, R4} -> false = R4 end,
+ true = IsOn(),
+ P2 ! {apply, true},
+ receive {P2, R5} -> true = R5 end,
+ true = IsOn(),
+
+ P2 ! die,
+ timer:sleep(100),
+ false = IsOn(),
+
+ ok.
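
The expected return values follow from erlang:system_flag(scheduler_wall_time, Bool) returning whether the flag was on before the call, while the flag itself is reference-counted per enabling process and dropped automatically when such a process dies. Restating the first block of assertions as a stand-alone sketch (assuming nothing else has the flag enabled):

    false = erlang:system_flag(scheduler_wall_time, true),   %% was off; turns it on
    true  = erlang:system_flag(scheduler_wall_time, true),   %% was on; stays on
    true  = erlang:system_flag(scheduler_wall_time, false),  %% was on; still on (refc 2 -> 1)
    true  = erlang:system_flag(scheduler_wall_time, false),  %% was on; now off (refc 1 -> 0)
    undefined = erlang:statistics(scheduler_wall_time).      %% off => undefined
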
diff --git a/lib/kernel/test/kernel_bench.spec b/lib/kernel/test/kernel_bench.spec
new file mode 100644
index 0000000000..898ceb59e0
--- /dev/null
+++ b/lib/kernel/test/kernel_bench.spec
@@ -0,0 +1,3 @@
+{groups,"../kernel_test",zlib_SUITE,[bench]}.
+{groups,"../kernel_test",file_SUITE,[bench]}.
+{suites,"../kernel_test",[logger_stress_SUITE]}.
diff --git a/lib/kernel/test/kernel_config_SUITE.erl b/lib/kernel/test/kernel_config_SUITE.erl
index 9a4578917d..57c44c2498 100644
--- a/lib/kernel/test/kernel_config_SUITE.erl
+++ b/lib/kernel/test/kernel_config_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,7 +21,8 @@
-include_lib("common_test/include/ct.hrl").
--export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2, sync/1]).
+-export([all/0, suite/0,groups/0,init_per_group/2,end_per_group/2,
+ start_distribution_false/1, sync/1]).
-export([init_per_suite/1, end_per_suite/1]).
@@ -30,7 +31,7 @@ suite() ->
{timetrap,{minutes,2}}].
all() ->
- [sync].
+ [sync, start_distribution_false].
groups() ->
[].
@@ -59,12 +60,9 @@ from(H, [H | T]) -> T;
from(H, [_ | T]) -> from(H, T);
from(_, []) -> [].
-%%-----------------------------------------------------------------
-%% Test suite for sync_nodes. This is quite tricky.
-%%
+%% Test sync_nodes. This is quite tricky.
%% Should be started in a CC view with:
%% erl -sname XXX where XX not in [cp1, cp2]
-%%-----------------------------------------------------------------
sync(Conf) when is_list(Conf) ->
%% Write a config file
Dir = proplists:get_value(priv_dir,Conf),
@@ -76,7 +74,7 @@ sync(Conf) when is_list(Conf) ->
%% Reset wall_clock
{T1,_} = erlang:statistics(wall_clock),
io:format("~p~n", [{t1, T1}]),
- Command = lists:concat([lib:progname(),
+ Command = lists:append([ct:get_progname(),
" -detached -sname cp1 ",
"-config ", Config,
" -env ERL_CRASH_DUMP erl_crash_dump.cp1"]),
@@ -106,9 +104,18 @@ wait_for_node(Node) ->
_Other -> wait_for_node(Node)
end.
-
stop_node(Node) ->
M = list_to_atom(lists:concat([Node,
[$@],
from($@,atom_to_list(node()))])),
rpc:cast(M, erlang, halt, []).
+
+start_distribution_false(Config) when is_list(Config) ->
+ %% When distribution is disabled, -sname/-name has no effect
+ Str = os:cmd(ct:get_progname()
+ ++ " -kernel start_distribution false"
+ ++ " -sname no_distribution"
+ ++ " -eval \"erlang:display(node())\""
+ ++ " -noshell -s erlang halt"),
+ "'nonode@nohost'" ++ _ = Str,
+ ok.
diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover
new file mode 100644
index 0000000000..9691aa295e
--- /dev/null
+++ b/lib/kernel/test/logger.cover
@@ -0,0 +1,17 @@
+%% -*- erlang -*-
+{incl_mods,[error_logger,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_filters,
+ logger_formatter,
+ logger_handler_watcher,
+ logger_h_common,
+ logger_olp,
+ logger_proxy,
+ logger_server,
+ logger_simple_h,
+ logger_std_h,
+ logger_sup]}.
+
diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec
new file mode 100644
index 0000000000..3aec37951d
--- /dev/null
+++ b/lib/kernel/test/logger.spec
@@ -0,0 +1,13 @@
+%% -*-erlang-*-
+{suites,"../kernel_test", [error_logger_SUITE,
+ error_logger_warn_SUITE,
+ logger_SUITE,
+ logger_disk_log_h_SUITE,
+ logger_env_var_SUITE,
+ logger_filters_SUITE,
+ logger_formatter_SUITE,
+ logger_legacy_SUITE,
+ logger_olp_SUITE,
+ logger_proxy_SUITE,
+ logger_simple_h_SUITE,
+ logger_std_h_SUITE]}.
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
new file mode 100644
index 0000000000..f8f3d27778
--- /dev/null
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -0,0 +1,1390 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+-define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ file=>?FILE, line=>?LINE-N}).
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ case logger:get_handler_config(?STANDARD_HANDLER) of
+ {ok,StdH} ->
+ ok = logger:remove_handler(?STANDARD_HANDLER),
+ [{default_handler,StdH}|Config];
+ _ ->
+ Config
+ end.
+
+end_per_suite(Config) ->
+ case ?config(default_handler,Config) of
+ #{module:=HMod} = HConfig ->
+ ok = logger:add_handler(?STANDARD_HANDLER,HMod,HConfig);
+ _ ->
+ ok
+ end.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ PC = logger:get_primary_config(),
+ [{logger_config,PC}|Config].
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ add_remove_handler,
+ multiple_handlers,
+ add_remove_filter,
+ change_config,
+ set_formatter,
+ log_no_levels,
+ log_all_levels_api,
+ macros,
+ set_level,
+ set_module_level,
+ set_application_level,
+ cache_module_level,
+ format_report,
+ filter_failed,
+ handler_failed,
+ config_sanity_check,
+ log_failed,
+ emulator,
+ via_logger_process,
+ other_node,
+ compare_levels,
+ process_metadata,
+ app_config,
+ kernel_config,
+ pretty_print].
+
+start_stop(_Config) ->
+ S = whereis(logger),
+ true = is_pid(S),
+ ok.
+
+add_remove_handler(_Config) ->
+ register(callback_receiver,self()),
+ Hs0 = logger:get_handler_config(),
+ {error,{not_found,h1}} = logger:get_handler_config(h1),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ [add] = test_server:messages_get(),
+ Hs = logger:get_handler_config(),
+ Hs0 = lists:filter(fun(#{id:=h1}) -> false; (_) -> true end, Hs),
+ {ok,#{module:=?MODULE,level:=all,filters:=[],filter_default:=log}} = %defaults
+ logger:get_handler_config(h1),
+ ok = logger:set_handler_config(h1,filter_default,stop),
+ [changing_config] = test_server:messages_get(),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_no_log(),
+ ok = logger:set_handler_config(h1,filter_default,log),
+ [changing_config] = test_server:messages_get(),
+ {ok,#{filter_default:=log}} = logger:get_handler_config(h1),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = logger:remove_handler(h1),
+ [remove] = test_server:messages_get(),
+ Hs0 = logger:get_handler_config(),
+ {error,{not_found,h1}} = logger:get_handler_config(h1),
+ {error,{not_found,h1}} = logger:remove_handler(h1),
+ logger:notice("hello",[]),
+ ok = check_no_log(),
+ ok.
+
+add_remove_handler(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
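
The h1 handler registered above uses this suite module itself as the handler callback module. For orientation, a standalone logger handler only has to export log/2; a minimal sketch with a hypothetical module name:

    -module(my_log_handler).            %% hypothetical name
    -export([log/2]).

    %% Called by logger for every log event accepted by the handler's filters.
    log(LogEvent, _HandlerConfig) ->
        io:format("~p~n", [LogEvent]).
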
+multiple_handlers(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ ok = logger:add_handler(h2,?MODULE,#{level=>error,filter_default=>log}),
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = check_no_log(),
+ ok.
+
+multiple_handlers(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+add_remove_filter(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ LF = {fun(Log,_) -> Log#{level=>error} end, []},
+ ok = logger:add_primary_filter(lf,LF),
+ {error,{already_exist,lf}} = logger:add_primary_filter(lf,LF),
+ {error,{already_exist,lf}} = logger:add_primary_filter(lf,{fun(Log,_) ->
+ Log
+ end, []}),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_no_log(),
+
+ ok = logger:add_handler(h2,?MODULE,#{level=>notice,filter_default=>log}),
+ HF = {fun(#{level:=error}=Log,_) ->
+ Log#{level=>mylevel};
+ (_,_) ->
+ ignore
+ end,
+ []},
+ ok = logger:add_handler_filter(h1,hf,HF),
+ {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,HF),
+ {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,{fun(Log,_) ->
+ Log
+ end, []}),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(mylevel,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+
+ ok = logger:remove_primary_filter(lf),
+ {error,{not_found,lf}} = logger:remove_primary_filter(lf),
+
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = check_logged(notice,"hello",[],?MY_LOC(2)),
+
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(mylevel,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+
+ ok = logger:remove_handler_filter(h1,hf),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+ ?LOG_NOTICE("hello",[]),
+ ok = check_logged(notice,"hello",[],?MY_LOC(1)),
+ ok = check_logged(notice,"hello",[],?MY_LOC(2)),
+
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+ ok.
+
+add_remove_filter(cleanup,_Config) ->
+ logger:remove_primary_filter(lf),
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+change_config(_Config) ->
+ %% Overwrite handler config - check that defaults are added
+ {error,{not_found,h1}} = logger:set_handler_config(h1,#{}),
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,custom=>custom}),
+ {ok,#{module:=?MODULE,level:=notice,filter_default:=log,custom:=custom}} =
+ logger:get_handler_config(h1),
+ register(callback_receiver,self()),
+ ok = logger:set_handler_config(h1,#{filter_default=>stop}),
+ [changing_config] = test_server:messages_get(),
+ {ok,#{module:=?MODULE,level:=all,filter_default:=stop}=C2} =
+ logger:get_handler_config(h1),
+ false = maps:is_key(custom,C2),
+ {error,fail} = logger:set_handler_config(h1,#{conf_call=>fun() -> {error,fail} end}),
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(
+ h1,#{conf_call=>fun() -> logger:set_handler_config(?MODULE,#{}) end}),
+ ok =
+ logger:set_handler_config(
+ h1,#{conf_call=>fun() -> logger:set_module_level(?MODULE,debug) end}),
+ {ok,C2} = logger:get_handler_config(h1),
+
+ %% Change handler config: Single key
+ {error,fail} = logger:set_handler_config(h1,conf_call,fun() -> {error,fail} end),
+ ok = logger:set_handler_config(h1,custom,custom),
+ [changing_config] = test_server:messages_get(),
+ {ok,#{custom:=custom}=C3} = logger:get_handler_config(h1),
+ C2 = maps:remove(custom,C3),
+
+ %% Change handler config: Map
+ ok = logger:update_handler_config(h1,#{custom=>new_custom}),
+ [changing_config] = test_server:messages_get(),
+ {ok,C4} = logger:get_handler_config(h1),
+ C4 = C3#{custom:=new_custom},
+
+ %% Change handler config: Id and module can not be changed
+ {error,{illegal_config_change,Old,New}} =
+ logger:set_handler_config(h1,id,newid),
+ %% Check that only the faulty field is included in return
+ [{id,h1}] = maps:to_list(Old),
+ [{id,newid}] = maps:to_list(New),
+ %% Check that both fields are included when both are changed
+ {error,{illegal_config_change,
+ #{id:=h1,module:=?MODULE},
+ #{id:=newid,module:=newmodule}}} =
+ logger:set_handler_config(h1,#{id=>newid,module=>newmodule}),
+
+ %% Change primary config: Single key
+ PConfig0 = logger:get_primary_config(),
+ ok = logger:set_primary_config(level,warning),
+ PConfig1 = logger:get_primary_config(),
+ PConfig1 = PConfig0#{level:=warning},
+
+ %% Change primary config: Map
+ ok = logger:update_primary_config(#{level=>error}),
+ PConfig2 = logger:get_primary_config(),
+ PConfig2 = PConfig1#{level:=error},
+
+ %% Overwrite primary config - check that defaults are added
+ ok = logger:set_primary_config(#{filter_default=>stop}),
+ #{level:=notice,filters:=[],filter_default:=stop}=PC1 =
+ logger:get_primary_config(),
+ 3 = maps:size(PC1),
+ %% Check that internal 'handlers' field has not been changed
+ MS = [{{{?HANDLER_KEY,'$1'},'_','_'},[],['$1']}],
+ HIds1 = lists:sort(ets:select(?LOGGER_TABLE,MS)), % dirty, internal data
+ HIds2 = lists:sort(logger:get_handler_ids()),
+ HIds1 = HIds2,
+
+ %% Cleanup
+ ok = logger:set_primary_config(PConfig0),
+ [] = test_server:messages_get(),
+
+ ok.
+
+change_config(cleanup,Config) ->
+ logger:remove_handler(h1),
+ PC = ?config(logger_config,Config),
+ logger:set_primary_config(PC),
+ ok.
+
+set_formatter(_Config) ->
+ {error,{not_found,h1}}=logger:set_handler_config(h1,formatter,{?MODULE,[]}),
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ ok = logger:set_handler_config(h1,formatter,{?MODULE,[]}),
+ logger:notice("hello",[]),
+ receive
+ {_Log,#{formatter:={?MODULE,[]}}} ->
+ ok
+ after 500 ->
+ ct:fail({timeout,no_log,process_info(self(),messages)})
+ end,
+ ok.
+
+set_formatter(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+log_no_levels(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ logger:notice(M1=?map_rep),
+ ok = check_logged(notice,M1,#{}),
+
+ Levels = [emergency,alert,critical,error,warning,notice,info,debug],
+ ok = logger:set_primary_config(level,none),
+ [logger:Level(#{Level=>rep}) || Level <- Levels],
+ ok = check_no_log(),
+
+ ok = logger:set_primary_config(level,all),
+ M2 = ?map_rep,
+ ?LOG_NOTICE(M2),
+ ok = check_logged(notice,M2,#{}),
+
+ ok = logger:set_module_level(?MODULE,none),
+ ?LOG_EMERGENCY(?map_rep),
+ ?LOG_ALERT(?map_rep),
+ ?LOG_CRITICAL(?map_rep),
+ ?LOG_ERROR(?map_rep),
+ ?LOG_WARNING(?map_rep),
+ ?LOG_NOTICE(?map_rep),
+ ?LOG_INFO(?map_rep),
+ ?LOG_DEBUG(?map_rep),
+ ok = check_no_log(),
+
+ ok = logger:unset_module_level(?MODULE),
+ logger:notice(M3=?map_rep),
+ ok = check_logged(notice,M3,#{}),
+
+ ok = logger:set_handler_config(h1,level,none),
+ [logger:Level(#{Level=>rep}) || Level <- Levels],
+ ok = check_no_log(),
+
+ ok.
+log_no_levels(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_primary_config(level,notice),
+ logger:unset_module_level(?MODULE),
+ ok.
+
+log_all_levels_api(_Config) ->
+ ok = logger:set_primary_config(level,all),
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ test_api(emergency),
+ test_api(alert),
+ test_api(critical),
+ test_api(error),
+ test_api(warning),
+ test_api(notice),
+ test_api(info),
+ test_api(debug),
+ test_log_function(emergency),
+ ok.
+
+log_all_levels_api(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_primary_config(level,notice),
+ ok.
+
+macros(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ test_macros(emergency),
+ test_log_macro(alert),
+ ok.
+
+macros(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:unset_module_level(?MODULE),
+ ok.
+
+set_level(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>all,filter_default=>log}),
+ logger:debug(?map_rep),
+ ok = check_no_log(),
+ logger:notice(M1=?map_rep),
+ ok = check_logged(notice,M1,#{}),
+ ok = logger:set_primary_config(level,debug),
+ logger:debug(M2=?map_rep),
+ ok = check_logged(debug,M2,#{}),
+ ok.
+
+set_level(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_primary_config(level,notice),
+ ok.
+
+set_module_level(_Config) ->
+ [] = logger:get_module_level([?MODULE,other]),
+ [] = logger:get_module_level(?MODULE),
+ [] = logger:get_module_level(),
+
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ {error,{invalid_level,bad}} = logger:set_module_level(?MODULE,bad),
+ {error,{not_a_list_of_modules,{bad}}} =
+ logger:set_module_level({bad},warning),
+ {error,{not_a_list_of_modules,[{bad}]}} =
+ logger:set_module_level([{bad}],warning),
+ ok = logger:set_module_level(?MODULE,warning),
+ [{?MODULE,warning}] = logger:get_module_level([?MODULE,other]),
+ [{?MODULE,warning}] = logger:get_module_level(?MODULE),
+ [{?MODULE,warning}] = logger:get_module_level(),
+ logger:notice(?map_rep,?MY_LOC(0)),
+ ok = check_no_log(),
+ logger:warning(M1=?map_rep,?MY_LOC(0)),
+ ok = check_logged(warning,M1,?MY_LOC(1)),
+ ok = logger:set_module_level(?MODULE,notice),
+ [{?MODULE,notice}] = logger:get_module_level([?MODULE,other]),
+ [{?MODULE,notice}] = logger:get_module_level(?MODULE),
+ [{?MODULE,notice}] = logger:get_module_level(),
+ logger:notice(M2=?map_rep,?MY_LOC(0)),
+ ok = check_logged(notice,M2,?MY_LOC(1)),
+
+ {error,{not_a_list_of_modules,{bad}}} = logger:unset_module_level({bad}),
+ {error,{not_a_list_of_modules,[{bad}]}} = logger:unset_module_level([{bad}]),
+ ok = logger:unset_module_level(?MODULE),
+ [] = logger:get_module_level([?MODULE,other]),
+ [] = logger:get_module_level(?MODULE),
+ [] = logger:get_module_level(),
+
+ ok = logger:set_module_level([m1,m2,m3],notice),
+ [{m1,notice},{m2,notice},{m3,notice}] = logger:get_module_level(),
+ ok = logger:unset_module_level(m2),
+ [{m1,notice},{m3,notice}] = logger:get_module_level(),
+ ok = logger:unset_module_level(),
+ [] = logger:get_module_level(),
+
+ ok.
+
+set_module_level(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:unset_module_level(?MODULE),
+ ok.
+
+set_application_level(_Config) ->
+
+ {error,{not_loaded,mnesia}} = logger:set_application_level(mnesia, warning),
+ {error,{not_loaded,mnesia}} = logger:unset_application_level(mnesia),
+
+ case application:load(mnesia) of
+ ok ->
+ {ok, Modules} = application:get_key(mnesia, modules),
+ [] = logger:get_module_level(Modules),
+
+ {error,{invalid_level,warn}} =
+ logger:set_application_level(mnesia, warn),
+
+ ok = logger:set_application_level(mnesia, debug),
+ DebugModules = lists:sort([{M,debug} || M <- Modules]),
+ DebugModules = lists:sort(logger:get_module_level(Modules)),
+
+ ok = logger:set_application_level(mnesia, warning),
+
+ WarnModules = lists:sort([{M,warning} || M <- Modules]),
+ WarnModules = lists:sort(logger:get_module_level(Modules)),
+
+ ok = logger:unset_application_level(mnesia),
+ [] = logger:get_module_level(Modules);
+ {error,{"no such file or directory","mnesia.app"}} ->
+ {skip, "Cannot load mnesia, does not exist"}
+ end.
+
+set_application_level(cleanup,_Config) ->
+ _ = logger:unset_application_level(mnesia),
+ _ = application:unload(mnesia),
+ ok.
+
+cache_module_level(_Config) ->
+ ok = logger:unset_module_level(?MODULE),
+ [] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config?
+ ?LOG_NOTICE(?map_rep),
+ %% Caching is done asynchronously, so wait a bit for the update
+ timer:sleep(100),
+ [_] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config?
+ ok = logger:unset_module_level(?MODULE),
+ [] = ets:lookup(?LOGGER_TABLE,?MODULE), %dirty - add API in logger_config?
+ ok.
+
+cache_module_level(cleanup,_Config) ->
+ logger:unset_module_level(?MODULE),
+ ok.
+
+format_report(_Config) ->
+ {"~ts",["string"]} = logger:format_report("string"),
+ {"~tp",[term]} = logger:format_report(term),
+ {"~tp",[[]]} = logger:format_report([]),
+ {" ~tp: ~tp",[key,value]} = logger:format_report([{key,value}]),
+ KeyVals = [{key1,value1},{key2,"value2"},{key3,[]}],
+ KeyValRes =
+ {" ~tp: ~tp\n ~tp: ~ts\n ~tp: ~tp",
+ [key1,value1,key2,"value2",key3,[]]} =
+ logger:format_report(KeyVals),
+ KeyValRes = logger:format_report(maps:from_list(KeyVals)),
+ KeyValRes = logger:format_otp_report(#{label=>{?MODULE,test},report=>KeyVals}),
+ {" ~tp: ~tp\n ~tp: ~tp",
+ [label,{?MODULE,test},report,KeyVals]} =
+ logger:format_report(#{label=>{?MODULE,test},report=>KeyVals}),
+
+ {" ~tp: ~tp\n ~tp",[key1,value1,term]} =
+ logger:format_report([{key1,value1},term]),
+
+ {" ~tp: ~tp\n ~tp",[key1,value1,[]]} =
+ logger:format_report([{key1,value1},[]]),
+
+ {"~tp",[[]]} = logger:format_report([[],[],[]]),
+
+ ok.
+
+filter_failed(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+
+ %% Logger filters
+ {error,{invalid_filter,_}} =
+ logger:add_primary_filter(lf,{fun(_) -> ok end,args}),
+ ok = logger:add_primary_filter(lf,
+ {fun(_,_) ->
+ erlang:error({badmatch,b})
+ end,
+ args}),
+ #{filters:=[_]} = logger:get_primary_config(),
+ ok = logger:notice(M1=?map_rep),
+ ok = check_logged(notice,M1,#{}),
+ {error,{not_found,lf}} = logger:remove_primary_filter(lf),
+
+ ok = logger:add_primary_filter(lf,{fun(_,_) -> faulty_return end,args}),
+ #{filters:=[_]} = logger:get_primary_config(),
+ ok = logger:notice(M2=?map_rep),
+ ok = check_logged(notice,M2,#{}),
+ {error,{not_found,lf}} = logger:remove_primary_filter(lf),
+
+ %% Handler filters
+ {error,{not_found,h0}} =
+ logger:add_handler_filter(h0,hf,{fun(_,_) -> ignore end,args}),
+ {error,{not_found,h0}} = logger:remove_handler_filter(h0,hf),
+ {error,{invalid_filter,_}} =
+ logger:add_handler_filter(h1,hf,{fun(_) -> ok end,args}),
+ ok = logger:add_handler_filter(h1,hf,
+ {fun(_,_) ->
+ erlang:error({badmatch,b})
+ end,
+ args}),
+ {ok,#{filters:=[_]}} = logger:get_handler_config(h1),
+ ok = logger:notice(M3=?map_rep),
+ ok = check_logged(notice,M3,#{}),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+
+ ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> faulty_return end,args}),
+ {ok,#{filters:=[_]}} = logger:get_handler_config(h1),
+ ok = logger:notice(M4=?map_rep),
+ ok = check_logged(notice,M4,#{}),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+
+ ok.
+
+filter_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+handler_failed(_Config) ->
+ register(callback_receiver,self()),
+ {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}),
+ {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}),
+ {error,{invalid_config,bad}} = logger:add_handler(h1,?MODULE,bad),
+ {error,{invalid_filters,false}} =
+ logger:add_handler(h1,?MODULE,#{filters=>false}),
+ {error,{invalid_filter_default,true}} =
+ logger:add_handler(h1,?MODULE,#{filter_default=>true}),
+ {error,{invalid_formatter,[]}} =
+ logger:add_handler(h1,?MODULE,#{formatter=>[]}),
+ {error,{invalid_handler,_}} = logger:add_handler(h1,nomodule,#{filter_default=>log}),
+ logger:notice(?map_rep),
+ check_no_log(),
+ H1 = logger:get_handler_config(),
+ false = lists:search(fun(#{id:=h1}) -> true; (_) -> false end,H1),
+ {error,{not_found,h1}} = logger:remove_handler(h1),
+
+ ok = logger:add_handler(h2,?MODULE,
+ #{filter_default => log,
+ log_call => fun() ->
+ erlang:error({badmatch,b})
+ end}),
+ {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}),
+ [add] = test_server:messages_get(),
+
+ logger:notice(?map_rep),
+ [remove] = test_server:messages_get(),
+ H2 = logger:get_handler_config(),
+ false = lists:search(fun(#{id:=h2}) -> true; (_) -> false end,H2),
+ {error,{not_found,h2}} = logger:remove_handler(h2),
+
+ CallAddHandler = fun() -> logger:add_handler(h2,?MODULE,#{}) end,
+ CrashHandler = fun() -> erlang:error({badmatch,b}) end,
+ KillHandler = fun() -> exit(self(), die) end,
+
+ {error,{handler_not_added,{attempting_syncronous_call_to_self,_}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>CallAddHandler}),
+ {error,{handler_not_added,{callback_crashed,_}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>CrashHandler}),
+ {error,{handler_not_added,{logger_process_exited,_,die}}} =
+ logger:add_handler(h1,?MODULE,#{add_call=>KillHandler}),
+
+ check_no_log(),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(h1,#{conf_call=>CallAddHandler}),
+ {error,{callback_crashed,_}} =
+ logger:set_handler_config(h1,#{conf_call=>CrashHandler}),
+ {error,{logger_process_exited,_,die}} =
+ logger:set_handler_config(h1,#{conf_call=>KillHandler}),
+
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(h1,conf_call,CallAddHandler),
+ {error,{callback_crashed,_}} =
+ logger:set_handler_config(h1,conf_call,CrashHandler),
+ {error,{logger_process_exited,_,die}} =
+ logger:set_handler_config(h1,conf_call,KillHandler),
+
+ ok = logger:remove_handler(h1),
+ [add,remove] = test_server:messages_get(),
+
+ check_no_log(),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>CallAddHandler}),
+ ok = logger:remove_handler(h1),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>CrashHandler}),
+ ok = logger:remove_handler(h1),
+ ok = logger:add_handler(h1,?MODULE,#{rem_call=>KillHandler}),
+ ok = logger:remove_handler(h1),
+ [add,add,add] = test_server:messages_get(),
+
+ ok.
+
+handler_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+config_sanity_check(_Config) ->
+ %% Primary config
+ {error,{invalid_config,bad}} = logger:set_primary_config(bad),
+ {error,{invalid_filter_default,bad}} =
+ logger:set_primary_config(filter_default,bad),
+ {error,{invalid_level,bad}} = logger:set_primary_config(level,bad),
+ {error,{invalid_filters,bad}} = logger:set_primary_config(filters,bad),
+ {error,{invalid_filter,bad}} = logger:set_primary_config(filters,[bad]),
+ {error,{invalid_filter,{_,_}}} =
+ logger:set_primary_config(filters,[{id,bad}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_primary_config(filters,[{id,{bad,args}}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_primary_config(filters,[{id,{fun() -> ok end,args}}]),
+ {error,{invalid_primary_config,{bad,bad}}} =
+ logger:set_primary_config(bad,bad),
+
+ %% Handler config
+ {error,{not_found,h1}} = logger:set_handler_config(h1,a,b),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ {error,{invalid_filter_default,bad}} =
+ logger:set_handler_config(h1,filter_default,bad),
+ {error,{invalid_level,bad}} = logger:set_handler_config(h1,level,bad),
+ {error,{invalid_filters,bad}} = logger:set_handler_config(h1,filters,bad),
+ {error,{invalid_filter,bad}} = logger:set_handler_config(h1,filters,[bad]),
+ {error,{invalid_filter,{_,_}}} =
+ logger:set_handler_config(h1,filters,[{id,bad}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_handler_config(h1,filters,[{id,{bad,args}}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_handler_config(h1,filters,[{id,{fun() -> ok end,args}}]),
+ {error,{invalid_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,bad),
+ {error,{invalid_module,{bad}}} =
+ logger:set_handler_config(h1,formatter,{{bad},cfg}),
+ {error,{invalid_formatter_config,logger_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,bad}),
+ {error,{invalid_formatter_config,logger_formatter,{bad,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}),
+ {error,{invalid_formatter_template,logger_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>bad}}),
+ {error,{invalid_formatter_template,logger_formatter,[1]}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>[1]}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>[]}}),
+ {error,{invalid_formatter_config,logger_formatter,{single_line,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{single_line=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{single_line=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{legacy_header,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{legacy_header=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{legacy_header=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{report_cb,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{report_cb=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{report_cb=>fun(R) ->
+ {"~p",[R]}
+ end}}),
+ {error,{invalid_formatter_config,logger_formatter,{chars_limit,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{chars_limit=>4}}),
+ {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{depth=>4}}),
+ {error,{invalid_formatter_config,logger_formatter,{max_size,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>unlimited}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{max_size=>4}}),
+ ok = logger:set_handler_config(h1,formatter,{module,config}),
+ {error,{callback_crashed,{error,{badmatch,3},[{?MODULE,check_config,1,_}]}}} =
+ logger:set_handler_config(h1,formatter,{?MODULE,crash}),
+ ok = logger:set_handler_config(h1,custom,custom),
+
+    %% The old 'utc' parameter is no longer allowed (replaced by 'time_offset')
+ {error,{invalid_formatter_config,logger_formatter,{utc,true}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{utc=>true}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_offset,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>0}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>""}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"Z"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"z"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"-0:0"}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"+10:13"}}),
+
+ {error,{invalid_formatter_config,logger_formatter,{time_offset,"+0"}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_offset=>"+0"}}),
+
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>bad}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,"s"}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>"s"}}),
+ {error,{invalid_formatter_config,logger_formatter,{time_designator,0}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>0}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{time_designator=>$\s}}),
+ ok.
+
+config_sanity_check(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+log_failed(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ {error,function_clause} = ?TRY(logger:log(bad,?map_rep)),
+ {error,function_clause} = ?TRY(logger:log(notice,?map_rep,bad)),
+ {error,function_clause} = ?TRY(logger:log(notice,fun() -> ?map_rep end,bad)),
+ {error,function_clause} = ?TRY(logger:log(notice,fun() -> ?map_rep end,bad,#{})),
+ {error,function_clause} = ?TRY(logger:log(notice,bad,bad,bad)),
+ {error,function_clause} = ?TRY(logger:log(notice,bad,bad,#{})),
+ check_no_log(),
+ ok = logger:log(notice,M1=?str,#{}),
+ check_logged(notice,M1,#{}),
+ ok = logger:log(notice,M2=?map_rep,#{}),
+ check_logged(notice,M2,#{}),
+ ok = logger:log(notice,M3=?keyval_rep,#{}),
+ check_logged(notice,M3,#{}),
+
+ %% Should we check report input more thoroughly?
+ ok = logger:log(notice,M4=?keyval_rep++[other,stuff,in,list],#{}),
+ check_logged(notice,M4,#{}),
+
+    %% This might break a handler, since the arguments are assumed to be a
+    %% format string and args, so it depends on how the handler protects
+    %% itself against something like io_lib:format("ok","ok")
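+    %% A handler might guard against such input with something like the
+    %% following (an illustrative sketch only, not the actual handler code):
+    %%   safe_format(Fmt,Args) ->
+    %%       try io_lib:format(Fmt,Args)
+    %%       catch _:_ -> io_lib:format("FORMAT ERROR: ~tp - ~tp",[Fmt,Args])
+    %%       end.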
+ ok = logger:log(notice,"ok","ok",#{}),
+ check_logged(notice,"ok","ok",#{}),
+
+ ok.
+
+log_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+emulator(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+ tc_proc=>self()}),
+ Msg = "Error in process ~p on node ~p with exit value:~n~p~n",
+ Error = {badmatch,4},
+ Stack = [{module, function, 2, []}],
+ Pid = spawn(?MODULE, generate_error, [Error, Stack]),
+ check_logged(error, Msg, [Pid, node(), {Error, Stack}],
+ #{gl=>group_leader(),
+ error_logger=>#{tag=>error,emulator=>true}}),
+ ok.
+
+emulator(cleanup, _Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+generate_error(Error, Stack) ->
+ erlang:raise(error, Error, Stack).
+
+via_logger_process(Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+ tc_proc=>self()}),
+
+    %% Explicitly send a message to the logger process.
+    %% This is used by code_server, erl_prim_loader, init, prim_file, ...
+ Msg = ?str,
+ logger ! {log,error,Msg,[],#{}},
+ check_logged(error, Msg, [], #{}),
+
+ case os:type() of
+ {win32,_} ->
+            %% Skip this part on Windows - can't change the file mode
+ ok;
+ _ ->
+ %% This should trigger the same thing from erl_prim_loader
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ ok = file:make_dir(Dir),
+ ok = file:change_mode(Dir,8#0222),
+ error = erl_prim_loader:list_dir(Dir),
+ check_logged(error,
+ #{report=>"File operation error: eacces. Target: " ++
+ Dir ++". Function: list_dir. "},
+ #{pid=>self(),
+ gl=>group_leader(),
+ error_logger=>#{tag=>error_report,
+ type=>std_error}}),
+ ok
+ end.
+
+via_logger_process(cleanup, Config) ->
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ _ = file:change_mode(Dir,8#0664),
+ _ = file:del_dir(Dir),
+ logger:remove_handler(h1),
+ ok.
+
+other_node(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log,
+ tc_proc=>self()}),
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ rpc:call(Node,logger,error,[Msg=?str,#{}]),
+ check_logged(error,Msg,#{}),
+ ok.
+
+other_node(cleanup,_Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes],
+ logger:remove_handler(h1),
+ ok.
+
+compare_levels(_Config) ->
+ Levels = [none,emergency,alert,critical,error,warning,notice,info,debug,all],
+ ok = compare(Levels),
+ {error,badarg} = ?TRY(logger:compare_levels(bad,bad)),
+ {error,badarg} = ?TRY(logger:compare_levels({bad},notice)),
+ {error,badarg} = ?TRY(logger:compare_levels(notice,"bad")),
+ ok.
+
+compare([L|Rest]) ->
+ eq = logger:compare_levels(L,L),
+ [gt = logger:compare_levels(L,L1) || L1 <- Rest],
+ [lt = logger:compare_levels(L1,L) || L1 <- Rest],
+ compare(Rest);
+compare([]) ->
+ ok.
+
+process_metadata(_Config) ->
+ undefined = logger:get_process_metadata(),
+ {error,badarg} = ?TRY(logger:set_process_metadata(bad)),
+ ok = logger:add_handler(h1,?MODULE,#{level=>notice,filter_default=>log}),
+ Time = logger:timestamp(),
+ ProcMeta = #{time=>Time,line=>0,custom=>proc},
+ ok = logger:set_process_metadata(ProcMeta),
+ S1 = ?str,
+ ?LOG_NOTICE(S1,#{custom=>macro}),
+ check_logged(notice,S1,#{time=>Time,line=>0,custom=>macro}),
+
+ Time2 = logger:timestamp(),
+ S2 = ?str,
+ ?LOG_NOTICE(S2,#{time=>Time2,line=>1,custom=>macro}),
+ check_logged(notice,S2,#{time=>Time2,line=>1,custom=>macro}),
+
+ logger:notice(S3=?str,#{custom=>func}),
+ check_logged(notice,S3,#{time=>Time,line=>0,custom=>func}),
+
+ ProcMeta = logger:get_process_metadata(),
+ ok = logger:update_process_metadata(#{custom=>changed,custom2=>added}),
+ Expected = ProcMeta#{custom:=changed,custom2=>added},
+ Expected = logger:get_process_metadata(),
+ ok = logger:unset_process_metadata(),
+ undefined = logger:get_process_metadata(),
+
+ ok = logger:update_process_metadata(#{custom=>added_again}),
+ {error,badarg} = ?TRY(logger:update_process_metadata(bad)),
+ #{custom:=added_again} = logger:get_process_metadata(),
+
+ ok.
+
+process_metadata(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+app_config(Config) ->
+ %% Start a node with default configuration
+ {ok,_,Node} = logger_test_lib:setup(Config,[]),
+
+ App1Name = app1,
+ App1 = {application, App1Name,
+ [{description, "Test of app with logger config"},
+ {applications, [kernel]}]},
+ ok = rpc:call(Node,application,load,[App1]),
+ ok = rpc:call(Node,application,set_env,
+ [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+
+    %% Try to add our own default handler
+ {error,{bad_config,{handler,{app1,{already_exist,default}}}}} =
+ rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ %% Add a different handler
+ ok = rpc:call(Node,application,set_env,[App1Name,logger,
+ [{handler,myh,logger_std_h,#{}}]]),
+ ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ {ok,#{filters:=DF}} = rpc:call(Node,logger,get_handler_config,[default]),
+ {ok,#{filters:=[]}} = rpc:call(Node,logger,get_handler_config,[myh]),
+
+ true = test_server:stop_node(Node),
+
+    %% Start a node with no default handler, then add our own default handler
+ {ok,#{handlers:=[#{id:=simple}]},Node} =
+ logger_test_lib:setup(Config,[{logger,[{handler,default,undefined}]}]),
+
+ ok = rpc:call(Node,application,load,[App1]),
+ ok = rpc:call(Node,application,set_env,
+ [App1Name,logger,[{handler,default,logger_std_h,#{}}]]),
+ ok = rpc:call(Node,logger,add_handlers,[App1Name]),
+
+ #{handlers:=[#{id:=default,filters:=DF}]} =
+ rpc:call(Node,logger,get_config,[]),
+
+ true = test_server:stop_node(Node),
+
+    %% Start a silent node, then add our own default handler
+ {ok,#{handlers:=[]},Node} =
+ logger_test_lib:setup(Config,[{error_logger,silent}]),
+
+ {error,{bad_config,{handler,[{some,bad,config}]}}} =
+ rpc:call(Node,logger,add_handlers,[[{some,bad,config}]]),
+ ok = rpc:call(Node,logger,add_handlers,
+ [[{handler,default,logger_std_h,#{}}]]),
+
+ #{handlers:=[#{id:=default,filters:=DF}]} =
+ rpc:call(Node,logger,get_config,[]),
+
+ ok.
+
+%% This test case is mainly here for code coverage. Note that
+%% logger_env_var_SUITE tests much of the same functionality, and checks it
+%% more thoroughly, but since it all happens at node start, it is not
+%% possible to collect code coverage from that test.
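+%% For reference, the kernel 'logger' parameter exercised below is normally
+%% given in sys.config; an illustrative example (the file name here is
+%% chosen only for illustration) could look like:
+%%   [{kernel,
+%%     [{logger,
+%%       [{handler, default, logger_std_h,
+%%         #{config => #{type => {file,"log/erlang.log"}}}}]}]}].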
+kernel_config(Config) ->
+    %% Start a node with the simple handler only, then simulate kernel
+    %% start by calling the internally exported internal_init_logger().
+    %% This exercises all variants of kernel config, including bad
+    %% config, to collect code coverage.
+ {ok,#{handlers:=[#{id:=simple,filters:=DF}]}=LC,Node} =
+ logger_test_lib:setup(Config,[{error_logger,false}]),
+
+ %% Same once more, to get coverage
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ LC = rpc:call(Node,logger,get_config,[]),
+
+    %% This means the same as above, but uses the 'logger' parameter
+    %% instead of 'error_logger'
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{handler,default,undefined}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ LC = rpc:call(Node,logger,get_config,[]),
+
+ %% Silent
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,silent]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Default
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% error_logger=tty (same as default)
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,tty]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{type:=standard_io}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% error_logger={file,File}
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ F = filename:join(?config(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,{file,F}]),
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+ [append,delayed_write,raw] = lists:sort(Modes),
+
+
+ %% Same, but using 'logger' parameter instead of 'error_logger'
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,F}}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=Modes}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Same, but with type={file,File,Modes}
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ M = [raw,write],
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,F,M}}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,
+ config:=#{type:=file,file:=F,modes:=[delayed_write|M]}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Same, but with disk_log handler
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger,
+ [{handler,default,logger_disk_log_h,
+ #{config=>#{file=>F}}}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=log,filters:=[]},
+ handlers:=[#{id:=default,filters:=DF,config:=#{file:=F}}],
+ module_levels:=[]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Set primary filters and module level. No default handler.
+ ok = rpc:call(Node,logger,remove_handler,[default]),% so it can be added again
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{handler,default,undefined},
+ {filters,stop,[{f1,{fun(_,_) -> log end,ok}}]},
+ {module_level,debug,[?MODULE]}]]),
+ ok = rpc:call(Node,logger,internal_init_logger,[]),
+ ok = rpc:call(Node,logger,add_handlers,[kernel]),
+ #{primary:=#{filter_default:=stop,filters:=[_]},
+ handlers:=[],
+ module_levels:=[{?MODULE,debug}]} = rpc:call(Node,logger,get_config,[]),
+
+ %% Bad config
+ ok = rpc:call(Node,application,unset_env,[kernel,logger]),
+
+ ok = rpc:call(Node,application,set_env,[kernel,error_logger,bad]),
+ {error,{bad_config,{kernel,{error_logger,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,unset_env,[kernel,error_logger]),
+ ok = rpc:call(Node,application,set_env,[kernel,logger_level,bad]),
+ {error,{bad_config,{kernel,{logger_level,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,unset_env,[kernel,logger_level]),
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[bad]}]]),
+ {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[bad]}]]),
+ {error,{bad_config,{kernel,{invalid_filters,[bad]}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{filters,stop,[{f1,bad}]}]]),
+ {error,{bad_config,{kernel,{invalid_filter,{f1,bad}}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,MF=[{filters,stop,[]},{filters,log,[]}]]),
+ {error,{bad_config,{kernel,{multiple_filters,MF}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok = rpc:call(Node,application,set_env,
+ [kernel,logger,[{module_level,bad,[?MODULE]}]]),
+ {error,{bad_config,{kernel,{invalid_level,bad}}}} =
+ rpc:call(Node,logger,internal_init_logger,[]),
+
+ ok.
+
+pretty_print(Config) ->
+ ok = logger:add_handler(?FUNCTION_NAME,logger_std_h,#{}),
+ ok = logger:set_module_level([module1,module2],debug),
+
+ ct:capture_start(),
+ logger:i(),
+ ct:capture_stop(),
+ I0 = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(primary),
+ ct:capture_stop(),
+ IPrim = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(handlers),
+ ct:capture_stop(),
+ IHs = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(proxy),
+ ct:capture_stop(),
+ IProxy = ct:capture_get(),
+
+ ct:capture_start(),
+ logger:i(modules),
+ ct:capture_stop(),
+ IMs = ct:capture_get(),
+
+ I02 = lists:append([IPrim,IHs,IProxy,IMs]),
+ %% ct:log("~p~n",[I0]),
+ %% ct:log("~p~n",[I02]),
+ I0 = I02,
+
+ ct:capture_start(),
+ logger:i(handlers),
+ ct:capture_stop(),
+ IHs = ct:capture_get(),
+
+ Ids = logger:get_handler_ids(),
+ IHs2 =
+ lists:append(
+ [begin
+ ct:capture_start(),
+ logger:i(Id),
+ ct:capture_stop(),
+ [_|IH] = ct:capture_get(),
+ IH
+ end || Id <- Ids]),
+
+ %% ct:log("~p~n",[IHs]),
+ %% ct:log("~p~n",[["Handler configuration: \n"|IHs2]]),
+ IHs = ["Handler configuration: \n"|IHs2],
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
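+%%% The helpers below verify the log events that the log/2 handler callback
+%%% (defined further down) forwards to the test case process.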
+check_logged(Level,Format,Args,Meta) ->
+ do_check_logged(Level,{Format,Args},Meta).
+
+check_logged(Level,Msg,Meta) when ?IS_REPORT(Msg) ->
+ do_check_logged(Level,{report,Msg},Meta);
+check_logged(Level,Msg,Meta) when ?IS_STRING(Msg) ->
+ do_check_logged(Level,{string,Msg},Meta).
+
+do_check_logged(Level,Msg0,Meta0) ->
+ receive
+ {#{level:=Level,msg:=Msg,meta:=Meta},_} ->
+ check_msg(Msg0,Msg),
+ check_maps(Meta0,Meta,meta)
+ after 500 ->
+ ct:fail({timeout,no_log,process_info(self(),messages)})
+ end.
+
+check_no_log() ->
+ receive
+ X -> ct:fail({got_unexpected_log,X})
+ after 500 ->
+ ok
+ end.
+
+check_msg(Msg,Msg) ->
+ ok;
+check_msg({report,Expected},{report,Got}) when is_map(Expected), is_map(Got) ->
+ check_maps(Expected,Got,msg);
+check_msg(Expected,Got) ->
+ ct:fail({unexpected,msg,Expected,Got}).
+
+check_maps(Expected,Got,What) ->
+ case maps:merge(Got,Expected) of
+ Got ->
+ ok;
+ _ ->
+ ct:fail({unexpected,What,Expected,Got})
+ end.
+
+%% Handler
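+%% The callbacks below implement the logger handler API, so that the test
+%% cases above can add this module (?MODULE) as a handler and receive the
+%% logged events as messages in the test case process mailbox.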
+adding_handler(#{add_call:=Fun}) ->
+ Fun();
+adding_handler(Config) ->
+ maybe_send(add),
+ {ok,Config}.
+
+removing_handler(#{rem_call:=Fun}) ->
+ Fun();
+removing_handler(_Config) ->
+ maybe_send(remove),
+ ok.
+changing_config(_Old,#{conf_call:=Fun}) ->
+ Fun();
+changing_config(_Old,Config) ->
+ maybe_send(changing_config),
+ {ok,Config}.
+
+maybe_send(Msg) ->
+ case whereis(callback_receiver) of
+ undefined -> ok;
+ Pid -> Pid ! Msg
+ end.
+
+log(_Log,#{log_call:=Fun}) ->
+ Fun();
+log(Log,Config) ->
+ TcProc = maps:get(tc_proc,Config,self()),
+ TcProc ! {Log,Config},
+ ok.
+
+test_api(Level) ->
+ logger:Level(#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},#{}),
+ logger:Level(#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},#{my=>meta}),
+ logger:Level("~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],#{}),
+ logger:Level("~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ logger:Level(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,x,
+ #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}),
+ logger:Level(fun(x) -> #{Level=>fun_to_r,meta=>true} end,x,
+ #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}),
+ logger:Level(fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,#{}),
+ logger:Level(F1=fun(x) -> {fun_to_bad} end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ logger:Level(F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_log_function(Level) ->
+ logger:log(Level,#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},#{}),
+ logger:log(Level,#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},#{my=>meta}),
+ logger:log(Level,"~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],#{}),
+ logger:log(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ logger:log(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}),
+ logger:log(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}),
+ logger:log(Level,fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,#{}),
+ logger:log(Level,F1=fun(x) -> {fun_to_bad} end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ logger:log(Level,F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_macros(emergency=Level) ->
+ ?LOG_EMERGENCY(#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)),
+ ?LOG_EMERGENCY(#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}),
+ ?LOG_EMERGENCY("~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)),
+ ?LOG_EMERGENCY("~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],
+ (?MY_LOC(3))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},
+ (?MY_LOC(2))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)),
+ F1=fun(x) -> {fun_to_bad} end,
+ ?LOG_EMERGENCY(F1,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ F2=fun(x) -> erlang:error(fun_that_crashes) end,
+ ?LOG_EMERGENCY(F2,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_log_macro(Level) ->
+ ?LOG(Level,#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)),
+ ?LOG(Level,#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}),
+ ?LOG(Level,"~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)),
+ ?LOG(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}),
+ ?LOG(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],
+ (?MY_LOC(3))#{my=>meta}),
+ ?LOG(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},
+ (?MY_LOC(2))#{my=>meta}),
+ ?LOG(Level,fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)),
+ F1=fun(x) -> {fun_to_bad} end,
+ ?LOG(Level,F1,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ F2=fun(x) -> erlang:error(fun_that_crashes) end,
+ ?LOG(Level,F2,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R -> {C,R} end.
+
+check_config(crash) ->
+ erlang:error({badmatch,3});
+check_config(_) ->
+ ok.
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
new file mode 100644
index 0000000000..36d98eaa25
--- /dev/null
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -0,0 +1,1668 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_disk_log_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_olp.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-define(check_no_log, [] = test_server:messages_get()).
+
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(bin(Msg), list_to_binary(Msg++"\n")).
+-define(log_no(File,N), lists:concat([File,".",N])).
+-define(domain,#{domain=>[?MODULE]}).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ timer:start(), % to avoid progress report
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(TestHooksCase, Config) when
+ TestHooksCase == write_failure;
+ TestHooksCase == sync_failure ->
+ case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of
+ true ->
+ {skip,"Define the TEST_HOOKS macro to run this test"};
+ false ->
+ ct:print("********** ~w **********", [TestHooksCase]),
+ Config
+ end;
+init_per_testcase(TestCase, Config) ->
+ ct:print("********** ~w **********", [TestCase]),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop_handler,
+ create_log,
+ open_existing_log,
+ disk_log_opts,
+ default_formatter,
+ logging,
+ filter_config,
+ errors,
+ formatter_fail,
+ config_fail,
+ bad_input,
+ reconfig,
+ sync,
+ disk_log_full,
+ disk_log_wrap,
+ disk_log_events,
+ write_failure,
+ sync_failure,
+ op_switch_to_sync,
+ op_switch_to_drop,
+ op_switch_to_flush,
+ limit_burst_disabled,
+ limit_burst_enabled_one,
+ limit_burst_enabled_period,
+ kill_disabled,
+ qlen_kill_new,
+ %% qlen_kill_std,
+ mem_kill_new,
+ %% mem_kill_std,
+ restart_after,
+ handler_requests_under_load
+ ].
+
+start_stop_handler(_Config) ->
+ ok = logger:add_handler(?MODULE, logger_disk_log_h, #{}),
+ {error,{already_exist,?MODULE}} =
+ logger:add_handler(?MODULE, logger_disk_log_h, #{}),
+ true = is_pid(whereis(h_proc_name())),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name()).
+start_stop_handler(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+create_log(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_A"])),
+ LogFile1 = filename:join(PrivDir, Name1),
+ ok = start_and_add(Name1, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:notice("hello", ?domain),
+ logger_disk_log_h:filesync(Name1),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000),
+
+ %% test second handler
+ Name2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_B"])),
+ DLName = lists:concat([?FUNCTION_NAME,"_B_log"]),
+ LogFile2 = filename:join(PrivDir, DLName),
+ ok = start_and_add(Name2, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile2}),
+ logger:notice("dummy", ?domain),
+ logger_disk_log_h:filesync(Name2),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]),
+ try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
+
+ remove_and_stop(Name1),
+ remove_and_stop(Name2),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"hello\ndummy\n">>}, 1),
+ try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
+ ok.
+
+open_existing_log(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ HName = ?FUNCTION_NAME,
+ DLName = lists:concat([?FUNCTION_NAME,"_log"]),
+ LogFile1 = filename:join(PrivDir, DLName),
+ ok = start_and_add(HName, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:notice("one", ?domain),
+ logger_disk_log_h:filesync(HName),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000),
+ logger:notice("two", ?domain),
+ ok = remove_and_stop(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\n">>}, 5000),
+
+ logger:notice("two and a half", ?domain),
+
+ ok = start_and_add(HName, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:notice("three", ?domain),
+ logger_disk_log_h:filesync(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000),
+ remove_and_stop(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000).
+
+disk_log_opts(Config) ->
+ Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+ PrivDir = ?config(priv_dir,Config),
+ WName = list_to_atom(lists:concat([?FUNCTION_NAME,"_W"])),
+ WFile = lists:concat([?FUNCTION_NAME,"_W_log"]),
+ Size = length("12345"),
+ ConfigW = #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter => {?MODULE,no_nl}},
+ WFileFull = filename:join(PrivDir, WFile),
+ DLOptsW = #{file => WFileFull,
+ type => wrap,
+ max_no_bytes => Size,
+ max_no_files => 2},
+ ok = start_and_add(WName, ConfigW, DLOptsW),
+ WInfo1 = disk_log:info(WName),
+ ct:log("Fullname = ~s", [WFileFull]),
+ {WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1),
+ Get(size,WInfo1),Get(current_file,WInfo1)},
+ logger:notice("123", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 1 = Get(current_file, disk_log:info(WName)),
+
+ logger:notice("45", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 1 = Get(current_file, disk_log:info(WName)),
+
+ logger:notice("6", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 2 = Get(current_file, disk_log:info(WName)),
+
+ logger:notice("7890", ?domain),
+ logger_disk_log_h:filesync(WName),
+ timer:sleep(500),
+ 2 = Get(current_file, disk_log:info(WName)),
+
+ HName1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H1"])),
+ HFile1 = lists:concat([?FUNCTION_NAME,"_H1_log"]),
+ ConfigH = #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter => {?MODULE,no_nl}},
+ HFile1Full = filename:join(PrivDir, HFile1),
+ DLOptsH1 = #{file => HFile1Full,
+ type => halt},
+ ok = start_and_add(HName1, ConfigH, DLOptsH1),
+ HInfo1 = disk_log:info(HName1),
+ ct:log("Fullname = ~s", [HFile1Full]),
+ {HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1),
+ Get(size,HInfo1)},
+ logger:notice("12345", ?domain),
+ logger_disk_log_h:filesync(HName1),
+ timer:sleep(500),
+ 1 = Get(no_written_items, disk_log:info(HName1)),
+
+ HName2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H2"])),
+ HFile2 = lists:concat([?FUNCTION_NAME,"_H2_log"]),
+ HFile2Full = filename:join(PrivDir, HFile2),
+ DLOptsH2 = DLOptsH1#{file => HFile2Full,
+ max_no_bytes => 1000},
+ ok = start_and_add(HName2, ConfigH, DLOptsH2),
+ HInfo3 = disk_log:info(HName2),
+ ct:log("Fullname = ~s", [HFile2Full]),
+ {HFile2Full,halt,1000} = {Get(file,HInfo3),Get(type,HInfo3),
+ Get(size,HInfo3)},
+
+ remove_and_stop(WName),
+ remove_and_stop(HName1),
+ remove_and_stop(HName2),
+ ok.
+
+default_formatter(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ LogFile = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)),
+ HandlerConfig = #{config => #{file=>LogFile},
+ filter_default=>log},
+ ct:pal("Log: ~p", [LogFile]),
+ ok = logger:add_handler(?MODULE, logger_disk_log_h, HandlerConfig),
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ LogName = lists:concat([LogFile, ".1"]),
+ logger:notice("dummy"),
+ wait_until_written(LogName),
+ {ok,Bin} = file:read_file(LogName),
+ match = re:run(Bin, "=NOTICE REPORT====.*\ndummy", [{capture,none}]),
+ ok.
+default_formatter(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+logging(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ LogFile = filename:join(PrivDir, Name),
+ ok = start_and_add(Name, #{filter_default=>log,
+ formatter=>{?MODULE,self()}},
+ #{file => LogFile}),
+ MsgFormatter = fun(Term) -> {"Term:~p",[Term]} end,
+ logger:notice([{x,y}], #{report_cb => MsgFormatter}),
+ logger:notice([{x,y}], #{}),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile,1)]),
+ try_read_file(?log_no(LogFile,1), {ok,<<"Term:[{x,y}]\n x: y\n">>}, 5000).
+
+logging(cleanup, _Config) ->
+ Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ remove_and_stop(Name).
+
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([olp],HConfig),
+
+ FakeFullHConfig = HConfig#{olp=>{regname,self(),erlang:make_ref()}},
+ #{config:=HConfig} =
+ logger_disk_log_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
+errors(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ LogFile1 = filename:join(PrivDir,Name1),
+ HandlerConfig = #{config=>#{file=>LogFile1},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}},
+ ok = logger:add_handler(Name1, logger_disk_log_h, HandlerConfig),
+ {error,{already_exist,Name1}} =
+ logger:add_handler(Name1, logger_disk_log_h, #{}),
+
+ %%! TODO:
+ %%! Check how bad log_opts are handled!
+
+ {error,{illegal_config_change,
+ logger_disk_log_h,
+ #{type:=wrap},
+ #{type:=halt}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{type=>halt,
+ file=>LogFile1}),
+
+ {error,{illegal_config_change,
+ logger_disk_log_h,
+ #{file:=LogFile1},
+ #{file:="newfilename"}}} =
+ logger:update_handler_config(Name1,
+ config,
+ #{file=>"newfilename"}),
+
+    %% Read-only fields may (accidentally) be included in the change,
+    %% but they will not take effect
+ {ok,C} = logger:get_handler_config(Name1),
+ ok = logger:set_handler_config(Name1,config,#{olp=>dummyvalue}),
+ {ok,C} = logger:get_handler_config(Name1),
+
+
+ ok = logger:remove_handler(Name1),
+ {error,{not_found,Name1}} = logger:remove_handler(Name1),
+ ok.
+
+errors(cleanup, _Config) ->
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ _ = logger:remove_handler(Name1).
+
+formatter_fail(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Name = ?FUNCTION_NAME,
+ LogFile = filename:join(PrivDir,Name),
+ ct:pal("Log = ~p", [LogFile]),
+ HandlerConfig = #{config => #{file=>LogFile},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])},
+ %% no formatter!
+ logger:add_handler(Name, logger_disk_log_h, HandlerConfig),
+ Pid = whereis(h_proc_name(Name)),
+ true = is_pid(Pid),
+ H = logger:get_handler_ids(),
+ true = lists:member(Name,H),
+
+ %% Formatter is added automatically
+ {ok,#{formatter:={logger_formatter,_}}} = logger:get_handler_config(Name),
+ logger:notice(M1=?msg,?domain),
+ Got1 = try_match_file(?log_no(LogFile,1),"[0-9\\+\\-T:\\.]* notice: "++M1,5000),
+
+ ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}),
+ logger:notice(M2=?msg,?domain),
+ Got2 = try_match_file(?log_no(LogFile,1),
+ escape(Got1)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M2,
+ 5000),
+
+ ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}),
+ logger:notice(M3=?msg,?domain),
+ Got3 = try_match_file(?log_no(LogFile,1),
+ escape(Got2)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M3,
+ 5000),
+
+ ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}),
+ logger:notice(?msg,?domain),
+ try_match_file(?log_no(LogFile,1),
+ escape(Got3)++"FORMATTER ERROR: bad return value",
+ 5000),
+
+    %% Check that the handler is still alive and is the same process as before
+ Pid = whereis(h_proc_name(Name)),
+ H = logger:get_handler_ids(),
+ ok.
+
+formatter_fail(cleanup,_Config) ->
+ _ = logger:remove_handler(?FUNCTION_NAME),
+ ok.
+
+config_fail(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,#{bad:=bad}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{bad => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=1}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{drop_mode_qlen=>1}}),
+ {error,{handler_not_added,{invalid_olp_levels,#{sync_mode_qlen:=43,
+ drop_mode_qlen:=42}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{sync_mode_qlen=>43,
+ drop_mode_qlen=>42}}),
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=43,
+ flush_qlen:=42}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{config => #{drop_mode_qlen=>43,
+ flush_qlen=>42}}),
+
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ %% can't change the disk log options for a log already in use
+ {error,{illegal_config_change,logger_disk_log_h,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{max_no_files=>2}),
+    %% incorrect values of the overload protection (OP) parameters
+ {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
+ {error,{invalid_olp_levels,_}} =
+ logger:update_handler_config(?MODULE,config,
+ HConfig#{sync_mode_qlen=>100,
+ flush_qlen=>99}),
+ %% invalid name of config parameter
+ {error,{invalid_config,logger_disk_log_h,#{filesync_rep_int:=2000}}} =
+ logger:update_handler_config(?MODULE, config,
+ HConfig#{filesync_rep_int => 2000}),
+ ok.
+config_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+bad_input(_Config) ->
+ {error,{badarg,{filesync,["BadType"]}}} =
+ logger_disk_log_h:filesync("BadType").
+
+reconfig(Config) ->
+ Dir = ?config(priv_dir,Config),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{%id := ?MODULE,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ cb_state :=
+ #{handler_state :=
+ #{log_opts := #{type := ?DISK_LOG_TYPE,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ file := DiskLogFile}},
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config :=
+ #{sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
+ file := DiskLogFile,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ type := wrap} = HConfig0}} =
+ logger:get_handler_config(?MODULE),
+
+ HConfig1 = HConfig0#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen => 3,
+ burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 10,
+ overload_kill_enable => true,
+ overload_kill_qlen => 100000,
+ overload_kill_mem_size => 10000000,
+ overload_kill_restart_after => infinity,
+ filesync_repeat_interval => no_repeat},
+ ok = logger:set_handler_config(?MODULE, config, HConfig1),
+ #{%id := ?MODULE,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ cb_state := #{filesync_repeat_interval := no_repeat}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config:=HConfig1}} = logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConfig1#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = HConfig0#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = HConfig0#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = HConfig0#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir, "logfile"),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ config=>
+ #{type => halt,
+ max_no_files => 1,
+ max_no_bytes => 1024,
+ file => File}}),
+ #{cb_state :=
+ #{handler_state :=
+ #{log_opts := #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config :=
+ #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}=HaltHConfig} = Config2} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, level, notice),
+ {ok,C5} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = Config2#{level => notice},
+
+ ok = logger:set_handler_config(?MODULE, level, info),
+ {ok,C6} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = Config2#{level => info},
+
+    %% The write-once fields (type, max_no_files, max_no_bytes, file)
+    %% must not be explicitly changed at runtime.
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>wrap}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_files=>2}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{max_no_bytes=>2048}),
+ {error, {illegal_config_change,_,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{file=>"otherfile.log"}),
+ {ok,C7} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = C6,
+
+    %% ... but if the write-once fields are not specified, then
+    %% set_handler_config must NOT reset them to their default values.
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C8}} = logger:get_handler_config(?MODULE),
+ ct:log("C8: ~p",[C8]),
+ C8 = HaltHConfig#{sync_mode_qlen=>1},
+ ok.
+
+reconfig(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+sync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ Log = lists:concat([File,".1"]),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{config => #{file => File},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,nl}}),
+
+ start_tracer([{logger_disk_log_h,disk_log_write,3},
+ {disk_log,sync,1}],
+ [{logger_disk_log_h,disk_log_write,<<"first\n">>},
+ {disk_log,sync}]),
+
+ logger:notice("first", ?domain),
+ %% wait for automatic disk_log_sync
+ check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
+
+    %% check that even if no repeated filesync is active,
+    %% a disk_log_sync is still performed when the handler goes idle
+ {ok,#{config := HConfig}} = logger:get_handler_config(?MODULE),
+ HConfig1 = HConfig#{filesync_repeat_interval => no_repeat},
+ ok = logger:update_handler_config(?MODULE, config, HConfig1),
+ no_repeat = maps:get(filesync_repeat_interval,
+ maps:get(cb_state,logger_olp:info(h_proc_name()))),
+
+ start_tracer([{logger_disk_log_h,disk_log_write,3},
+ {disk_log,sync,1}],
+ [{logger_disk_log_h,disk_log_write,<<"second\n">>},
+ {disk_log,sync},
+ {logger_disk_log_h,disk_log_write,<<"third\n">>},
+ {disk_log,sync}]),
+
+ logger:notice("second", ?domain),
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ logger:notice("third", ?domain),
+ %% wait for automatic disk_log_sync
+ check_tracer(?IDLE_DETECT_TIME*2),
+
+ try_read_file(Log, {ok,<<"first\nsecond\nthird\n">>}, 1000),
+
+ %% switch repeated filesync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{{logger_h_common,handle_cast,2},
+ [{[repeated_filesync,'_'],[],[{message,{caller}}]}]}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
+
+ HConfig2 = HConfig#{filesync_repeat_interval => SyncInt},
+ ok = logger:update_handler_config(?MODULE, config, HConfig2),
+
+ SyncInt = maps:get(filesync_repeat_interval,
+ maps:get(cb_state,logger_olp:info(h_proc_name()))),
+ timer:sleep(WaitT),
+ HConfig3 = HConfig#{filesync_repeat_interval => no_repeat},
+ ok = logger:update_handler_config(?MODULE, config, HConfig3),
+ check_tracer(100),
+ ok.
+sync(cleanup,_Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+disk_log_wrap(Config) ->
+ Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ ct:pal("Log = ~p", [File]),
+ MaxFiles = 3,
+ MaxBytes = 5,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ config=>
+ #{type => wrap,
+ max_no_files => MaxFiles,
+ max_no_bytes => MaxBytes,
+ file => File}}),
+ Info = disk_log:info(?MODULE),
+ {File,wrap,{MaxBytes,MaxFiles},1} =
+ {Get(file,Info),Get(type,Info),Get(size,Info),Get(current_file,Info)},
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
+
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)],
+ ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]),
+ %% fill first file
+ lists:foreach(fun(N) ->
+ Log = lists:concat([File,".",N]),
+ logger:notice(Text, ?domain),
+ wait_until_written(Log),
+ ct:pal("N = ~w",
+ [N = Get(current_file,
+ disk_log:info(?MODULE))])
+ end, lists:seq(1,MaxFiles)),
+
+ %% wait for trace messages
+ timer:sleep(1000),
+ dbg:stop_clear(),
+ Received = lists:flatmap(fun({trace,_M,handle_info,
+ [_,{disk_log,_Node,_Name,What},_]}) ->
+ [{trace,What}];
+ ({log,_}) ->
+ []
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ Received = [{trace,{wrap,0}} || _ <- lists:seq(1,MaxFiles-1)],
+ ok.
+
+disk_log_wrap(cleanup,_Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+disk_log_full(Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ ct:pal("Log = ~p", [File]),
+ MaxBytes = 50,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ config=>
+ #{type => halt,
+ max_no_files => 1,
+ max_no_bytes => MaxBytes,
+ file => File}}),
+
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
+
+ NoOfChars = 5,
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)],
+ [logger:notice(Text, ?domain) || _ <- lists:seq(1,trunc(MaxBytes/NoOfChars)+1)],
+
+ %% wait for trace messages
+ timer:sleep(2000),
+ dbg:stop_clear(),
+ Received = lists:flatmap(fun({trace,_M,handle_info,
+ [_,{disk_log,_Node,_Name,What},_]}) ->
+ [{trace,What}];
+ ({log,_}) ->
+ []
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+
+    %% The tail here could be an error_status notification if the
+    %% last write was synchronous, but in most cases it will not be.
+ [{trace,full}|_] = Received,
+ %% [{trace,full},
+ %% {trace,{error_status,{error,{full,_}}}}] = Received,
+ ok.
+disk_log_full(cleanup, _Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+disk_log_events(_Config) ->
+ Node = node(),
+ Log = ?MODULE,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ %% Events copied from disk_log API
+ Events =
+ [{disk_log, Node, Log, {wrap, 0}},
+ {disk_log, Node, Log, {truncated, 0}},
+ {disk_log, Node, Log, {read_only, 42}},
+ {disk_log, Node, Log, {blocked_log, 42}},
+ {disk_log, Node, Log, {format_external, 42}},
+ {disk_log, Node, Log, full},
+ {disk_log, Node, Log, {error_status, ok}}],
+
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 3, []),
+
+ [whereis(h_proc_name()) ! E || E <- Events],
+ %% wait for trace messages
+ timer:sleep(2000),
+ dbg:stop_clear(),
+ Received = lists:map(fun({trace,_M,handle_info,
+ [_,Got,_]}) -> Got
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ NoOfEvents = length(Events),
+ NoOfEvents = length(Received),
+    lists:foreach(fun(Event) ->
+                          true = lists:member(Event, Received)
+                  end, Events),
+ ok.
+disk_log_events(cleanup, _Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+write_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ Log = lists:concat([File,".1"]),
+ ct:pal("Log = ~p", [Log]),
+
+ Node = start_h_on_new_node(Config, File),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]),
+ HState = rpc:call(Node, logger_olp, info, [h_proc_name(?STANDARD_HANDLER)]),
+ LogOpts = maps:get(log_opts,
+ maps:get(handler_state,
+ maps:get(cb_state,HState))),
+ ct:pal("LogOpts = ~p", [LogOpts]),
+
+ %% ?check and ?check_no_log in this test only check for internal log events
+ ok = log_on_remote_node(Node, "Logged1"),
+ rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log, % no internal log when write ok
+
+ SyncRepInt = case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
+ true -> 5500;
+ false -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end,
+
+ try_read_file(Log, {ok,<<"Logged1\n">>}, SyncRepInt),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,{error,no_such_log}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ %% this should have caused an internal log
+ ?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log, % but don't log same error twice
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,
+ {error,{full,?STANDARD_HANDLER}}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ %% this was a different error, so it should be logged
+ ?check({error,{?STANDARD_HANDLER,log,LogOpts,
+ {error,{full,?STANDARD_HANDLER}}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_write,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ rpc:call(Node, logger_disk_log_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log, % no internal log when write ok
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, SyncRepInt),
+ ok.
+write_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+
+sync_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]),
+ File = filename:join(Dir, FileName),
+
+
+ Node = start_h_on_new_node(Config, File),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
+ HState = rpc:call(Node, logger_olp, info, [h_proc_name(?STANDARD_HANDLER)]),
+ LogOpts = maps:get(log_opts, maps:get(handler_state,
+ maps:get(cb_state,HState))),
+
+ SyncInt = 500,
+ ok = rpc:call(Node, logger, update_handler_config,
+ [?STANDARD_HANDLER, config,
+ #{filesync_repeat_interval => SyncInt}]),
+ Info = rpc:call(Node, logger_olp, info, [h_proc_name(?STANDARD_HANDLER)]),
+ SyncInt = maps:get(filesync_repeat_interval, maps:get(cb_state, Info)),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,{error,no_such_log}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result,
+ [disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,filesync,LogOpts,
+ {error,{blocked_log,?STANDARD_HANDLER}}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ ?check_no_log,
+ ok.
+sync_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
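+%% Start a new node with logger_disk_log_h as default handler, logging
+%% to File, and install this module as its formatter.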
+start_h_on_new_node(Config, File) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,[{handler,default,logger_disk_log_h,
+ #{ config => #{ file => File }}}]}]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,nl}]),
+ Node.
+
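+%% Issue a log call from a process on Node, with Node's 'user' process
+%% as group leader.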
+log_on_remote_node(Node,Msg) ->
+ _ = spawn_link(Node,
+ fun() -> erlang:group_leader(whereis(user),self()),
+ logger:notice(Msg)
+ end),
+ ok.
+
+%% functions for test hook macros to be called by rpc
+set_internal_log(_Mod, _Func) ->
+ ?set_internal_log({_Mod,_Func}).
+set_result(_Op, _Result) ->
+ ?set_result(_Op, _Result).
+set_defaults() ->
+ ?set_defaults().
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+ [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+ Tester ! {log,{Type,Term}},
+ logger:internal_log(Type, Term),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
+
+op_switch_to_sync(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 500,
+ NewHConfig =
+ HConfig#{config => DLHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => NumOfReqs+1,
+ flush_qlen => 2*NumOfReqs,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Lines = count_lines(Log),
+ NumOfReqs = Lines,
+ ok = file_delete(Log),
+ ok.
+op_switch_to_sync(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop() ->
+ [{timetrap,{seconds,180}}].
+op_switch_to_drop(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,DLHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ Bursts = 10,
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen => Procs*NumOfReqs*Bursts,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+              %% It sometimes happens that the handler gets the
+              %% requests at such a slow pace that dropping never
+              %% occurs. Therefore, let's generate a number of
+              %% bursts to increase the chance of message buildup.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1, Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]),
+ true = (Logged < (Procs*NumOfReqs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+ %% test repeatedly, hoping that will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_drop(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+op_switch_to_flush() ->
+ [{timetrap,{minutes,3}}].
+op_switch_to_flush(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,DLHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% NOTE: it's important that both async and sync
+ %% requests have been queued when the flush happens
+ %% (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{sync_mode_qlen => 2,
+ %% disable drop mode
+ drop_mode_qlen => 300,
+ flush_qlen => 300,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1500,
+ Procs = 10,
+ Bursts = 10,
+              %% It sometimes happens that the handler either gets
+              %% the requests at such a slow pace that flushing
+              %% never occurs, or gets all messages at once, causing
+              %% all of them to be flushed (so no dropping of sync
+              %% messages gets tested). Therefore, let's generate a
+              %% number of bursts to increase the chance of message
+              %% buildup in some random fashion.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1,Bursts)],
+ Logged = count_lines(Log),
+              ok = stop_handler(?MODULE),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]),
+ true = (Logged < (NumOfReqs*Procs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+ %% test repeatedly, hoping that will generate a successful result.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_flush(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+
+limit_burst_disabled(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => DLHConfig#{burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ NumOfReqs = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_one(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ NewHConfig =
+ HConfig#{config => DLHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ReqLimit = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_one(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_period(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ BurstTWin = 1000,
+ NewHConfig =
+ HConfig#{config => DLHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => BurstTWin,
+ drop_mode_qlen => 20000,
+ flush_qlen => 20001}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+
+ Windows = 3,
+ Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ true = (Logged > (ReqLimit*Windows)) andalso
+ (Logged < (ReqLimit*(Windows+2))),
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_period(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+kill_disabled(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config=>DLHConfig#{overload_kill_enable=>false,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>100}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file_delete(Log),
+ true = is_pid(whereis(h_proc_name())),
+ ok.
+kill_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+qlen_kill_new(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>Mem0+50000,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+qlen_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+mem_kill_new(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config =>
+ DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>50000,
+ overload_kill_mem_size=>Mem0+500,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+mem_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+restart_after() ->
+ [{timetrap,{minutes,2}}].
+restart_after(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig1 =
+ HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>4,
+ overload_kill_restart_after=>infinity}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
+ MRef1 = erlang:monitor(process, whereis(h_proc_name())),
+ %% kill handler
+ send_burst({n,100}, {spawn,5,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef1, _, _, _Reason1} ->
+ file_delete(Log),
+ error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3),
+ ok
+ after
+ 5000 ->
+ Info1 = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info1]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+
+ {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig2 =
+ HConfig#{config=>DLHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>4,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
+ Pid0 = whereis(h_proc_name()),
+ MRef2 = erlang:monitor(process, Pid0),
+ %% kill handler
+ send_burst({n,100}, {spawn,5,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef2, _, _, _Reason2} ->
+ file_delete(Log),
+ {ok,Pid1} = wait_for_process_up(RestartAfter * 3),
+ false = (Pid1 == Pid0),
+ ok
+ after
+ 5000 ->
+ Info2 = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info2]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+ ok.
+restart_after(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% send handler requests (sync, info, reset, change_config)
+%% during high load to verify that sync, dropping and flushing are
+%% handled correctly.
+handler_requests_under_load() ->
+ [{timetrap,{minutes,5}}].
+handler_requests_under_load(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => DLHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => 1000,
+ flush_qlen => 2000,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(
+ fun() -> send_requests(1,[{logger_disk_log_h,filesync,[?MODULE],[]},
+ {logger_olp,info,[h_proc_name()],[]},
+ {logger_olp,reset,[h_proc_name()],[]},
+ {logger,update_handler_config,
+ [?MODULE, config,
+ #{overload_kill_enable => false}],
+ []}])
+ end),
+ Procs = 100,
+ Sent = Procs * send_burst({n,5000}, {spawn,Procs,10}, {chars,79}, notice),
+ Pid ! {self(),finish},
+ ReqResult = receive {Pid,Result} -> Result end,
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ FindError = fun(Res) ->
+ [E || E <- Res,
+ is_tuple(E) andalso (element(1,E) == error)]
+ end,
+ Errors = [{Func,FindError(Res)} || {_,Func,_,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,_,_,Res}, N) -> N + length(Res) end,
+ 0, ReqResult),
+ ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
+ ok = file_delete(Log).
+handler_requests_under_load(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
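+%% Cycle through the given requests, applying one {Mod,Func,Args} every
+%% TO ms and accumulating the results, until the test case sends
+%% 'finish'.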
+send_requests(TO, Reqs = [{Mod,Func,Args,Res}|Rs]) ->
+ receive
+ {From,finish} ->
+ From ! {self(),Reqs}
+ after
+ TO ->
+ Result = apply(Mod,Func,Args),
+ send_requests(TO, Rs ++ [{Mod,Func,Args,[Result|Res]}])
+ end.
+
+%%%-----------------------------------------------------------------
+%%%
+start_handler(Name, FuncName, Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, FuncName),
+ ct:pal("Logging to ~tp", [File]),
+ FullFile = lists:concat([File,".1"]),
+ _ = file_delete(FullFile),
+ ok = logger:add_handler(Name,
+ logger_disk_log_h,
+ #{config=>#{file => File,
+ max_no_files => 1,
+ max_no_bytes => 100000000},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,HConfig = #{config := DLHConfig}} = logger:get_handler_config(Name),
+ {FullFile,HConfig,DLHConfig}.
+
+stop_handler(Name) ->
+ ct:pal("Stopping handler ~p!", [Name]),
+ logger:remove_handler(Name).
+
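+%% Send a burst of log events of the given class: either a fixed number
+%% of events ({n,N}), sequentially or via spawned processes, or
+%% sequentially for a given time in ms ({t,T}). Returns the number of
+%% events sent.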
+send_burst(NorT, Type, {chars,Sz}, Class) ->
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)],
+ case NorT of
+ {n,N} ->
+ %% process_flag(priority, high),
+ send_n_burst(N, Type, Text, Class),
+ %% process_flag(priority, normal),
+ N;
+ {t,T} ->
+ ct:pal("Sending messages sequentially for ~w ms", [T]),
+ T0 = erlang:monotonic_time(millisecond),
+ send_t_burst(T0, T, Text, Class, 0)
+ end.
+
+send_n_burst(0, _, _Text, _Class) ->
+ ok;
+send_n_burst(N, seq, Text, Class) ->
+ ok = logger:Class(Text, ?domain),
+ send_n_burst(N-1, seq, Text, Class);
+send_n_burst(N, {spawn,Ps,TO}, Text, Class) ->
+ ct:pal("~w processes each sending ~w messages", [Ps,N]),
+ MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end,
+ monitor(process,spawn_link(per_proc_fun(N,Text,Class,X)))
+ end || X <- lists:seq(1,Ps)],
+ lists:foreach(fun(MRef) ->
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ end, MRefs),
+ ct:pal("Message burst sent", []),
+ ok.
+
+send_t_burst(T0, T, Text, Class, N) ->
+ T1 = erlang:monotonic_time(millisecond),
+ if (T1-T0) > T ->
+ N;
+ true ->
+ ok = logger:Class(Text, ?domain),
+ send_t_burst(T0, T, Text, Class, N+1)
+ end.
+
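+%% Every other spawned sender process runs at high priority.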
+per_proc_fun(N,Text,Class,X) when X rem 2 == 0 ->
+ fun() ->
+ process_flag(priority,high),
+ send_n_burst(N, seq, Text, Class)
+ end;
+per_proc_fun(N,Text,Class,_) ->
+ fun() ->
+ send_n_burst(N, seq, Text, Class)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Formatter callback
+%%% Used to send the formatted string back to the test case process,
+%%% so that it can check for logged events.
+format(_,bad_return) ->
+ bad_return;
+format(_,crash) ->
+ erlang:error(formatter_crashed);
+format(#{msg:={report,R},meta:=#{report_cb:=Fun}}=Log,Config) ->
+ format(Log#{msg=>Fun(R)},Config);
+format(#{msg:={string,String0}},no_nl) ->
+ String = unicode:characters_to_list(String0),
+ String;
+format(#{msg:={string,String0}},nl) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={string,String0}},op) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={report,#{label:={supervisor,progress}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={gen_server,terminate}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={proc_lib,crash}}}},op) ->
+ "";
+format(#{msg:={F,A}},OpOrPid) when is_list(F), is_list(A) ->
+ String = lists:flatten(io_lib:format(F,A)),
+ if is_pid(OpOrPid) -> OpOrPid ! {log,String};
+ true -> ok
+ end,
+ String++"\n";
+format(#{msg:={string,String0}},Pid) ->
+ String = unicode:characters_to_list(String0),
+ Pid ! {log,String},
+ String++"\n";
+format(Msg,Tag) ->
+ Error = {unexpected_format,Msg,Tag},
+ erlang:display(Error),
+ exit(Error).
+
+start_and_add(Name, Config, LogOpts) ->
+ HConfig = maps:get(config, Config, #{}),
+ HConfig1 = maps:merge(HConfig, LogOpts),
+ Config1 = Config#{config=>HConfig1},
+ ct:pal("Adding handler ~w with: ~p", [Name,Config1]),
+ ok = logger:add_handler(Name, logger_disk_log_h, Config1),
+ Pid = whereis(h_proc_name(Name)),
+ true = is_pid(Pid),
+ Name = proplists:get_value(name, disk_log:info(Name)),
+ ok.
+
+remove_and_stop(Handler) ->
+ ok = logger:remove_handler(Handler),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name(Handler)),
+ ok.
+
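+%% Poll FileName every 500 ms until file:read_file/1 returns Expected,
+%% or fail when Time ms have passed.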
+try_read_file(FileName, Expected, Time) ->
+ try_read_file(FileName, Expected, Time, undefined).
+
+try_read_file(FileName, Expected, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ Expected ->
+ ok;
+ Error = {error,_Reason} ->
+ erlang:error(Error);
+ SomethingElse ->
+ ct:pal("try_read_file read unexpected: ~p~n", [SomethingElse]),
+ timer:sleep(500),
+ try_read_file(FileName, Expected, Time-500, SomethingElse)
+ end;
+
+try_read_file(_, _, _, Incorrect) ->
+ ct:pal("try_read_file got incorrect pattern: ~p~n", [Incorrect]),
+ erlang:error({error,not_matching_pattern,Incorrect}).
+
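+%% Poll FileName every 100 ms until its content matches the re Pattern,
+%% or fail when Time ms have passed.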
+try_match_file(FileName, Pattern, Time) ->
+ try_match_file(FileName, Pattern, Time, <<>>).
+
+try_match_file(FileName, Pattern, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ {ok, Bin} ->
+ case re:run(Bin,Pattern,[{capture,none}]) of
+ match ->
+ unicode:characters_to_list(Bin);
+ _ ->
+ timer:sleep(100),
+ try_match_file(FileName, Pattern, Time-100, Bin)
+ end;
+ Error ->
+ erlang:error(Error)
+ end;
+try_match_file(_,Pattern,_,Incorrect) ->
+ ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n",
+ [Pattern,Incorrect]),
+ erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+
+count_lines(File) ->
+ wait_until_written(File),
+ count_lines1(File).
+
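+%% Wait until the size of File has stopped changing between consecutive
+%% checks.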
+wait_until_written(File) ->
+ wait_until_written(File, -1).
+
+wait_until_written(File, Sz) ->
+ timer:sleep(2000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ timer:sleep(1000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ ok;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end.
+
+count_lines1(File) ->
+ {_,Dev} = file:open(File, [read]),
+ Lines = count_lines2(Dev, 0),
+ file:close(Dev),
+ Lines.
+
+count_lines2(Dev, LC) ->
+ case file:read_line(Dev) of
+ {ok,"Handler logger_disk_log_h_SUITE " ++_} ->
+ %% Not counting handler info
+ count_lines2(Dev,LC);
+ {ok,_} ->
+ count_lines2(Dev,LC+1);
+ eof -> LC
+ end.
+
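+%% Call Fun up to N times, with a 5 second pause between attempts, until
+%% it returns without crashing. Returns {ok,{FailedAttempts,Result}} or
+%% {fails,FirstReason}.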
+repeat_until_ok(Fun, N) ->
+ repeat_until_ok(Fun, 0, N, undefined).
+
+repeat_until_ok(_Fun, Stop, Stop, Reason) ->
+ {fails,Reason};
+
+repeat_until_ok(Fun, C, Stop, FirstReason) ->
+ if C > 0 -> timer:sleep(5000);
+ true -> ok
+ end,
+ try Fun() of
+ Result ->
+ {ok,{C,Result}}
+ catch
+ _:Reason:Stack ->
+ ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]),
+ if FirstReason == undefined ->
+ repeat_until_ok(Fun, C+1, Stop, {Reason,Stack});
+ true ->
+ repeat_until_ok(Fun, C+1, Stop, FirstReason)
+ end
+ end.
+
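+%% Trace helpers: set up a dbg tracer on the handler process and verify
+%% (via tracer/2 and check_tracer/1) that the expected calls arrive in
+%% the expected order.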
+start_tracer(Trace,Expected) ->
+ Pid = self(),
+ dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
+ dbg:p(h_proc_name(),[c]),
+ tpl(Trace),
+ ok.
+
+tpl([{M,F,A}|Trace]) ->
+ tpl([{{M,F,A},c}|Trace]);
+tpl([{{M,F,A},MS}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,MS),
+ case lists:keyfind(matched,1,Match) of
+ {_,_,1} ->
+ ok;
+ _ ->
+ dbg:stop_clear(),
+ throw({skip,"Can't trace "++atom_to_list(M)++":"++
+ atom_to_list(F)++"/"++integer_to_list(A)})
+ end,
+ tpl(Trace);
+tpl([]) ->
+ ok.
+
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]},Caller},
+ {Pid,[{Mod,Func,Op}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op},Caller);
+tracer({trace,_,call,{Mod=logger_disk_log_h,Func=disk_log_write,[_,_,Data]},Caller},
+       {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data},Caller);
+tracer({trace,_,call,{Mod,Func,_},Caller}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func},Caller);
+tracer({trace,_,call,Call,Caller}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nCaller: ~p~nExpected: ~p~n",[Call,Caller,Expected]),
+ Pid ! {tracer_got_unexpected,Call,Expected},
+ {Pid,Expected}.
+
+maybe_tracer_done(Pid,[],Got,Caller) ->
+ ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]),
+ Pid ! tracer_done;
+maybe_tracer_done(Pid,Expected,Got,Caller) ->
+ ct:log("Tracer got: ~p~nCaller: ~p~n",[Got,Caller]),
+ {Pid,Expected}.
+
+check_tracer(T) ->
+ receive
+ tracer_done ->
+ dbg:stop_clear(),
+ ok;
+ {tracer_got_unexpected,Got,Expected} ->
+ dbg:stop_clear(),
+ ct:fail({tracer_got_unexpected,Got,Expected})
+ after T ->
+ dbg:stop_clear(),
+ ct:fail({timeout,tracer})
+ end.
+
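+%% Escape '+' characters with a backslash.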
+escape([$+|Rest]) ->
+ [$\\,$+|escape(Rest)];
+escape([H|T]) ->
+ [H|escape(T)];
+escape([]) ->
+ [].
+
+h_proc_name() ->
+ h_proc_name(?MODULE).
+h_proc_name(Name) ->
+ list_to_atom(lists:concat([logger_disk_log_h,"_",Name])).
+
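+%% Wait up to T ms for the handler process to be registered and its
+%% handler configuration to be available.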
+wait_for_process_up(T) ->
+ wait_for_process_up(?MODULE, h_proc_name(), T).
+
+wait_for_process_up(Name, RegName, T) ->
+ N = (T div 500) + 1,
+ wait_for_process_up1(Name, RegName, N).
+
+wait_for_process_up1(_Name, _RegName, 0) ->
+ error;
+wait_for_process_up1(Name, RegName, N) ->
+ timer:sleep(500),
+ case whereis(RegName) of
+ Pid when is_pid(Pid) ->
+ case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% ct:pal("Process ~p up (~p tries left)",[Name,N]),
+ {ok,Pid};
+ _ ->
+ wait_for_process_up1(Name, RegName, N-1)
+ end;
+ undefined ->
+ %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]),
+ wait_for_process_up1(Name, RegName, N-1)
+ end.
+
+file_delete(Log) ->
+ file:delete(Log).
diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl
new file mode 100644
index 0000000000..9d2ad11be8
--- /dev/null
+++ b/lib/kernel/test/logger_env_var_SUITE.erl
@@ -0,0 +1,697 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_env_var_SUITE).
+
+-compile(export_all).
+
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-import(logger_test_lib,[setup/2,log/3,sync_and_read/3]).
+
+suite() ->
+ [{timetrap,{seconds,60}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+groups() ->
+ [{error_logger,[],[error_logger_tty,
+ error_logger_tty_sasl_compatible,
+ error_logger_false,
+ error_logger_false_progress,
+ error_logger_false_sasl_compatible,
+ error_logger_silent,
+ error_logger_silent_sasl_compatible,
+ error_logger_file]},
+ {logger,[],[logger_file,
+ logger_file_sasl_compatible,
+ logger_file_log_progress,
+ logger_file_no_filter,
+ logger_file_no_filter_level,
+ logger_file_formatter,
+ logger_filters,
+ logger_filters_stop,
+ logger_module_level,
+ logger_disk_log,
+ logger_disk_log_formatter,
+ logger_undefined,
+ logger_many_handlers_default_first,
+ logger_many_handlers_default_last,
+ logger_many_handlers_default_last_broken_filter,
+ logger_proxy
+ ]},
+ {bad,[],[bad_error_logger,
+ bad_level,
+ bad_sasl_compatibility]}].
+
+all() ->
+ [default,
+ default_sasl_compatible,
+ sasl_compatible_false,
+ sasl_compatible_false_no_progress,
+ sasl_compatible,
+ all_logger_level,
+ {group,bad},
+ {group,error_logger},
+ {group,logger}
+ ].
+
+default(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} = setup(Config,[]),
+ notice = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+default_sasl_compatible(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} =
+ setup(Config,[{logger_sasl_compatible,true}]),
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_tty(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} =
+ setup(Config,[{error_logger,tty}]),
+ notice = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_tty_sasl_compatible(Config) ->
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,tty},
+ {logger_sasl_compatible,true}]),
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_false(Config) ->
+ {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ notice = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_false_progress(Config) ->
+ {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ notice = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_false_sasl_compatible(Config) ->
+ {ok,#{handlers:=Hs,primary:=P,module_levels:=ML},_Node} =
+ setup(Config,
+ [{error_logger,false},
+ {logger_level,notice},
+ {logger_sasl_compatible,true}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ info = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,SimpleFilters),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+error_logger_silent(Config) ->
+ {ok,#{handlers:=Hs},_Node} = setup(Config,
+ [{error_logger,silent}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ ok.
+
+error_logger_silent_sasl_compatible(Config) ->
+ {ok,#{handlers:=Hs},_Node} = setup(Config,
+ [{error_logger,silent},
+ {logger_sasl_compatible,true}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ ok.
+
+
+error_logger_file(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+ ok.
+
+
+logger_file(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+
+ notice = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+logger_file_sasl_compatible(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node}
+ = setup(Config,
+ [{logger_sasl_compatible,true},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ true = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+logger_file_log_progress(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{primary:=P,handlers:=Hs,module_levels:=ML},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 6,% progress in std logger
+ info),
+
+ info = maps:get(level,P),
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [] = ML,
+ ok.
+
+logger_file_no_filter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filter_default=>log,filters=>[],
+ config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 6),% progress in std logger
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_file_no_filter_level(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],level=>error,
+ config=>#{type=>{file,Log}}}}]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ error),% level
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ error = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_file_formatter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,Log}}}}]}]),
+ check_single_log(Node,Log,
+ file,% dest
+ 6),% progress in std logger
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_filters(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,primary:=P},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}},
+ {filters,log,[{stop_progress,{fun logger_filters:progress/2,stop}}]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ info),
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ LoggerFilters = maps:get(filters,P),
+ true = lists:keymember(stop_progress,1,LoggerFilters),
+
+ ok.
+
+logger_filters_stop(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,primary:=P},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{filters=>[],
+ config=>#{type=>{file,Log}}}},
+ {filters,stop,[{log_error,{fun logger_filters:level/2,{log,gt,info}}}]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0,% progress in std logger
+ info),
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ LoggerFilters = maps:get(filters,P),
+ true = lists:keymember(log_error,1,LoggerFilters),
+
+ ok.
+
+logger_module_level(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs,module_levels:=ModuleLevels},Node}
+ = setup(Config,
+ [{logger_level,info},
+ {logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{config=>#{type=>{file,Log}}}},
+ {module_level,error,[supervisor]}
+ ]}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 3,% progress in std logger
+ info),
+
+ #{module:=logger_std_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+ [{supervisor,error}] = ModuleLevels,
+ ok.
+
+logger_disk_log(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_disk_log_h,
+ #{config=>#{file=>Log}}}]}]),
+ check_default_log(Node,Log,
+ disk_log,% dest
+ 0),% progress in std logger
+
+ #{module:=logger_disk_log_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,StdFilters),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_disk_log_formatter(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,#{handlers:=Hs},Node}
+ = setup(Config,
+ [{logger,
+ [{handler,?STANDARD_HANDLER,logger_disk_log_h,
+ #{filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{file=>Log}}}]}]),
+ check_single_log(Node,Log,
+ disk_log,% dest
+ 6),% progress in std logger
+
+ #{module:=logger_disk_log_h} = StdC = find(?STANDARD_HANDLER,Hs),
+ all = maps:get(level,StdC),
+ [] = maps:get(filters,StdC),
+ false = exists(simple,Hs),
+ false = exists(sasl,Hs),
+
+ ok.
+
+logger_undefined(Config) ->
+ {ok,#{handlers:=Hs,primary:=P},_Node} =
+ setup(Config,[{logger,[{handler,?STANDARD_HANDLER,undefined}]}]),
+ false = exists(?STANDARD_HANDLER,Hs),
+ #{module:=logger_simple_h} = SimpleC = find(simple,Hs),
+ all = maps:get(level,SimpleC),
+ notice = maps:get(level,P),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,super,[otp,sasl]}}} = lists:keyfind(domain,1,SimpleFilters),
+ false = exists(sasl,Hs),
+ ok.
+
+
+%% Test that we can add multiple handlers with the default first
+logger_many_handlers_default_first(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_first_error),
+ LogInfo = file(Config,logger_many_handlers_default_first_info),
+
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,LogErr}}}
+ },
+ {handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ config=>#{type=>{file,LogInfo}}}
+ }
+ ]},
+ {logger_level,info}], LogErr, LogInfo, 6).
+
+%% Test that we can add multiple handlers with the default last
+logger_many_handlers_default_last(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_last_error),
+ LogInfo = file(Config,logger_many_handlers_default_last_info),
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ config=>#{type=>{file,LogInfo}}}
+ },
+ {handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,LogErr}}}
+ }
+ ]},
+ {logger_level,info}], LogErr, LogInfo, 7).
+
+%% Check that we can handle an added handler having a broken filter.
+%% This used to cause a deadlock.
+logger_many_handlers_default_last_broken_filter(Config) ->
+ LogErr = file(Config,logger_many_handlers_default_first_broken_filter_error),
+ LogInfo = file(Config,logger_many_handlers_default_first_broken_filter_info),
+
+ logger_many_handlers(
+ Config,[{logger,
+ [{handler,info,logger_std_h,
+ #{level=>info,
+ filters=>[{broken,{fun logger_filters:level/2,broken_state}},
+ {level,{fun logger_filters:level/2,{stop,gteq,error}}}],
+ config=>#{type=>{file,LogInfo}}}
+ },
+ {handler,?STANDARD_HANDLER,logger_std_h,
+ #{level=>error,
+ filters=>[],
+ formatter=>{logger_formatter,#{}},
+ config=>#{type=>{file,LogErr}}}
+ }
+ ]},
+ {logger_level,info}], LogErr, LogInfo, 7).
+
+logger_many_handlers(Config, Env, LogErr, LogInfo, NumProgress) ->
+ {ok,_,Node} = setup(Config,Env),
+ check_single_log(Node,LogErr,
+ file,% dest
+ 0,% progress in std logger
+ error), % level
+ ok = rpc:call(Node,logger_std_h,filesync,[info]),
+ {ok, Bin} = file:read_file(LogInfo),
+ ct:log("Log content:~n~s",[Bin]),
+ match(Bin,<<"info:">>,NumProgress,info,info),
+ match(Bin,<<"notice:">>,1,notice,info),
+ match(Bin,<<"alert:">>,0,alert,info),
+
+ ok.
+
+logger_proxy(Config) ->
+    %% Assume that the current node runs with default settings
+ DefOpts = logger_olp:get_opts(logger_proxy),
+ {ok,_,Node} = setup(Config,
+ [{logger,[{proxy,#{sync_mode_qlen=>0,
+ drop_mode_qlen=>2}}]}]),
+ Expected = DefOpts#{sync_mode_qlen:=0,
+ drop_mode_qlen:=2},
+ Expected = rpc:call(Node,logger_olp,get_opts,[logger_proxy]),
+ Expected = rpc:call(Node,logger,get_proxy_config,[]),
+
+ ok.
+
+sasl_compatible_false(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {logger_sasl_compatible,false},
+ {logger_level,info}]), % to get progress
+ check_default_log(Node,Log,
+ file,% dest
+ 6,% progress in std logger
+ info),
+ ok.
+
+sasl_compatible_false_no_progress(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {logger_sasl_compatible,false}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+ ok.
+
+sasl_compatible(Config) ->
+ Log = file(Config,?FUNCTION_NAME),
+ {ok,_,Node} = setup(Config,
+ [{error_logger,{file,Log}},
+ {sasl_compatible,true}]),
+ check_default_log(Node,Log,
+ file,% dest
+ 0),% progress in std logger
+ ok.
+
+all_logger_level(Config) ->
+ [all_logger_level(Config,Level) || Level <- [none,
+ emergency,
+ alert,
+ critical,
+ error,
+ warning,
+ notice,
+ info,
+ debug,
+ all]],
+ ok.
+
+all_logger_level(Config,Level) ->
+ {ok,#{primary:=#{level:=Level}},Node} = setup(Config,[{logger_level,Level}]),
+ true = test_server:stop_node(Node),
+ ok.
+
+bad_error_logger(Config) ->
+ error = setup(Config,[{error_logger,baddest}]).
+
+bad_level(Config) ->
+ error = setup(Config,[{logger_level,badlevel}]).
+
+bad_sasl_compatibility(Config) ->
+ error = setup(Config,[{logger_sasl_compatible,badcomp}]).
+
+%%%-----------------------------------------------------------------
+%%% Internal
+file(Config,Func) ->
+ filename:join(proplists:get_value(priv_dir,Config),
+ lists:concat([Func,".log"])).
+
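+%% Check a log written with the handler's default formatter settings,
+%% i.e. multi-line entries with "... REPORT" headers.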
+check_default_log(Node,Log,Dest,NumProgress) ->
+ check_default_log(Node,Log,Dest,NumProgress,notice).
+check_default_log(Node,Log,Dest,NumProgress,Level) ->
+
+ {ok,Bin1,Bin2} = check_log(Node,Log,Dest),
+
+ match(Bin1,<<"PROGRESS REPORT">>,NumProgress,info,Level),
+ match(Bin1,<<"ALERT REPORT">>,1,alert,Level),
+ match(Bin1,<<"INFO REPORT">>,0,notice,Level),
+ match(Bin1,<<"DEBUG REPORT">>,0,debug,Level),
+
+ match(Bin2,<<"INFO REPORT">>,1,notice,Level),
+ match(Bin2,<<"DEBUG REPORT">>,0,debug,Level),
+ ok.
+
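+%% Check a log written with a plain logger_formatter, i.e. single-line
+%% entries prefixed with the log level.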
+check_single_log(Node,Log,Dest,NumProgress) ->
+ check_single_log(Node,Log,Dest,NumProgress,notice).
+check_single_log(Node,Log,Dest,NumProgress,Level) ->
+
+ {ok,Bin1,Bin2} = check_log(Node,Log,Dest),
+
+ match(Bin1,<<"info:">>,NumProgress,info,Level),
+ match(Bin1,<<"alert:">>,1,alert,Level),
+ match(Bin1,<<"debug:">>,0,debug,Level),
+
+ match(Bin2,<<"info:">>,NumProgress+1,info,Level),
+ match(Bin2,<<"debug:">>,0,debug,Level),
+
+ ok.
+
+check_log(Node,Log,Dest) ->
+
+ ok = log(Node,alert,["dummy1"]),
+ ok = log(Node,debug,["dummy1"]),
+
+ %% Check that there are progress reports (supervisor and
+ %% application_controller) and an error report (the call above) in
+ %% the log. There should not be any info reports yet.
+ {ok,Bin1} = sync_and_read(Node,Dest,Log),
+ ct:log("Log content:~n~s",[Bin1]),
+
+ %% Then stop sasl and see that the info report from
+ %% application_controller is there
+ ok = rpc:call(Node,application,stop,[sasl]),
+ {ok,Bin2} = sync_and_read(Node,Dest,Log),
+ ct:log("Log content:~n~s",[Bin2]),
+ {ok,Bin1,Bin2}.
+
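+%% Expect Pattern to occur exactly N times in Bin if LogLevel is enabled
+%% at ConfLevel, and not at all otherwise.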
+match(Bin,Pattern,0,_,_) ->
+ nomatch = re:run(Bin,Pattern,[{capture,none}]);
+match(Bin,Pattern,N,LogLevel,ConfLevel) ->
+ case logger:compare_levels(LogLevel,ConfLevel) of
+ lt -> match(Bin,Pattern,0,LogLevel,ConfLevel);
+ _ ->
+ {match,M} = re:run(Bin,Pattern,[{capture,all},global]),
+ N = length(M)
+ end.
+
+find(Id,Handlers) ->
+ case lists:search(fun(#{id:=Id0}) when Id0=:=Id-> true;
+ (_) -> false end,
+ Handlers) of
+ {value,Config} ->
+ Config;
+ false ->
+ false
+ end.
+
+exists(Id,Handlers) ->
+ case find(Id,Handlers) of
+ false ->
+ false;
+ _ ->
+ true
+ end.
diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl
new file mode 100644
index 0000000000..11cce8fd20
--- /dev/null
+++ b/lib/kernel/test/logger_filters_SUITE.erl
@@ -0,0 +1,227 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_filters_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+
+-define(ndlog,
+ #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{}}).
+-define(dlog(Domain),
+ #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{domain=>Domain}}).
+-define(llog(Level),
+ #{level=>Level,msg=>{"Line: ~p",[?LINE]},meta=>#{}}).
+-define(plog,
+ #{level=>info,
+ msg=>{report,#{label=>{?MODULE,progress}}},
+ meta=>#{line=>?LINE}}).
+-define(rlog(Node),
+ #{level=>info,
+ msg=>{"Line: ~p",[?LINE]},
+ meta=>#{gl=>rpc:call(Node,erlang,whereis,[user])}}).
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [domain,
+ level,
+ progress,
+ remote_gl].
+
+domain(_Config) ->
+ L1 = logger_filters:domain(L1=?dlog([]),{log,super,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,super,[]}),
+ L2 = logger_filters:domain(L2=?dlog([]),{log,sub,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,sub,[]}),
+ L3 = logger_filters:domain(L3=?dlog([]),{log,equal,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{log,not_equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{stop,not_equal,[]}),
+ ignore = logger_filters:domain(?dlog([]),{log,undefined,[]}),
+ ignore = logger_filters:domain(?dlog([]),{stop,undefined,[]}),
+
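+%% Check that Str parses as an integer, optionally within [Min,Max].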
+ L4 = logger_filters:domain(L4=?dlog([a]),{log,super,[a,b]}),
+ stop = logger_filters:domain(?dlog([a]),{stop,super,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,sub,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,sub,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,equal,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,equal,[a,b]}),
+ L5 = logger_filters:domain(L5=?dlog([a]),{log,not_equal,[a,b]}),
+ stop = logger_filters:domain(?dlog([a]),{stop,not_equal,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,undefined,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,undefined,[a,b]}),
+
+ ignore = logger_filters:domain(?dlog([a,b]),{log,super,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,super,[a]}),
+ L6 = logger_filters:domain(L6=?dlog([a,b]),{log,sub,[a]}),
+ stop = logger_filters:domain(?dlog([a,b]),{stop,sub,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,equal,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,equal,[a]}),
+ L7 = logger_filters:domain(L7=?dlog([a,b]),{log,not_equal,[a]}),
+ stop = logger_filters:domain(?dlog([a,b]),{stop,not_equal,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,undefined,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,undefined,[a]}),
+
+ ignore = logger_filters:domain(?ndlog,{log,super,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,super,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,sub,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,sub,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,equal,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,equal,[a]}),
+ L8 = logger_filters:domain(L8=?ndlog,{log,not_equal,[a]}),
+ stop = logger_filters:domain(?ndlog,{stop,not_equal,[a]}),
+ L9 = logger_filters:domain(L9=?ndlog,{log,undefined,[a]}),
+ stop = logger_filters:domain(?ndlog,{stop,undefined,[a]}),
+
+ L10 = logger_filters:domain(L10=?dlog([a,b,c,d]),{log,super,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,super,[a,b,c,d]}),
+ L11 = logger_filters:domain(L11=?dlog([a,b,c,d]),{log,sub,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,sub,[a,b,c,d]}),
+ L12 = logger_filters:domain(L12=?dlog([a,b,c,d]),{log,equal,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,undefined,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,undefined,[a,b,c,d]}),
+
+ %% A domain field in meta which is not a list is allowed by the
+ %% filter, but since MatchDomain is always a list of atoms, only
+    %% Compare=not_equal can ever match.
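+    %% (For reference: such a filter is typically installed with, e.g.,
+    %%    logger:add_handler_filter(default, domain,
+    %%        {fun logger_filters:domain/2, {log, super, [otp]}}),
+    %% and the tuples used below correspond to the second element of
+    %% that filter specification.)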
+ ignore = logger_filters:domain(?dlog(dummy),{log,super,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,super,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,sub,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,sub,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,equal,[a,b,c,d]}),
+ L13 = logger_filters:domain(L13=?dlog(dummy),{log,not_equal,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog(dummy),{stop,not_equal,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,undefined,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,undefined,[a,b,c,d]}),
+
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,super,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,super,bad})),
+
+ ok.
+
+level(_Config) ->
+ ignore = logger_filters:level(?llog(info),{log,lt,info}),
+ ignore = logger_filters:level(?llog(info),{stop,lt,info}),
+ ignore = logger_filters:level(?llog(info),{log,gt,info}),
+ ignore = logger_filters:level(?llog(info),{stop,gt,info}),
+ L1 = logger_filters:level(L1=?llog(info),{log,lteq,info}),
+ stop = logger_filters:level(?llog(info),{stop,lteq,info}),
+ L2 = logger_filters:level(L2=?llog(info),{log,gteq,info}),
+ stop = logger_filters:level(?llog(info),{stop,gteq,info}),
+ L3 = logger_filters:level(L3=?llog(info),{log,eq,info}),
+ stop = logger_filters:level(?llog(info),{stop,eq,info}),
+ ignore = logger_filters:level(?llog(info),{log,neq,info}),
+ ignore = logger_filters:level(?llog(info),{stop,neq,info}),
+
+ ignore = logger_filters:level(?llog(error),{log,lt,info}),
+ ignore = logger_filters:level(?llog(error),{stop,lt,info}),
+ L4 = logger_filters:level(L4=?llog(error),{log,gt,info}),
+ stop = logger_filters:level(?llog(error),{stop,gt,info}),
+ ignore = logger_filters:level(?llog(error),{log,lteq,info}),
+ ignore = logger_filters:level(?llog(error),{stop,lteq,info}),
+ L5 = logger_filters:level(L5=?llog(error),{log,gteq,info}),
+ stop = logger_filters:level(?llog(error),{stop,gteq,info}),
+ ignore = logger_filters:level(?llog(error),{log,eq,info}),
+ ignore = logger_filters:level(?llog(error),{stop,eq,info}),
+ L6 = logger_filters:level(L6=?llog(error),{log,neq,info}),
+ stop = logger_filters:level(?llog(error),{stop,neq,info}),
+
+ L7 = logger_filters:level(L7=?llog(info),{log,lt,error}),
+ stop = logger_filters:level(?llog(info),{stop,lt,error}),
+ ignore = logger_filters:level(?llog(info),{log,gt,error}),
+ ignore = logger_filters:level(?llog(info),{stop,gt,error}),
+ L8 = logger_filters:level(L8=?llog(info),{log,lteq,error}),
+ stop = logger_filters:level(?llog(info),{stop,lteq,error}),
+ ignore = logger_filters:level(?llog(info),{log,gteq,error}),
+ ignore = logger_filters:level(?llog(info),{stop,gteq,error}),
+ ignore = logger_filters:level(?llog(info),{log,eq,error}),
+ ignore = logger_filters:level(?llog(info),{stop,eq,error}),
+ L9 = logger_filters:level(L9=?llog(info),{log,neq,error}),
+ stop = logger_filters:level(?llog(info),{stop,neq,error}),
+
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),bad)),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{bad,eq,info})),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,bad,info})),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,eq,bad})),
+
+ ok.
+
+progress(_Config) ->
+ L1 = logger_filters:progress(L1=?plog,log),
+ stop = logger_filters:progress(?plog,stop),
+ ignore = logger_filters:progress(?ndlog,log),
+ ignore = logger_filters:progress(?ndlog,stop),
+
+ {error,badarg} = ?TRY(logger_filters:progress(?plog,bad)),
+
+ ok.
+
+remote_gl(_Config) ->
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ L1 = logger_filters:remote_gl(L1=?rlog(Node),log),
+ stop = logger_filters:remote_gl(?rlog(Node),stop),
+ ignore = logger_filters:remote_gl(?ndlog,log),
+ ignore = logger_filters:remote_gl(?ndlog,stop),
+
+ {error,badarg} = ?TRY(logger_filters:remote_gl(?rlog(Node),bad)),
+ ok.
+
+remote_gl(cleanup,_Config) ->
+ [test_server:stop_node(N) || N<-nodes()].
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R -> {C,R} end.
diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl
new file mode 100644
index 0000000000..83e3e6c40a
--- /dev/null
+++ b/lib/kernel/test/logger_formatter_SUITE.erl
@@ -0,0 +1,886 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_formatter_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [default,
+ legacy_header,
+ error_logger_notice_header,
+ single_line,
+ template,
+ format_msg,
+ report_cb,
+ max_size,
+ depth,
+ chars_limit,
+ format_mfa,
+ format_time,
+ level_or_msg_in_meta,
+ faulty_log,
+ faulty_config,
+ faulty_msg,
+ check_config,
+ update_config].
+
+default(_Config) ->
+ String1 = format(info,{"~p",[term]},#{},#{}),
+ ct:log(String1),
+ [_DateTime,"info:","term\n"] = string:lexemes(String1," "),
+
+ Time = timestamp(),
+ ExpectedTimestamp = default_time_format(Time),
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{}),
+ ct:log(String2),
+ " info: term\n" = string:prefix(String2,ExpectedTimestamp),
+ ok.
+
+legacy_header(_Config) ->
+ Time = timestamp(),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true,
+ single_line=>false}),
+ ct:log(String1),
+ "=INFO REPORT==== "++Rest = String1,
+ [Timestamp,"\nterm\n"] = string:lexemes(Rest," ="),
+ [D,M,Y,H,Min,S,Micro] = string:lexemes(Timestamp,"-:."),
+ integer(D,31),
+ integer(Y,2018,infinity),
+ integer(H,23),
+ integer(Min,59),
+ integer(S,59),
+ integer(Micro,999999),
+ true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun",
+ "Jul","Aug","Sep","Oct","Nov","Dec"]),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false,
+ single_line=>false}),
+ ct:log(String2),
+ ExpectedTimestamp = default_time_format(Time),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad,
+ single_line=>false}),
+ ct:log(String3),
+ String3 = String2,
+
+ String4 = format(info,{"~p",[term]},#{time=>Time},
+ #{legacy_header=>true,
+ single_line=>true}), % <---ignored
+ ct:log(String4),
+ String4 = String1,
+
+ String5 = format(info,{"~p",[term]},#{}, % <--- no time
+ #{legacy_header=>true,
+ single_line=>false}),
+ ct:log(String5),
+ "=INFO REPORT==== "++_ = String5,
+ ok.
+
+error_logger_notice_header(_Config) ->
+ Meta1 = #{error_logger=>#{tag => info_report,type => std_info}},
+ String1 = format(notice,{"~p",[term]},Meta1,
+ #{legacy_header=>true,
+ error_logger_notice_header=>notice}),
+ ct:log(String1),
+ "=NOTICE REPORT==== "++_ = String1,
+
+ String2 = format(notice,{"~p",[term]},Meta1,
+ #{legacy_header=>true,
+ error_logger_notice_header=>info}),
+ ct:log(String2),
+ "=INFO REPORT==== "++_ = String2,
+
+ String3 = format(notice,{"~p",[term]},#{},
+ #{legacy_header=>true,
+ error_logger_notice_header=>notice}),
+ ct:log(String3),
+ "=NOTICE REPORT==== "++_ = String3,
+
+ String4 = format(notice,{"~p",[term]},#{},
+ #{legacy_header=>true,
+ error_logger_notice_header=>info}),
+ ct:log(String4),
+ "=NOTICE REPORT==== "++_ = String4,
+
+ ok.
+
+single_line(_Config) ->
+ Time = timestamp(),
+ ExpectedTimestamp = default_time_format(Time),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>true}),
+ ct:log(String1),
+ " info: term\n" = string:prefix(String1,ExpectedTimestamp),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>false}),
+ ct:log(String2),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>bad}),
+
+
+ %% Test that no extra commas/spaces are added when removing
+ %% newlines, especially not after "=>" in a map association (as
+ %% was the case in OTP-21.0, when the only single_line adjustment
+ %% was done by regexp replacement of "\n" by ", ").
+ Prefix =
+ "Some characters to fill the line ------------------------------------- ",
+ String3 = format(info,{"~s~p~n~s~p~n",[Prefix,
+ lists:seq(1,10),
+ Prefix,
+                                           #{a=>map,with=>a,few=>associations}]},
+ #{time=>Time},
+ #{single_line=>true}),
+ ct:log(String3),
+ match = re:run(String3,"\\[1,2,3,4,5,6,7,8,9,10\\]",[{capture,none}]),
+ match = re:run(String3,
+                   "#{a => map,few => associations,with => a}",
+ [{capture,none}]),
+
+ %% This part is added to make sure that the previous test made
+ %% sense, i.e. that there would actually be newlines inside the
+ %% list and map.
+ String4 = format(info,{"~s~p~n~s~p~n",[Prefix,
+ lists:seq(1,10),
+ Prefix,
+                                           #{a=>map,with=>a,few=>associations}]},
+ #{time=>Time},
+ #{single_line=>false}),
+ ct:log(String4),
+ match = re:run(String4,"\\[1,2,3,\n",[global,{capture,none}]),
+ {match,Match4} = re:run(String4,"=>\n",[global,{capture,all}]),
+ 3 = length(Match4),
+
+ %% Test that big metadata fields do not get line breaks
+ String5 = format(info,"",
+ #{mymeta=>lists:seq(1,100)},
+ #{single_line=>true,template=>[mymeta,"\n"]}),
+ ct:log(String5),
+ [_] = string:lexemes(String5,"\n"),
+
+ %% Ensure that the previous test made sense, i.e. that the
+ %% metadata field does produce multiple lines if
+ %% single_line==false.
+ String6 = format(info,"",
+ #{mymeta=>lists:seq(1,100)},
+ #{single_line=>false,template=>[mymeta,"\n"]}),
+ ct:log(String6),
+ [_,_|_] = string:lexemes(String6,"\n"),
+
+ ok.
+
+template(_Config) ->
+ Time = timestamp(),
+
+ Template1 = [msg],
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template1}),
+ ct:log(String1),
+ "term" = String1,
+
+ Template2 = [msg,unknown],
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template2}),
+ ct:log(String2),
+ "term" = String2,
+
+ Template3 = ["string"],
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template3}),
+ ct:log(String3),
+ "string" = String3,
+
+ Template4 = ["string\nnewline"],
+ String4 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template4,
+ single_line=>true}),
+ ct:log(String4),
+ "string\nnewline" = String4,
+
+ Template5 = [],
+ String5 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template5}),
+ ct:log(String5),
+ "" = String5,
+
+ Ref6 = erlang:make_ref(),
+ Meta6 = #{atom=>some_atom,
+ integer=>632,
+ list=>[list,"string",4321,#{},{tuple}],
+ mfa=>{mod,func,0},
+ pid=>self(),
+ ref=>Ref6,
+ string=>"some string",
+ time=>Time,
+ tuple=>{1,atom,"list"},
+ nested=>#{subkey=>subvalue}},
+ Template6 = lists:join(";",lists:sort(maps:keys(maps:remove(nested,Meta6))) ++
+ [[nested,subkey]]),
+ String6 = format(info,{"~p",[term]},Meta6,#{template=>Template6,
+ single_line=>true}),
+ ct:log(String6),
+ SelfStr = pid_to_list(self()),
+ RefStr6 = ref_to_list(Ref6),
+ ListStr = "[list,\"string\",4321,#{},{tuple}]",
+ ExpectedTime6 = default_time_format(Time),
+ ["some_atom",
+ "632",
+ ListStr,
+ "mod:func/0",
+ SelfStr,
+ RefStr6,
+ "some string",
+ ExpectedTime6,
+ "{1,atom,\"list\"}",
+ "subvalue"] = string:lexemes(String6,";"),
+
+ Meta7 = #{time=>Time,
+ nested=>#{key1=>#{subkey1=>value1},
+ key2=>value2}},
+ Template7 = lists:join(";",[nested,
+ [nested,key1],
+ [nested,key1,subkey1],
+ [nested,key2],
+ [nested,key2,subkey2],
+ [nested,key3],
+ [nested,key3,subkey3]]),
+ String7 = format(info,{"~p",[term]},Meta7,#{template=>Template7,
+ single_line=>true}),
+ ct:log(String7),
+ [MultipleKeysStr7,
+ "#{subkey1 => value1}",
+ "value1",
+ "value2",
+ "",
+ "",
+ ""] = string:split(String7,";",all),
+ %% Order of keys is not fixed
+ case MultipleKeysStr7 of
+ "#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok;
+ "#{key1 => #{subkey1 => value1},key2 => value2}" -> ok;
+ _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr7})
+ end,
+
+ Meta8 = #{time=>Time,
+ nested=>#{key1=>#{subkey1=>value1},
+ key2=>value2}},
+ Template8 =
+ lists:join(
+ ";",
+ [{nested,["exist:",nested],["noexist"]},
+ {[nested,key1],["exist:",[nested,key1]],["noexist"]},
+ {[nested,key1,subkey1],["exist:",[nested,key1,subkey1]],["noexist"]},
+ {[nested,key2],["exist:",[nested,key2]],["noexist"]},
+ {[nested,key2,subkey2],["exist:",[nested,key2,subkey2]],["noexist"]},
+ {[nested,key3],["exist:",[nested,key3]],["noexist"]},
+ {[nested,key3,subkey3],["exist:",[nested,key3,subkey3]],["noexist"]}]),
+ String8 = format(info,{"~p",[term]},Meta8,#{template=>Template8,
+ single_line=>true}),
+ ct:log(String8),
+ [MultipleKeysStr8,
+ "exist:#{subkey1 => value1}",
+ "exist:value1",
+ "exist:value2",
+ "noexist",
+ "noexist",
+ "noexist"] = string:split(String8,";",all),
+ %% Order of keys is not fixed
+ case MultipleKeysStr8 of
+ "exist:#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok;
+ "exist:#{key1 => #{subkey1 => value1},key2 => value2}" -> ok;
+ _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr8})
+ end,
+
+ ok.
+
+format_msg(_Config) ->
+ Template = [msg],
+
+ String1 = format(info,{"~p",[term]},#{},#{template=>Template}),
+ ct:log(String1),
+ "term" = String1,
+
+ String2 = format(info,{"list",[term]},#{},#{template=>Template}),
+ ct:log(String2),
+ "FORMAT ERROR: \"list\" - [term]" = String2,
+
+ String3 = format(info,{report,term},#{},#{template=>Template}),
+ ct:log(String3),
+ "term" = String3,
+
+ String4 = format(info,{report,term},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String4),
+ "formatted" = String4,
+
+ String5 = format(info,{report,term},
+ #{report_cb=>fun(_)-> faulty_return end},
+ #{template=>Template}),
+ ct:log(String5),
+ "REPORT_CB/1 ERROR: term; Returned: faulty_return" = String5,
+
+ String6 = format(info,{report,term},
+ #{report_cb=>fun(_)-> erlang:error(fun_crashed) end},
+ #{template=>Template}),
+ ct:log(String6),
+ "REPORT_CB/1 CRASH: term; Reason: {error,fun_crashed,"++_ = String6,
+
+ String7 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> ['not',a,string] end},
+ #{template=>Template}),
+ ct:log(String7),
+ "REPORT_CB/2 ERROR: term; Returned: ['not',a,string]" = String7,
+
+ String8 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> faulty_return end},
+ #{template=>Template}),
+ ct:log(String8),
+ "REPORT_CB/2 ERROR: term; Returned: faulty_return" = String8,
+
+ String9 = format(info,{report,term},
+ #{report_cb=>fun(_,_)-> erlang:error(fun_crashed) end},
+ #{template=>Template}),
+ ct:log(String9),
+ "REPORT_CB/2 CRASH: term; Reason: {error,fun_crashed,"++_ = String9,
+
+ %% strings are not formatted
+ String10 = format(info,{string,"string"},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String10),
+ "string" = String10,
+
+ String11 = format(info,{string,['not',printable,list]},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log("~ts",[String11]), % avoiding ct_log crash
+ "FORMAT ERROR: \"~ts\" - [['not',printable,list]]" = String11,
+
+ String12 = format(info,{string,"string"},#{},#{template=>Template}),
+ ct:log(String12),
+ "string" = String12,
+
+ ok.
+
+report_cb(_Config) ->
+ Template = [msg],
+ MetaFun = fun(_) -> {"meta_rcb",[]} end,
+ ConfigFun = fun(_) -> {"config_rcb",[]} end,
+ "term" = format(info,{report,term},#{},#{template=>Template}),
+ "meta_rcb" =
+ format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template}),
+ "config_rcb" =
+ format(info,{report,term},#{},#{template=>Template,
+ report_cb=>ConfigFun}),
+ "config_rcb" =
+ format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template,
+ report_cb=>ConfigFun}),
+ ok.
+
+max_size(_Config) ->
+ Cfg = #{template=>[msg],
+ single_line=>false},
+ "12345678901234567890" =
+ format(info,{"12345678901234567890",[]},#{},Cfg),
+ %% application:set_env(kernel,logger_max_size,11),
+ %% "12345678901234567890" = % min value is 50, so this is not limited
+ %% format(info,{"12345678901234567890",[]},#{},Cfg),
+ %% "12345678901234567890123456789012345678901234567..." = % 50
+ %% format(info,
+ %% {"123456789012345678901234567890123456789012345678901234567890",
+ %% []},
+ %% #{},
+ %% Cfg),
+ %% application:set_env(kernel,logger_max_size,53),
+ %% "12345678901234567890123456789012345678901234567890..." = %53
+ %% format(info,
+ %% {"123456789012345678901234567890123456789012345678901234567890",
+ %% []},
+ %% #{},
+ %% Cfg),
+ "123456789012..." =
+ format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>15}),
+ "12345678901234567890" =
+ format(info,{"12345678901234567890",[]},#{},Cfg#{max_size=>unlimited}),
+ %% Check that one newline at the end of the line is kept (if it exists)
+ "12345678901...\n" =
+ format(info,{"12345678901234567890\n",[]},#{},Cfg#{max_size=>15}),
+ "12345678901...\n" =
+ format(info,{"12345678901234567890",[]},#{},Cfg#{template=>[msg,"\n"],
+ max_size=>15}),
+ ok.
+max_size(cleanup,_Config) ->
+ application:unset_env(kernel,logger_max_size),
+ ok.
+
+depth(_Config) ->
+ Template = [msg],
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ application:set_env(kernel,error_logger_format_depth,11),
+ "[1,2,3,4,5,6,7,8,9,0|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ "[1,2,3,4,5,6,7,8,9,0,1,2|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template,
+ depth=>13}),
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template,
+ depth=>unlimited}),
+ ok.
+depth(cleanup,_Config) ->
+ application:unset_env(kernel,error_logger_format_depth),
+ ok.
+
+chars_limit(_Config) ->
+ FA = {"LoL: ~p~nL: ~p~nMap: ~p~n",
+ [lists:duplicate(10,lists:seq(1,100)),
+ lists:seq(1,100),
+ maps:from_list(lists:zip(lists:seq(1,100),
+ lists:duplicate(100,value)))]},
+ Meta = #{time=>timestamp()},
+ Template = [time," - ", msg, "\n"],
+ FC = #{template=>Template,
+ depth=>unlimited,
+ max_size=>unlimited,
+ chars_limit=>unlimited,
+ single_line=>true},
+ CL1 = 80,
+ String1 = format(info,FA,Meta,FC#{chars_limit=>CL1}),
+ L1 = string:length(String1),
+ ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]),
+ true = L1 > CL1,
+ true = L1 < CL1 + 15,
+
+ String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}),
+ L2 = string:length(String2),
+ ct:log("String2: ~p~nLength2: ~p~n",[lists:flatten(String2),L2]),
+ String2 = String1,
+
+ CL3 = 200,
+ String3 = format(info,FA,Meta,FC#{chars_limit=>CL3}),
+ L3 = string:length(String3),
+ ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]),
+ true = L3 > CL3,
+ true = L3 < CL3 + 15,
+
+ String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}),
+ L4 = string:length(String4),
+ ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]),
+ true = L4 > CL3,
+ true = L4 < CL3 + 15,
+
+ %% Test that max_size truncates the string which is limited by
+ %% depth and chars_limit
+ MS5 = 150,
+ String5 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10,max_size=>MS5}),
+ L5 = string:length(String5),
+ ct:log("String5: ~p~nLength5: ~p~n",[String5,L5]),
+ L5 = MS5,
+ true = lists:prefix(lists:sublist(String5,L5-4),String4),
+
+    %% Test that chars_limit also limits plain strings
+ Str = "123456789012345678901234567890123456789012345678901234567890123456789",
+ CL6 = 80,
+ String6 = format(info,{string,Str},Meta,FC#{chars_limit=>CL6}),
+ L6 = string:length(String6),
+ ct:log("String6: ~p~nLength6: ~p~n",[String6,L6]),
+ L6 = CL6,
+
+ ok.
+
+format_mfa(_Config) ->
+ Template = [mfa],
+
+ Meta1 = #{mfa=>{mod,func,0}},
+ String1 = format(info,{"~p",[term]},Meta1,#{template=>Template}),
+ ct:log(String1),
+ "mod:func/0" = String1,
+
+ Meta2 = #{mfa=>{mod,func,[]}},
+ String2 = format(info,{"~p",[term]},Meta2,#{template=>Template}),
+ ct:log(String2),
+ "mod:func/0" = String2,
+
+ Meta3 = #{mfa=>"mod:func/0"},
+ String3 = format(info,{"~p",[term]},Meta3,#{template=>Template}),
+ ct:log(String3),
+ "mod:func/0" = String3,
+
+ Meta4 = #{mfa=>othermfa},
+ String4 = format(info,{"~p",[term]},Meta4,#{template=>Template}),
+ ct:log(String4),
+ "othermfa" = String4,
+
+ ok.
+
+format_time(_Config) ->
+ Time = timestamp(),
+ Meta = #{time=>Time},
+ FC = #{template=>[time]},
+ Msg = {string,""},
+ ExpectedLocal = default_time_format(Time,false),
+ ExpectedUtc = default_time_format(Time,true),
+
+ %% default - local time
+ ExpectedLocal = format(info,Msg,Meta,FC),
+
+ %% time_offset config parameter to formatter
+ ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}),
+ ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}),
+
+ %% stdlib utc_log works when time_offset parameter is not set
+ application:set_env(stdlib,utc_log,true),
+ ExpectedUtc = format(info,Msg,Meta,FC),
+
+ %% sasl utc_log overwrites stdlib utc_log
+ application:set_env(sasl,utc_log,false),
+ ExpectedLocal = format(info,Msg,Meta,FC),
+
+ %% sasl utc_log overwrites stdlib utc_log
+ application:set_env(sasl,utc_log,true),
+ application:set_env(stdlib,utc_log,false),
+ ExpectedUtc = format(info,Msg,Meta,FC),
+
+ %% time_offset config parameter to formatter
+ %% overwrites sasl and stdlib utc_log
+ application:set_env(sasl,utc_log,false),
+ ExpectedUtc = format(info,Msg,Meta,FC#{time_offset=>"Z"}),
+
+ %% time_offset config parameter to formatter
+ %% overwrites sasl and stdlib utc_log
+ application:set_env(sasl,utc_log,true),
+ application:set_env(stdlib,utc_log,true),
+ ExpectedLocal = format(info,Msg,Meta,FC#{time_offset=>""}),
+
+ %% time_designator config parameter to formatter
+ ExpectedLocalS = default_time_format(Time,false,$\s),
+ ExpectedUtcS = default_time_format(Time,true,$\s),
+
+ ExpectedLocalS = format(info,Msg,Meta,FC#{time_offset=>"",
+ time_designator=>$\s}),
+ ExpectedUtcS = format(info,Msg,Meta,FC#{time_offset=>"Z",
+ time_designator=>$\s}),
+
+ ok.
+
+format_time(cleanup,_Config) ->
+ application:unset_env(sasl,utc_log),
+ application:unset_env(stdlib,utc_log),
+ ok.
+
+level_or_msg_in_meta(_Config) ->
+ %% The template contains atoms to pick out values from meta,
+ %% or level/msg to add these from the log event. What if you have
+ %% a key named 'level' or 'msg' in meta and want to display
+ %% its value?
+    %% For now we simply ignore such keys in Meta and display the
+ %% actual level and msg from the log event.
+
+ Meta = #{level=>mylevel,
+ msg=>"metamsg"},
+ Template = [level,";",msg],
+ String = format(info,{"~p",[term]},Meta,#{template=>Template}),
+ ct:log(String),
+ "info;term" = String, % so mylevel and "metamsg" are ignored
+
+ ok.
+
+faulty_log(_Config) ->
+ %% Unexpected log (should be type logger:log_event()) - print error
+ {error,
+ function_clause,
+ {logger_formatter,format,[_,_],_}} =
+ ?TRY(logger_formatter:format(unexp_log,#{})),
+ ok.
+
+faulty_config(_Config) ->
+ {error,
+ function_clause,
+ {logger_formatter,format,[_,_],_}} =
+ ?TRY(logger_formatter:format(#{level=>info,
+ msg=>{"~p",[term]},
+ meta=>#{time=>timestamp()}},
+ unexp_config)),
+ ok.
+
+faulty_msg(_Config) ->
+ {error,
+ function_clause,
+ {logger_formatter,_,_,_}} =
+ ?TRY(logger_formatter:format(#{level=>info,
+ msg=>term,
+ meta=>#{time=>timestamp()}},
+ #{})),
+ ok.
+
+-define(cfgerr(X), {error,{invalid_formatter_config,logger_formatter,X}}).
+check_config(_Config) ->
+ ok = logger_formatter:check_config(#{}),
+ ?cfgerr(bad) = logger_formatter:check_config(bad),
+
+ C1 = #{chars_limit => 1,
+ depth => 1,
+ legacy_header => true,
+ error_logger_notice_header => info,
+ max_size => 1,
+ report_cb => fun(R) -> {"~p",[R]} end,
+ single_line => false,
+ template => [],
+ time_designator => $T,
+ time_offset => 0},
+ ok = logger_formatter:check_config(C1),
+
+ ok = logger_formatter:check_config(#{chars_limit => unlimited}),
+ ?cfgerr({chars_limit,bad}) =
+ logger_formatter:check_config(#{chars_limit => bad}),
+
+ ok = logger_formatter:check_config(#{depth => unlimited}),
+ ?cfgerr({depth,bad}) =
+ logger_formatter:check_config(#{depth => bad}),
+
+ ok = logger_formatter:check_config(#{legacy_header => false}),
+ ?cfgerr({legacy_header,bad}) =
+ logger_formatter:check_config(#{legacy_header => bad}),
+
+ ok = logger_formatter:check_config(#{error_logger_notice_header => notice}),
+ ?cfgerr({error_logger_notice_header,bad}) =
+ logger_formatter:check_config(#{error_logger_notice_header => bad}),
+
+ ok = logger_formatter:check_config(#{max_size => unlimited}),
+ ?cfgerr({max_size,bad}) =
+ logger_formatter:check_config(#{max_size => bad}),
+
+ ok =
+ logger_formatter:check_config(#{report_cb => fun(_,_) -> "" end}),
+ ?cfgerr({report_cb,F}) =
+ logger_formatter:check_config(#{report_cb => F=fun(_,_,_) -> {"",[]} end}),
+ ?cfgerr({report_cb,bad}) =
+ logger_formatter:check_config(#{report_cb => bad}),
+
+ ok = logger_formatter:check_config(#{single_line => true}),
+ ?cfgerr({single_line,bad}) =
+ logger_formatter:check_config(#{single_line => bad}),
+
+ Ts = [[key],
+ [[key1,key2]],
+ [{key,[key],[]}],
+ [{[key1,key2],[[key1,key2]],["noexist"]}],
+ ["string"]],
+ [begin
+ ct:log("check template: ~p",[T]),
+ ok = logger_formatter:check_config(#{template => T})
+ end
+ || T <- Ts],
+
+ ETs = [bad,
+ [{key,bad}],
+ [{key,[key],bad}],
+ [{key,[key],"bad"}],
+ "bad",
+ [[key,$a,$b,$c]],
+ [[$a,$b,$c,key]]],
+ [begin
+ ct:log("check template: ~p",[T]),
+ {error,{invalid_formatter_template,logger_formatter,T}} =
+ logger_formatter:check_config(#{template => T})
+ end
+ || T <- ETs],
+
+ ?cfgerr({time_designator,bad}) =
+ logger_formatter:check_config(#{time_designator => bad}),
+ ?cfgerr({time_designator,"b"}) =
+ logger_formatter:check_config(#{time_designator => "b"}),
+
+ ok = logger_formatter:check_config(#{time_offset => -1}),
+ ok = logger_formatter:check_config(#{time_offset => "+02:00"}),
+ ok = logger_formatter:check_config(#{time_offset => "-23:59"}),
+ ok = logger_formatter:check_config(#{time_offset => "+24:00"}),
+ ok = logger_formatter:check_config(#{time_offset => "-25:00"}),
+ ?cfgerr({time_offset,bad}) =
+ logger_formatter:check_config(#{time_offset => bad}),
+ ?cfgerr({time_offset,"02:00"}) =
+ logger_formatter:check_config(#{time_offset => "02:00"}),
+ ?cfgerr({time_offset,"+02"}) =
+ logger_formatter:check_config(#{time_offset => "+02"}),
+
+ ok.
+
+%% Test that formatter config can be changed, and that the default
+%% template is updated accordingly
+update_config(_Config) ->
+ {error,{not_found,?MODULE}} = logger:update_formatter_config(?MODULE,#{}),
+
+ logger:add_handler_filter(default,silence,{fun(_,_) -> stop end,ok}),
+ ok = logger:add_handler(?MODULE,?MODULE,#{}),
+ D = lists:seq(1,1000),
+ logger:notice("~p~n",[D]),
+ {Lines1,C1} = check_log(),
+ [ct:log(L) || L <- Lines1],
+ ct:log("~p",[C1]),
+ [Line1] = Lines1,
+ [_Time,"notice: "++D1] = string:split(Line1," "),
+ true = length(D1)>3000,
+ true = #{}==C1,
+
+ ok = logger:update_formatter_config(?MODULE,single_line,false),
+ logger:notice("~p~n",[D]),
+ {Lines2,C2} = check_log(),
+ [ct:log(L) || L <- Lines2],
+ ct:log("~p",[C2]),
+ true = length(Lines2)>50,
+ true = #{single_line=>false}==C2,
+
+ ok = logger:update_formatter_config(?MODULE,#{legacy_header=>true}),
+ logger:notice("~p~n",[D]),
+ {Lines3,C3} = check_log(),
+ [ct:log(L) || L <- Lines3],
+ ct:log("~p",[C3]),
+ ["=NOTICE REPORT==== "++_|D3] = Lines3,
+ true = length(D3)>50,
+ true = #{legacy_header=>true,single_line=>false}==C3,
+
+ ok = logger:update_formatter_config(?MODULE,single_line,true),
+ logger:notice("~p~n",[D]),
+ {Lines4,C4} = check_log(),
+ [ct:log(L) || L <- Lines4],
+ ct:log("~p",[C4]),
+ ["=NOTICE REPORT==== "++_,D4] = Lines4,
+ true = length(D4)>3000,
+ true = #{legacy_header=>true,single_line=>true}==C4,
+
+ %% Finally, check that error_logger_notice_header works, default=info
+ error_logger:info_msg("~p",[D]),
+ {Lines5,C5} = check_log(),
+ [ct:log(L) || L <- Lines5],
+ ct:log("~p",[C5]),
+ ["=INFO REPORT==== "++_,_D5] = Lines5,
+
+ ok=logger:update_formatter_config(?MODULE,error_logger_notice_header,notice),
+ error_logger:info_msg("~p",[D]),
+ {Lines6,C6} = check_log(),
+ [ct:log(L) || L <- Lines6],
+ ct:log("~p",[C6]),
+ ["=NOTICE REPORT==== "++_,_D6] = Lines6,
+
+ {error,{invalid_formatter_config,bad}} =
+ logger:update_formatter_config(?MODULE,bad),
+ {error,{invalid_formatter_config,logger_formatter,{depth,bad}}} =
+ logger:update_formatter_config(?MODULE,depth,bad),
+
+ ok.
+
+update_config(cleanup,_Config) ->
+ _ = logger:remove_handler(?MODULE),
+ _ = logger:remove_handler_filter(default,silence),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
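+%% Format a single log event with the given formatter config and
+%% flatten the result to a plain string.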
+format(Level,Msg,Meta,Config) ->
+ format(#{level=>Level,msg=>Msg,meta=>add_time(Meta)},Config).
+
+format(Log,Config) ->
+ lists:flatten(logger_formatter:format(Log,Config)).
+
+default_time_format(Timestamp) ->
+ default_time_format(Timestamp,false).
+
+default_time_format(Timestamp,Utc) ->
+ default_time_format(Timestamp,Utc,$T).
+
+default_time_format(Timestamp,Utc,Sep) ->
+ Offset = if Utc -> "Z";
+ true -> ""
+ end,
+ calendar:system_time_to_rfc3339(Timestamp,[{unit,microsecond},
+ {time_designator,Sep},
+ {offset,Offset}]).
+
+integer(Str) ->
+ is_integer(list_to_integer(Str)).
+integer(Str,Max) ->
+ integer(Str,0,Max).
+integer(Str,Min,Max) ->
+ Int = list_to_integer(Str),
+    Int >= Min andalso Int =< Max.
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R:S -> {C,R,hd(S)} end.
+
+timestamp() ->
+ logger:timestamp().
+
+%% necessary?
+add_time(#{time:=_}=Meta) ->
+ Meta;
+add_time(Meta) ->
+ Meta#{time=>timestamp()}.
+
+%%%-----------------------------------------------------------------
+%%% handler callback
+log(Log,#{formatter:={M,C}}) ->
+ put(log,{M:format(Log,C),C}),
+ ok.
+
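+%% Fetch the output stored by the log/2 callback above and split it
+%% into lines, returning {Lines,FormatterConfig}.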
+check_log() ->
+ {S,C} = erase(log),
+ {string:lexemes(S,"\n"),C}.
diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl
new file mode 100644
index 0000000000..c3cab07d81
--- /dev/null
+++ b/lib/kernel/test/logger_legacy_SUITE.erl
@@ -0,0 +1,288 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_legacy_SUITE).
+
+-compile(export_all).
+-compile({nowarn_deprecated_function,[{gen_fsm,start,3},
+ {gen_fsm,send_all_state_event,2}]}).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+%%%-----------------------------------------------------------------
+%%% This test suite tests that log events from within OTP can be
+%%% delivered to legacy error_logger event handlers in the same format
+%%% as before 'logger' was introduced.
+%%%
+%%% Before changing the expected format of any of the log events in
+%%% this suite, please make sure that the backwards incompatibility it
+%%% introduces is ok.
+%%% -----------------------------------------------------------------
+
+-define(check(Expected),
+ receive Expected ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+-define(check_no_flush(Expected),
+ receive Expected ->
+ ok
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ logger:add_handler(error_logger,error_logger,
+ #{level=>info,filter_default=>stop}),
+ Config.
+
+end_per_suite(_Config) ->
+ logger:remove_handler(error_logger),
+ ok.
+
+init_per_group(std, Config) ->
+ ok = logger:set_handler_config(
+ error_logger,filters,
+ [{domain,{fun logger_filters:domain/2,{log,super,[otp]}}}]),
+ Config;
+init_per_group(sasl, Config) ->
+    %% Since the default level is notice, and progress reports are info,
+    %% we need to raise the primary log level to info in order to
+ %% receive these.
+ ok = logger:set_primary_config(level,info),
+ ok = logger:set_handler_config(
+ error_logger,filters,
+ [{domain,{fun logger_filters:domain/2,{log,super,[otp,sasl]}}}]),
+
+    %% cth_log_redirect checks if sasl is started before displaying
+    %% any sasl reports - so start it here just to see the real sasl
+    %% reports in the test case log:
+ {ok,Apps} = application:ensure_all_started(sasl),
+ [{stop_apps,Apps}|Config];
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(sasl, Config) ->
+ Apps = ?config(stop_apps,Config),
+ [application:stop(App) || App <- Apps],
+ ok = logger:set_primary_config(level,notice),
+ ok;
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ error_logger:add_report_handler(?MODULE,{event_handler,self()}),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ %% Using gen_event directly here, instead of
+ %% error_logger:delete_report_handler. This is to avoid
+ %% automatically stopping the error_logger process due to removing
+ %% the last handler.
+ gen_event:delete_handler(error_logger,?MODULE,[]),
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [{std,[],[gen_server,
+ gen_event,
+ gen_fsm,
+ gen_statem]},
+ {sasl,[],[sasl_reports,
+ supervisor_handle_info]}].
+
+all() ->
+ [{group,std},
+ {group,sasl}].
+
+gen_server(_Config) ->
+ {ok,Pid} = gen_server:start(?MODULE,gen_server,[]),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}),
+ ok = gen_server:cast(Pid,Msg),
+ ?check({error,"** Generic server ~tp terminating"++_,
+ [Pid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}).
+
+gen_event(_Config) ->
+ {ok,Pid} = gen_event:start(),
+ ok = gen_event:add_handler(Pid,?MODULE,gen_event),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~tp"++_,[?MODULE,Msg]}),
+ gen_event:notify(Pid,Msg),
+ ?check({error,"** gen_event handler ~p crashed."++_,
+ [?MODULE,Pid,Msg,gen_event,{{badmatch,b},_}]}).
+
+gen_fsm(_Config) ->
+ {ok,Pid} = gen_fsm:start(?MODULE,gen_fsm,[]),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}),
+ gen_fsm:send_all_state_event(Pid,Msg),
+ ?check({error,"** State machine ~tp terminating"++_,
+ [Pid,Msg,mystate,gen_fsm,{{badmatch,b},_}]}).
+
+gen_statem(_Config) ->
+ {ok,Pid} = gen_statem:start(?MODULE,gen_statem,[]),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ Pid ! Msg,
+ ?check({error,"** State machine ~tp terminating"++_,
+ [Pid,{info,Msg},{mystate,gen_statem},error,{badmatch,b}|_]}).
+
+sasl_reports(Config) ->
+ App = {application,?MODULE,[{description, ""},
+ {vsn, "1.0"},
+ {modules, [?MODULE]},
+ {registered, []},
+ {applications, []},
+ {mod, {?MODULE, []}}]},
+ AppStr = io_lib:format("~p.",[App]),
+ Dir = ?config(priv_dir,Config),
+ AppFile = filename:join(Dir,?MODULE_STRING++".app"),
+ ok = file:write_file(AppFile,AppStr),
+ true = code:add_patha(Dir),
+ ok = application:start(?MODULE),
+ SupName = sup_name(),
+ Pid = whereis(SupName),
+ [{ch,ChPid,_,_}] = supervisor:which_children(Pid),
+ Node = node(),
+ ?check_no_flush({info_report,progress,[{application,?MODULE},
+ {started_at,Node}]}),
+ ?check({info_report,progress,[{supervisor,{local,SupName}},
+ {started,[{pid,ChPid}|_]}]}),
+ ok = gen_server:cast(ChPid, fun() ->
+ spawn_link(fun() -> receive x->ok end end)
+ end),
+ Msg = fun() -> erlang:error({badmatch,b}) end,
+ ok = gen_server:cast(ChPid,Msg),
+ ?check_no_flush({error,"** Generic server ~tp terminating"++_,
+ [ChPid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}),
+ ?check_no_flush({error_report,crash_report,
+ [[{initial_call,_},
+ {pid,ChPid},
+ {registered_name,[]},
+ {error_info,{error,{badmatch,b},_}},
+ {ancestors,_},
+ {message_queue_len,_},
+ {messages,_},
+ {links,[Pid,Neighbour]},
+ {dictionary,_},
+ {trap_exit,_},
+ {status,_},
+ {heap_size,_},
+ {stack_size,_},
+ {reductions,_}],
+ [{neighbour,[{pid,Neighbour},
+ {registered_name,_},
+ {initial_call,_},
+ {current_function,_},
+ {ancestors,_},
+ {message_queue_len,_},
+ {links,[ChPid]},
+ {trap_exit,_},
+ {status,_},
+ {heap_size,_},
+ {stack_size,_},
+ {reductions,_},
+ {current_stacktrace,_}]}]]}),
+ ?check_no_flush({error_report,supervisor_report,
+ [{supervisor,{local,SupName}},
+ {errorContext,child_terminated},
+ {reason,{{badmatch,b},_}},
+ {offender,[{pid,ChPid}|_]}]}),
+ ?check({info_report,progress,[{supervisor,{local,SupName}},
+ {started,_}]}),
+
+ ok = application:stop(?MODULE),
+ ?check({info_report,std_info,[{application,?MODULE},
+ {exited,stopped},
+ {type,temporary}]}).
+
+sasl_reports(cleanup,_Config) ->
+ application:stop(?MODULE).
+
+supervisor_handle_info(_Config) ->
+ {ok,Pid} = supervisor:start_link({local,sup_name()},?MODULE,supervisor),
+ ?check({info_report,progress,[{supervisor,_},{started,_}]}),
+ Pid ! msg,
+ ?check({error,"Supervisor received unexpected message: ~tp~n",[msg]}).
+
+supervisor_handle_info(cleanup,_Config) ->
+ Pid = whereis(sup_name()),
+ unlink(Pid),
+ exit(Pid,shutdown).
+
+%%%-----------------------------------------------------------------
+%%% Callbacks for error_logger event handler, gen_server, gen_statem,
+%%% gen_fsm, gen_event, supervisor and application.
+start(_,_) ->
+ supervisor:start_link({local,sup_name()},?MODULE,supervisor).
+
+init(supervisor) ->
+ {ok,{#{},[#{id=>ch,start=>{gen_server,start_link,[?MODULE,gen_server,[]]}}]}};
+init(StateMachine) when StateMachine==gen_statem; StateMachine==gen_fsm ->
+ {ok,mystate,StateMachine};
+init(State) ->
+ {ok,State}.
+
+%% error_logger event handler
+handle_event({Tag,_Gl,{_Pid,Type,Report}},{_,Pid}=State) ->
+ Pid ! {Tag,Type,Report},
+ {ok,State};
+%% other gen_event
+handle_event(Fun,State) when is_function(Fun) ->
+ Fun(),
+ {next_state,State}.
+
+%% gen_fsm
+handle_event(Fun,State,Data) when is_function(Fun) ->
+ Fun(),
+ {next_state,State,Data}.
+
+%% gen_statem
+handle_event(info,Fun,State,Data) when is_function(Fun) ->
+ Fun(),
+ {next_state,State,Data}.
+
+%% gen_server
+handle_cast(Fun,State) when is_function(Fun) ->
+ Fun(),
+ {noreply,State}.
+
+%% gen_statem
+callback_mode() ->
+ handle_event_function.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+sup_name() ->
+ list_to_atom(?MODULE_STRING++"_sup").
diff --git a/lib/kernel/test/logger_olp_SUITE.erl b/lib/kernel/test/logger_olp_SUITE.erl
new file mode 100644
index 0000000000..ea3eec89f5
--- /dev/null
+++ b/lib/kernel/test/logger_olp_SUITE.erl
@@ -0,0 +1,90 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_olp_SUITE).
+
+-compile(export_all).
+
+-include_lib("kernel/src/logger_olp.hrl").
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [idle_timer].
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+idle_timer(_Config) ->
+ {ok,_Pid,Olp} = logger_olp:start_link(?MODULE,?MODULE,self(),#{}),
+ [logger_olp:load(Olp,{msg,N}) || N<-lists:seq(1,3)],
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ [{load,{msg,1}},
+ {load,{msg,2}},
+ {load,{msg,3}},
+ {notify,idle}] = test_server:messages_get(),
+ logger_olp:cast(Olp,hello),
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ [{cast,hello}] = test_server:messages_get(),
+ ok.
+idle_timer(cleanup,_Config) ->
+ unlink(whereis(?MODULE)),
+ logger_olp:stop(?MODULE),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Olp callbacks
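+%% Each callback simply forwards the event to the test case process (P)
+%% so that the test can assert on the received message sequence.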
+init(P) ->
+ {ok,P}.
+
+handle_load(M,P) ->
+ P ! {load,M},
+ P.
+
+handle_cast(M,P) ->
+ P ! {cast,M},
+ {noreply,P}.
+
+notify(N,P) ->
+ P ! {notify,N},
+ P.
diff --git a/lib/kernel/test/logger_proxy_SUITE.erl b/lib/kernel/test/logger_proxy_SUITE.erl
new file mode 100644
index 0000000000..777531e4ed
--- /dev/null
+++ b/lib/kernel/test/logger_proxy_SUITE.erl
@@ -0,0 +1,274 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_proxy_SUITE).
+
+-compile(export_all).
+
+%% -include_lib("common_test/include/ct.hrl").
+%% -include_lib("kernel/include/logger.hrl").
+%% -include_lib("kernel/src/logger_internal.hrl").
+
+%% -define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+%% ":"++integer_to_list(?LINE)).
+%% -define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+%% -define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+%% -define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+%% file=>?FILE, line=>?LINE-N}).
+
+%% -define(TRY(X), my_try(fun() -> X end)).
+
+
+-define(HNAME,list_to_atom(lists:concat([?MODULE,"_",?FUNCTION_NAME]))).
+-define(LOC,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},line=>?LINE}).
+-define(ENSURE_TIME,5000).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [basic,
+ emulator,
+ remote,
+ remote_emulator,
+ config,
+ restart_after,
+ terminate].
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+basic(_Config) ->
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ logger_proxy ! {log,notice,"Log from: ~p; ~p",[?FUNCTION_NAME,?LINE],L1=?LOC},
+ ok = ensure(L1),
+ logger_proxy ! {log,notice,[{test_case,?FUNCTION_NAME},{line,?LINE}],L2=?LOC},
+ ok = ensure(L2),
+ logger_proxy:log({remote,node(),{log,notice,
+ "Log from: ~p; ~p",
+ [?FUNCTION_NAME,?LINE],
+ L3=?LOC}}),
+ ok = ensure(L3),
+ logger_proxy:log({remote,node(),{log,notice,
+ [{test_case,?FUNCTION_NAME},
+ {line,?LINE}],
+ L4=?LOC}}),
+ ok = ensure(L4),
+ ok.
+basic(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+emulator(_Config) ->
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ Pid = spawn(fun() -> erlang:error(some_reason) end),
+ ok = ensure(#{pid=>Pid}),
+ ok.
+emulator(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+remote(Config) ->
+ {ok,_,Node} = logger_test_lib:setup(Config,[{logger,[{proxy,#{}}]}]),
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ L1 = ?LOC, spawn(Node,fun() -> logger:notice("Log from ~p; ~p",[?FUNCTION_NAME,?LINE],L1) end),
+ ok = ensure(L1),
+ L2 = ?LOC, spawn(Node,fun() -> logger:notice([{test_case,?FUNCTION_NAME},{line,?LINE}],L2) end),
+ ok = ensure(L2),
+ ok.
+remote(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+remote_emulator(Config) ->
+ {ok,_,Node} = logger_test_lib:setup(Config,[{logger,[{proxy,#{}}]}]),
+ ok = logger:add_handler(?HNAME,?MODULE,#{config=>self()}),
+ Pid = spawn(Node,fun() -> erlang:error(some_reason) end),
+ ok = ensure(#{pid=>Pid}),
+ ok.
+remote_emulator(cleanup,_Config) ->
+ ok = logger:remove_handler(?HNAME).
+
+config(_Config) ->
+ C1 = #{sync_mode_qlen:=SQ,
+ drop_mode_qlen:=DQ} = logger:get_proxy_config(),
+ C1 = logger_olp:get_opts(logger_proxy),
+
+ %% Update the existing config with these two values
+ SQ1 = SQ+1,
+ DQ1 = DQ+1,
+ ok = logger:update_proxy_config(#{sync_mode_qlen=>SQ1,
+ drop_mode_qlen=>DQ1}),
+ C2 = logger:get_proxy_config(), % reads from ets table
+ C2 = logger_olp:get_opts(logger_proxy), % ensure consistency with process opts
+ C2 = C1#{sync_mode_qlen:=SQ1,
+ drop_mode_qlen:=DQ1},
+
+ %% Update the existing again with only one value
+ SQ2 = SQ+2,
+ ok = logger:update_proxy_config(#{sync_mode_qlen=>SQ2}),
+ C3 = logger:get_proxy_config(),
+ C3 = logger_olp:get_opts(logger_proxy),
+ C3 = C2#{sync_mode_qlen:=SQ2},
+
+ %% Set the config, i.e. merge with defaults
+ ok = logger:set_proxy_config(#{sync_mode_qlen=>SQ1}),
+ C4 = logger:get_proxy_config(),
+ C4 = logger_olp:get_opts(logger_proxy),
+ C4 = C1#{sync_mode_qlen:=SQ1},
+
+ %% Reset to default
+ ok = logger:set_proxy_config(#{}),
+ C5 = logger:get_proxy_config(),
+ C5 = logger_olp:get_opts(logger_proxy),
+ C5 = logger_proxy:get_default_config(),
+
+ %% Errors
+ {error,{invalid_olp_config,_}} =
+ logger:set_proxy_config(#{faulty_key=>1}),
+ {error,{invalid_olp_config,_}} =
+ logger:set_proxy_config(#{sync_mode_qlen=>infinity}),
+ {error,{invalid_config,[]}} = logger:set_proxy_config([]),
+
+ {error,{invalid_olp_config,_}} =
+ logger:update_proxy_config(#{faulty_key=>1}),
+ {error,{invalid_olp_config,_}} =
+ logger:update_proxy_config(#{sync_mode_qlen=>infinity}),
+ {error,{invalid_config,[]}} = logger:update_proxy_config([]),
+
+ C5 = logger:get_proxy_config(),
+ C5 = logger_olp:get_opts(logger_proxy),
+
+ ok.
+config(cleanup,_Config) ->
+ _ = logger:set_logger_proxy(logger_proxy:get_default_config()),
+ ok.
+
+restart_after(_Config) ->
+ Restart = 3000,
+ ok = logger:update_proxy_config(#{overload_kill_enable => true,
+ overload_kill_qlen => 10,
+ overload_kill_restart_after => Restart}),
+ Proxy = whereis(logger_proxy),
+ Proxy = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ Ref = erlang:monitor(process,Proxy),
+ spawn(fun() ->
+ [logger_proxy ! {log,debug,
+ [{test_case,?FUNCTION_NAME},
+ {line,?LINE}],
+ ?LOC} || _ <- lists:seq(1,100)]
+ end),
+ receive
+ {'DOWN',Ref,_,_,_Reason} ->
+ undefined = erlang:system_info(system_logger),
+ timer:sleep(Restart),
+ poll_restarted(10)
+ after 5000 ->
+ ct:fail(proxy_not_terminated)
+ end,
+
+ Proxy1 = whereis(logger_proxy),
+ Proxy1 = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ ok.
+restart_after(cleanup,_Config) ->
+ _ = logger:set_logger_proxy(logger_proxy:get_default_config()),
+ ok.
+
+%% Test that the system_logger flag is set to the logger process if
+%% logger_proxy terminates for any reason other than overload.
+terminate(_Config) ->
+ Logger = whereis(logger),
+ Proxy = whereis(logger_proxy),
+ Proxy = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ Ref = erlang:monitor(process,Proxy),
+ ok = logger_olp:stop(Proxy),
+ receive
+ {'DOWN',Ref,_,_,_Reason} ->
+ Logger = erlang:system_info(system_logger),
+ logger_proxy:restart(),
+ poll_restarted(10)
+ after 5000 ->
+ ct:fail(proxy_not_terminated)
+ end,
+
+ Proxy1 = whereis(logger_proxy),
+ Proxy1 = erlang:system_info(system_logger),
+ ProxyConfig = logger:get_proxy_config(),
+ ProxyConfig = logger_olp:get_opts(logger_proxy),
+
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
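+%% Wait (up to N seconds) for the logger proxy process to be registered
+%% again after a restart.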
+poll_restarted(0) ->
+ ct:fail(proxy_not_restarted);
+poll_restarted(N) ->
+ timer:sleep(1000),
+ case whereis(logger_proxy) of
+ undefined ->
+ poll_restarted(N-1);
+ _Pid ->
+ ok
+ end.
+
+%% Logger handler callback
+log(#{meta:=Meta},#{config:=Pid}) ->
+ Pid ! {logged,Meta}.
+
+%% Check that the log from the logger callback function log/2 is received
+ensure(Match) ->
+ receive {logged,Meta} ->
+ case maps:with(maps:keys(Match),Meta) of
+ Match -> ok;
+ _NoMatch -> {error,Match,Meta,test_server:messages_get()}
+ end
+ after ?ENSURE_TIME -> {error,Match,test_server:messages_get()}
+ end.
diff --git a/lib/kernel/test/logger_simple_h_SUITE.erl b/lib/kernel/test/logger_simple_h_SUITE.erl
new file mode 100644
index 0000000000..e0ad792bdb
--- /dev/null
+++ b/lib/kernel/test/logger_simple_h_SUITE.erl
@@ -0,0 +1,209 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-import(logger_test_lib, [setup/2, log/3, sync_and_read/3]).
+
+-define(check_no_log,[] = test_server:messages_get()).
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks, [logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Hs0 = logger:get_handler_config(),
+ Hs = lists:keydelete(cth_log_redirect,1,Hs0),
+ [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs],
+ Env = [{App,Key,application:get_env(App,Key)} ||
+ {App,Key} <- [{kernel,logger_level}]],
+ [{env,Env},{logger,Hs}|Config].
+
+end_per_suite(Config) ->
+ [application:set_env(App,Key,Val) || {App,Key,Val} <- ?config(env,Config)],
+ Hs = ?config(logger,Config),
+ [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs],
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ replace_default,
+ replace_file,
+ replace_disk_log
+ ].
+
+start_stop(_Config) ->
+ undefined = whereis(logger_simple_h),
+ register(logger_simple_h,self()),
+ {error,_} = logger:add_handler(simple,
+ logger_simple_h,
+ #{filter_default=>log}),
+ unregister(logger_simple_h),
+ ok = logger:add_handler(simple,logger_simple_h,#{filter_default=>log}),
+ Pid = whereis(logger_simple_h),
+ true = is_pid(Pid),
+ ok = logger:remove_handler(simple),
+ false = is_pid(whereis(logger_simple_h)),
+ ok.
+start_stop(cleanup,_Config) ->
+ logger:remove_handler(simple).
+
+%% This testcase only checks that nothing crashes; the default handler prints
+%% to stdout, which we cannot read from in a detached slave.
+replace_default(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [?str]),
+ log(Node, alert, [?str,[]]),
+ log(Node, error, [?map_rep]),
+ log(Node, info, [?keyval_rep]),
+ log(Node, info, [?keyval_rep++[not_key_val]]),
+ rpc:call(Node, error_logger, error_report, [some_type,?map_rep]),
+ rpc:call(Node, error_logger, warning_report, ["some_type",?map_rep]),
+ log(Node, critical, [?str,[?keyval_rep]]),
+ log(Node, notice, [["fake",string,"line:",?LINE]]),
+
+ ok = rpc:call(Node, logger, add_handlers, [kernel]),
+
+ ok.
+
+replace_file(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [M1=?str]),
+ log(Node, alert, [M2=?str,[]]),
+ log(Node, error, [?map_rep]),
+ log(Node, warning, [?keyval_rep]),
+ log(Node, warning, [?keyval_rep++[not_key_val]]),
+ log(Node, critical, [?str,[?keyval_rep]]),
+ log(Node, notice, [["fake",string,"line:",?LINE]]),
+
+ File = filename:join(proplists:get_value(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+
+ ok = rpc:call(Node, logger, add_handlers,
+ [[{handler, default, logger_std_h,
+ #{ config => #{ type => {file, File} },
+ formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]),
+
+ {ok,Bin} = sync_and_read(Node, file, File),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ _,
+ "=CRITICAL REPORT===="++_,
+ _,
+ _,
+ "=NOTICE REPORT===="++_,
+ _
+ ] = Lines,
+ ok.
+
+replace_disk_log(Config) ->
+
+ {ok, _, Node} = logger_test_lib:setup(Config, [{logger, [{handler, default, undefined}]}]),
+ log(Node, emergency, [M1=?str]),
+ log(Node, alert, [M2=?str,[]]),
+ log(Node, error, [?map_rep]),
+ log(Node, warning, [?keyval_rep]),
+ log(Node, warning, [?keyval_rep++[not_key_val]]),
+ log(Node, critical, [?str,[?keyval_rep]]),
+ log(Node, notice, [["fake",string,"line:",?LINE]]),
+
+ File = filename:join(proplists:get_value(priv_dir,Config),
+ atom_to_list(?FUNCTION_NAME)++".log"),
+
+ ok = rpc:call(Node, logger, add_handlers,
+ [[{handler, default, logger_disk_log_h,
+ #{ config => #{ file => File },
+ formatter => {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}}]]),
+ {ok,Bin} = sync_and_read(Node, disk_log, File),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ "=WARNING REPORT===="++_,
+ _,
+ _,
+ _,
+ "=CRITICAL REPORT===="++_,
+ _,
+ _,
+ "=NOTICE REPORT===="++_,
+ _
+ ] = Lines,
+ ok.
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
new file mode 100644
index 0000000000..2b2d509860
--- /dev/null
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -0,0 +1,2184 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_std_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("kernel/src/logger_olp.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-define(check_no_log,
+ begin
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ [] = test_server:messages_get()
+ end).
+-define(check(Expected),
+ receive
+ {log,Expected} ->
+ [] = test_server:messages_get()
+ after 5000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(bin(Msg), list_to_binary(Msg++"\n")).
+-define(domain,#{domain=>[?MODULE]}).
+
+suite() ->
+ [{timetrap,{seconds,30}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ timer:start(), % to avoid progress report
+ {ok,#{formatter:=OrigFormatter}} =
+ logger:get_handler_config(?STANDARD_HANDLER),
+ [{formatter,OrigFormatter}|Config].
+
+end_per_suite(Config) ->
+ {OrigMod,OrigConf} = proplists:get_value(formatter,Config),
+ logger:set_handler_config(?STANDARD_HANDLER,formatter,{OrigMod,OrigConf}),
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(reopen_changed_log=TC, Config) ->
+ case os:type() of
+ {win32,_} ->
+ {skip,"This test can only work with inodes, i.e. not on Windows"};
+ _ ->
+ ct:print("********** ~w **********", [TC]),
+ Config
+ end;
+init_per_testcase(TestHooksCase, Config) when
+ TestHooksCase == write_failure;
+ TestHooksCase == sync_failure ->
+ case (fun() -> ?TEST_HOOKS_TAB == undefined end)() of
+ true ->
+ {skip,"Define the TEST_HOOKS macro to run this test"};
+ false ->
+ ct:print("********** ~w **********", [TestHooksCase]),
+ Config
+ end;
+init_per_testcase(OPCase, Config) when
+ OPCase == qlen_kill_new;
+ OPCase == restart_after ->
+ case re:run(erlang:system_info(system_version),
+ "dirty-schedulers-TEST",
+ [{capture,none}]) of
+ match ->
+ {skip,"Overload protection test skipped on dirty-schedulers-TEST"};
+ nomatch ->
+ ct:print("********** ~w **********", [OPCase]),
+ Config
+ end;
+init_per_testcase(TestCase, Config) ->
+ ct:print("********** ~w **********", [TestCase]),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [add_remove_instance_tty,
+ add_remove_instance_standard_io,
+ add_remove_instance_standard_error,
+ add_remove_instance_file1,
+ add_remove_instance_file2,
+ add_remove_instance_file3,
+ add_remove_instance_file4,
+ default_formatter,
+ filter_config,
+ errors,
+ formatter_fail,
+ config_fail,
+ crash_std_h_to_file,
+ crash_std_h_to_disk_log,
+ bad_input,
+ reconfig,
+ file_opts,
+ relative_file_path,
+ sync,
+ write_failure,
+ sync_failure,
+ op_switch_to_sync_file,
+ op_switch_to_sync_tty,
+ op_switch_to_drop_file,
+ op_switch_to_drop_tty,
+ op_switch_to_flush_file,
+ op_switch_to_flush_tty,
+ limit_burst_disabled,
+ limit_burst_enabled_one,
+ limit_burst_enabled_period,
+ kill_disabled,
+ qlen_kill_new,
+ qlen_kill_std,
+ mem_kill_new,
+ mem_kill_std,
+ restart_after,
+ handler_requests_under_load,
+ recreate_deleted_log,
+ reopen_changed_log,
+ rotate_size,
+ rotate_size_compressed,
+ rotate_size_reopen,
+ rotation_opts,
+ rotation_opts_restart_handler
+ ].
+
+add_remove_instance_tty(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{type:=tty}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{type => tty},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ ok.
+
+add_remove_instance_standard_io(_Config) ->
+ add_remove_instance_nofile(standard_io).
+add_remove_instance_standard_io(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_standard_error(_Config) ->
+ add_remove_instance_nofile(standard_error).
+add_remove_instance_standard_error(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file1(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog1.txt"),
+ Type = {file,Log},
+ add_remove_instance_file(Log, Type).
+add_remove_instance_file1(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file2(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog2.txt"),
+ Type = {file,Log,[raw,append]},
+ add_remove_instance_file(Log, Type).
+add_remove_instance_file2(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file3(_Config) ->
+ Log = atom_to_list(?MODULE),
+ StdHConfig = #{type=>file},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file3(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file4(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog4.txt"),
+ StdHConfig = #{file=>Log,modes=>[]},
+ add_remove_instance_file(Log, StdHConfig).
+add_remove_instance_file4(cleanup,_Config) ->
+ logger_std_h_remove().
+
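+%% Add a logger_std_h handler writing to Log, check that a log event reaches
+%% the file, then remove the handler and check that no further events are
+%% written.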
+add_remove_instance_file(Log, Type) when not is_map(Type) ->
+ add_remove_instance_file(Log,#{type=>Type});
+add_remove_instance_file(Log, StdHConfig) when is_map(StdHConfig) ->
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => StdHConfig,
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Pid = whereis(h_proc_name()),
+ true = is_pid(Pid),
+ logger:notice(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name()),
+ logger:notice(?msg,?domain),
+ ?check_no_log,
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
+ ok.
+
+default_formatter(_Config) ->
+ ok = logger:set_handler_config(?STANDARD_HANDLER,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ ct:capture_start(),
+ logger:notice(M1=?msg),
+ timer:sleep(100),
+ ct:capture_stop(),
+ [Msg] = ct:capture_get(),
+ match = re:run(Msg,"=NOTICE REPORT====.*\n"++M1,[{capture,none}]),
+ ok.
+
+filter_config(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {ok,#{config:=HConfig}=Config} = logger:get_handler_config(?MODULE),
+ HConfig = maps:without([olp],HConfig),
+
+ FakeFullHConfig = HConfig#{olp=>{regname,self(),erlang:make_ref()}},
+ #{config:=HConfig} =
+ logger_std_h:filter_config(Config#{config=>FakeFullHConfig}),
+ ok.
+
+filter_config(cleanup,_Config) ->
+ logger:remove_handler(?MODULE),
+ ok.
+
+errors(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,?FUNCTION_NAME),
+
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {error,{already_exist,?MODULE}} =
+ logger:add_handler(?MODULE,logger_std_h,#{}),
+
+ {error,{not_found,no_such_name}} = logger:remove_handler(no_such_name),
+
+ ok = logger:remove_handler(?MODULE),
+ {error,{not_found,?MODULE}} = logger:remove_handler(?MODULE),
+
+ {error,
+ {handler_not_added,
+ {invalid_config,logger_std_h,#{type:=faulty_type}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{type => faulty_type}}),
+
+ case os:type() of
+ {win32,_} ->
+            %% No use in testing file access on Windows
+ ok;
+ _ ->
+ NoDir = lists:concat(["/",?MODULE,"_dir"]),
+ {error,
+ {handler_not_added,{open_failed,NoDir,eacces}}} =
+ logger:add_handler(myh2,logger_std_h,
+ #{config=>#{type=>{file,NoDir}}})
+ end,
+
+ {error,
+ {handler_not_added,
+ {invalid_config,logger_std_h,#{modes:=bad_file_opt}}}} =
+ logger:add_handler(myh3,logger_std_h,
+ #{config=>#{type=>{file,Log,bad_file_opt}}}),
+
+ ok = logger:notice(?msg).
+
+errors(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+formatter_fail(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,?FUNCTION_NAME),
+
+ %% no formatter
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => {file,Log}},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}),
+ Pid = whereis(h_proc_name()),
+ true = is_pid(Pid),
+ H = logger:get_handler_ids(),
+ true = lists:member(?MODULE,H),
+
+ %% Formatter is added automatically
+ {ok,#{formatter:={logger_formatter,_}}} = logger:get_handler_config(?MODULE),
+ logger:notice(M1=?msg,?domain),
+ Got1 = try_match_file(Log,"[0-9\\+\\-T:\\.]* notice: "++M1,5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}),
+ logger:notice(M2=?msg,?domain),
+ Got2 = try_match_file(Log,
+ escape(Got1)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M2,
+ 5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}),
+ logger:notice(M3=?msg,?domain),
+ Got3 = try_match_file(Log,
+ escape(Got2)++"[0-9\\+\\-T:\\.]* notice: FORMATTER CRASH: .*"++M3,
+ 5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}),
+ logger:notice(?msg,?domain),
+ try_match_file(Log,
+ escape(Got3)++"FORMATTER ERROR: bad return value",
+ 5000),
+
+    %% Check that the handler is still alive and was never restarted
+ Pid = whereis(h_proc_name()),
+ H = logger:get_handler_ids(),
+
+ ok.
+
+formatter_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+config_fail(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_std_h,#{bad:=bad}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{bad => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ #{restart_type:=bad}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{restart_type => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=1}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{drop_mode_qlen=>1}}),
+ {error,{handler_not_added,{invalid_olp_levels,#{sync_mode_qlen:=43,
+ drop_mode_qlen:=42}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{sync_mode_qlen=>43,
+ drop_mode_qlen=>42}}),
+ {error,{handler_not_added,{invalid_olp_levels,#{drop_mode_qlen:=43,
+ flush_qlen:=42}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{drop_mode_qlen=>43,
+ flush_qlen=>42}}),
+
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{illegal_config_change,logger_std_h,#{type:=_},#{type:=_}}} =
+ logger:set_handler_config(?MODULE,config,
+ #{type=>{file,"file"}}),
+
+ {error,{invalid_olp_levels,_}} =
+ logger:set_handler_config(?MODULE,config,
+ #{sync_mode_qlen=>100,
+ flush_qlen=>99}),
+ {error,{invalid_config,logger_std_h,#{filesync_rep_int:=2000}}} =
+ logger:set_handler_config(?MODULE, config,
+ #{filesync_rep_int => 2000}),
+
+    %% Read-only fields may (accidentally) be included in the change,
+    %% but they will not take effect
+ {ok,C} = logger:get_handler_config(?MODULE),
+ ok = logger:set_handler_config(?MODULE,config,#{olp=>dummyvalue}),
+ {ok,C} = logger:get_handler_config(?MODULE),
+
+ ok.
+
+config_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+crash_std_h_to_file(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])),
+ crash_std_h(Config,?FUNCTION_NAME,
+ [{handler,default,logger_std_h,
+ #{ config => #{ type => {file, Log} }}}],
+ file, Log).
+crash_std_h_to_file(cleanup,_Config) ->
+ crash_std_h(cleanup).
+
+crash_std_h_to_disk_log(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"])),
+ crash_std_h(Config,?FUNCTION_NAME,
+ [{handler,default,logger_disk_log_h,
+ #{ config => #{ file => Log }}}],
+ disk_log,Log).
+crash_std_h_to_disk_log(cleanup,_Config) ->
+ crash_std_h(cleanup).
+
+crash_std_h(Config,Func,Var,Type,Log) ->
+ Dir = ?config(priv_dir,Config),
+ SysConfig = filename:join(Dir,lists:concat([?MODULE,"_",Func,".config"])),
+ ok = file:write_file(SysConfig, io_lib:format("[{kernel,[{logger,~p}]}].",[Var])),
+ Pa = filename:dirname(code:which(?MODULE)),
+ Name = lists:concat([?MODULE,"_",Func]),
+ Args = lists:concat([" -config ",filename:rootname(SysConfig)," -pa ",Pa]),
+ ct:pal("Starting ~p with ~tp", [Name,Args]),
+ %% Start a node which prints kernel logs to the destination specified by Type
+ {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]),
+ HProcName =
+ case Type of
+ file -> ?name_to_reg_name(logger_std_h,?STANDARD_HANDLER);
+ disk_log -> ?name_to_reg_name(logger_disk_log_h,?STANDARD_HANDLER)
+ end,
+ Pid = rpc:call(Node,erlang,whereis,[HProcName]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,self()}]),
+ ok = log_on_remote_node(Node,"dummy1"),
+ ?check("dummy1"),
+ {ok,Bin1} = sync_and_read(Node,Type,Log),
+ <<"dummy1\n">> = binary:part(Bin1,{byte_size(Bin1),-7}),
+
+ %% Kill the logger_std_h process
+ exit(Pid, kill),
+
+ %% Wait a bit, then check that it is gone
+ timer:sleep(2000),
+ undefined = rpc:call(Node,erlang,whereis,[HProcName]),
+
+ %% Check that file is not empty
+ {ok,Bin2} = sync_and_read(Node,Type,Log),
+ <<"dummy1\n">> = binary:part(Bin2,{byte_size(Bin2),-7}),
+ ok.
+
+%% Cannot use rpc:call here, since the code would execute on a
+%% process with group_leader on this (the calling) node, and thus
+%% logger would send the log event to the logger process here instead
+%% of logging it itself.
+log_on_remote_node(Node,Msg) ->
+ Pid = self(),
+ _ = spawn_link(Node,
+ fun() -> erlang:group_leader(whereis(user),self()),
+ logger:notice(Msg),
+ Pid ! done
+ end),
+ receive done -> ok end,
+ ok.
+
+
+crash_std_h(cleanup) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
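+%% Sync the standard handler on Node and read back the log contents,
+%% retrying once after a delay if the file is still empty.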
+sync_and_read(Node,disk_log,Log) ->
+ rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]),
+ case file:read_file(Log ++ ".1") of
+ {ok,<<>>} ->
+ timer:sleep(5000),
+ file:read_file(Log ++ ".1");
+ Ok ->
+ Ok
+ end;
+sync_and_read(Node,file,Log) ->
+ rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
+ case file:read_file(Log) of
+ {ok,<<>>} ->
+ timer:sleep(5000),
+ file:read_file(Log);
+ Ok ->
+ Ok
+ end.
+
+bad_input(_Config) ->
+ {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType").
+
+reconfig(Config) ->
+ Dir = ?config(priv_dir,Config),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => standard_io},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{%id := ?MODULE,
+ cb_state:=#{handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
+ filesync_repeat_interval := no_repeat},
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER} =
+ logger_olp:info(h_proc_name()),
+
+ {ok,
+ #{config:=
+ #{type := standard_io,
+ sync_mode_qlen := ?SYNC_MODE_QLEN,
+ drop_mode_qlen := ?DROP_MODE_QLEN,
+ flush_qlen := ?FLUSH_QLEN,
+ burst_limit_enable := ?BURST_LIMIT_ENABLE,
+ burst_limit_max_count := ?BURST_LIMIT_MAX_COUNT,
+ burst_limit_window_time := ?BURST_LIMIT_WINDOW_TIME,
+ overload_kill_enable := ?OVERLOAD_KILL_ENABLE,
+ overload_kill_qlen := ?OVERLOAD_KILL_QLEN,
+ overload_kill_mem_size := ?OVERLOAD_KILL_MEM_SIZE,
+ overload_kill_restart_after := ?OVERLOAD_KILL_RESTART_AFTER,
+ filesync_repeat_interval := no_repeat} =
+ DefaultHConf}}
+ = logger:get_handler_config(?MODULE),
+
+ ok = logger:set_handler_config(?MODULE, config,
+ #{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen => 3,
+ burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 10,
+ overload_kill_enable => true,
+ overload_kill_qlen => 100000,
+ overload_kill_mem_size => 10000000,
+ overload_kill_restart_after => infinity,
+ filesync_repeat_interval => 5000}),
+ #{%id := ?MODULE,
+ cb_state := #{handler_state := #{type := standard_io,
+ file_ctrl_pid := FileCtrlPid},
+ filesync_repeat_interval := no_repeat},
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity} = logger_olp:info(h_proc_name()),
+
+ {ok,#{config :=
+ #{type := standard_io,
+ sync_mode_qlen := 1,
+ drop_mode_qlen := 2,
+ flush_qlen := 3,
+ burst_limit_enable := false,
+ burst_limit_max_count := 10,
+ burst_limit_window_time := 10,
+ overload_kill_enable := true,
+ overload_kill_qlen := 100000,
+ overload_kill_mem_size := 10000000,
+ overload_kill_restart_after := infinity,
+ filesync_repeat_interval := no_repeat} = HConf}} =
+ logger:get_handler_config(?MODULE),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{flush_qlen => ?FLUSH_QLEN}),
+ {ok,#{config:=C1}} = logger:get_handler_config(?MODULE),
+ ct:log("C1: ~p",[C1]),
+ C1 = HConf#{flush_qlen => ?FLUSH_QLEN},
+
+ ok = logger:set_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C2}} = logger:get_handler_config(?MODULE),
+ ct:log("C2: ~p",[C2]),
+ C2 = DefaultHConf#{sync_mode_qlen => 1},
+
+ ok = logger:set_handler_config(?MODULE, config, #{drop_mode_qlen => 100}),
+ {ok,#{config:=C3}} = logger:get_handler_config(?MODULE),
+ ct:log("C3: ~p",[C3]),
+ C3 = DefaultHConf#{drop_mode_qlen => 100},
+
+ ok = logger:update_handler_config(?MODULE, config, #{sync_mode_qlen => 1}),
+ {ok,#{config:=C4}} = logger:get_handler_config(?MODULE),
+ ct:log("C4: ~p",[C4]),
+ C4 = DefaultHConf#{sync_mode_qlen => 1,
+ drop_mode_qlen => 100},
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir,lists:concat([?FUNCTION_NAME,".log"])),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => {file,File}},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ {ok,#{config:=#{filesync_repeat_interval:=FSI}=FileHConfig}} =
+ logger:get_handler_config(?MODULE),
+ ok = logger:update_handler_config(?MODULE,config,
+ #{filesync_repeat_interval=>FSI+2000}),
+ {ok,#{config:=C5}} = logger:get_handler_config(?MODULE),
+ ct:log("C5: ~p",[C5]),
+ C5 = FileHConfig#{filesync_repeat_interval=>FSI+2000},
+
+    %% You are not allowed to actively set 'type' at runtime, since
+    %% this is a write-once field.
+ {error, {illegal_config_change,logger_std_h,_,_}} =
+ logger:set_handler_config(?MODULE,config,#{type=>standard_io}),
+ {ok,#{config:=C6}} = logger:get_handler_config(?MODULE),
+ ct:log("C6: ~p",[C6]),
+ C6 = C5,
+
+ %% ... but if you don't specify 'type', then set_handler_config shall
+ %% NOT reset it to its default value
+ ok = logger:set_handler_config(?MODULE,config,#{sync_mode_qlen=>1}),
+ {ok,#{config:=C7}} = logger:get_handler_config(?MODULE),
+ ct:log("C7: ~p",[C7]),
+ C7 = FileHConfig#{sync_mode_qlen=>1},
+ ok.
+
+reconfig(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+
+file_opts(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ MissingOpts = [raw],
+ Type1 = {file,Log,MissingOpts},
+ ok = logger:add_handler(?MODULE, logger_std_h,
+ #{config => #{type => Type1}}),
+ {ok,#{config:=#{type:=file,file:=Log,modes:=Modes1}}} =
+ logger:get_handler_config(?MODULE),
+ [append,delayed_write,raw] = lists:sort(Modes1),
+ ok = logger:remove_handler(?MODULE),
+
+ OkFileOpts = [raw,append],
+ OkType = {file,Log,OkFileOpts},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => OkType}, % old format
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ ModOpts = [delayed_write|OkFileOpts],
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => file,
+ file => Log,
+ modes => OkFileOpts}, % new format
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ #{cb_state := #{handler_state := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{type:=file,
+ file:=Log,
+ modes:=ModOpts}}} =
+ logger:get_handler_config(?MODULE),
+ logger:notice(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(Log, {ok,B1}, filesync_rep_int()),
+ ok.
+file_opts(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+relative_file_path(_Config) ->
+ {ok,Dir} = file:get_cwd(),
+ AbsName1 = filename:join(Dir,?MODULE),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type=>file},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{cb_state := #{handler_state := #{file:=AbsName1}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{file:=AbsName1}}} =
+ logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+
+ RelName2 = filename:join(atom_to_list(?FUNCTION_NAME),
+ lists:concat([?FUNCTION_NAME,".log"])),
+ AbsName2 = filename:join(Dir,RelName2),
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{file => RelName2},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{cb_state := #{handler_state := #{file:=AbsName2}}} =
+ logger_olp:info(h_proc_name()),
+ {ok,#{config := #{file:=AbsName2}}} =
+ logger:get_handler_config(?MODULE),
+ logger:notice(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(AbsName2, {ok,B1}, filesync_rep_int()),
+
+ ok = file:set_cwd(".."),
+ logger:notice(M2=?msg,?domain),
+ ?check(M2),
+ B20 = ?bin(M2),
+ B2 = <<B1/binary,B20/binary>>,
+ try_read_file(AbsName2, {ok,B2}, filesync_rep_int()),
+
+ {error,_} = logger:update_handler_config(?MODULE,config,#{file=>RelName2}),
+ ok = logger:update_handler_config(?MODULE,config,#{file=>AbsName2}),
+ ok = file:set_cwd(Dir),
+ ok = logger:update_handler_config(?MODULE,config,#{file=>RelName2}),
+ ok.
+relative_file_path(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+
+sync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ Type = {file,Log},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{config => #{type => Type,
+ file_check => 10000},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,nl}}),
+
+ %% check repeated filesync happens
+ start_tracer([{logger_std_h, write_to_dev, 2},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"first\n">>},
+ {file,datasync}]),
+
+ logger:notice("first", ?domain),
+ %% wait for automatic filesync
+ check_tracer(filesync_rep_int()*2),
+
+ %% check that explicit filesync is only done once
+ start_tracer([{logger_std_h, write_to_dev, 2},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"second\n">>},
+ {file,datasync},
+ {no_more,500}
+ ]),
+ logger:notice("second", ?domain),
+ %% do explicit sync
+ logger_std_h:filesync(?MODULE),
+ %% a second sync should be ignored
+ logger_std_h:filesync(?MODULE),
+ check_tracer(100),
+
+ %% check that if there's no repeated filesync active,
+    %% a filesync is still performed when the handler goes idle
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
+ no_repeat = maps:get(filesync_repeat_interval,
+ maps:get(cb_state, logger_olp:info(h_proc_name()))),
+ start_tracer([{logger_std_h, write_to_dev, 2},
+ {file, datasync, 1}],
+ [{logger_std_h, write_to_dev, <<"third\n">>},
+ {file,datasync},
+ {logger_std_h, write_to_dev, <<"fourth\n">>},
+ {file,datasync}]),
+ logger:notice("third", ?domain),
+ %% wait for automatic filesync
+ timer:sleep(?IDLE_DETECT_TIME*2),
+ logger:notice("fourth", ?domain),
+ %% wait for automatic filesync
+ check_tracer(?IDLE_DETECT_TIME*2),
+
+ %% switch repeated filesync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ OneSync = {logger_h_common,handle_cast,repeated_filesync},
+ %% receive 1 repeated_filesync per sec
+ start_tracer([{{logger_h_common,handle_cast,2},
+ [{[repeated_filesync,'_'],[],[]}]}],
+ [OneSync || _ <- lists:seq(1, trunc(WaitT/SyncInt))]),
+
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => SyncInt}),
+ SyncInt = maps:get(filesync_repeat_interval,
+ maps:get(cb_state,logger_olp:info(h_proc_name()))),
+ timer:sleep(WaitT),
+ ok = logger:update_handler_config(?MODULE, config,
+ #{filesync_repeat_interval => no_repeat}),
+ check_tracer(100),
+ ok.
+sync(cleanup, _Config) ->
+ dbg:stop_clear(),
+ logger:remove_handler(?MODULE).
+
+write_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ Log = filename:join(Dir, File),
+ Node = start_std_h_on_new_node(Config, Log),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\n">>}, filesync_rep_int()),
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,write,Log,{error,terminated}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,{error,eacces}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,write,Log,{error,eacces}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, filesync_rep_int()),
+ ok.
+write_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+sync_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ Log = filename:join(Dir, File),
+ Node = start_std_h_on_new_node(Config, Log),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
+
+ SyncInt = 500,
+ ok = rpc:call(Node, logger, update_handler_config,
+ [?STANDARD_HANDLER, config,
+ #{filesync_repeat_interval => SyncInt}]),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ ?check_no_log,
+ ok.
+sync_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
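+%% Start a new node with a standard handler logging to Log, and replace its
+%% formatter so that log events are easy to match in the test.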
+start_std_h_on_new_node(Config, Log) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,[{handler,default,logger_std_h,
+ #{ config => #{ type => {file,Log}}}}]}]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,nl}]),
+ Node.
+
+%% functions for test hook macros to be called by rpc
+set_internal_log(_Mod, _Func) ->
+ ?set_internal_log({_Mod,_Func}).
+set_result(_Op, _Result) ->
+ ?set_result(_Op, _Result).
+set_defaults() ->
+ ?set_defaults().
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+ [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+ Tester ! {log,{Type,Term}},
+ logger:internal_log(Type, Term),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
+
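+%% Send a burst with a low sync_mode_qlen so that the handler switches to
+%% sync mode, and check that no log events are lost.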
+op_switch_to_sync_file(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 500,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => NumOfReqs+1,
+ flush_qlen => 2*NumOfReqs,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ %% TRecvPid = start_op_trace(),
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Lines = count_lines(Log),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(async,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(sync,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_switch(async,sync,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(drop,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(flush,Events) end),
+ %% stop_op_trace(TRecvPid),
+ NumOfReqs = Lines,
+ ok = file_delete(Log),
+ ok.
+op_switch_to_sync_file(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_sync_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NumOfReqs = 500,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 3,
+ drop_mode_qlen => NumOfReqs+1,
+ flush_qlen => 2*NumOfReqs,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ ok.
+op_switch_to_sync_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop_file() ->
+ [{timetrap,{seconds,180}}].
+op_switch_to_drop_file(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ Bursts = 10,
+ NewHConfig =
+ HConfig#{config =>
+ StdHConfig#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen =>
+ Procs*NumOfReqs*Bursts,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+                %% It sometimes happens that the handler gets the
+                %% requests at a slow enough pace so that dropping
+                %% never occurs. Therefore, let's generate a number of
+                %% bursts to increase the chance of message buildup.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1, Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [Procs*NumOfReqs*Bursts-Logged,Procs*NumOfReqs*Bursts]),
+ true = (Logged < (Procs*NumOfReqs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+    %% test repeatedly, hoping that one of the attempts will succeed.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_drop_file(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+op_switch_to_drop_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NumOfReqs = 300,
+ Procs = 2,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 1,
+ drop_mode_qlen => 2,
+ flush_qlen =>
+ Procs*NumOfReqs+1,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ ok.
+op_switch_to_drop_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_flush_file() ->
+ [{timetrap,{minutes,5}}].
+op_switch_to_flush_file(Config) ->
+ Test =
+ fun() ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% NOTE: it's important that both async and sync
+ %% requests have been queued when the flush happens
+ %% (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{config =>
+ StdHConfig#{sync_mode_qlen => 2,
+ %% disable drop mode
+ drop_mode_qlen => 300,
+ flush_qlen => 300,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1500,
+ Procs = 10,
+ Bursts = 10,
+                %% It sometimes happens that the handler either gets
+                %% the requests at a slow enough pace so that flushing
+                %% never occurs, or it gets all messages at once,
+                %% causing all messages to get flushed (no dropping of
+                %% sync messages gets tested). Therefore, let's
+                %% generate a number of bursts to increase the chance
+                %% of message buildup in some random fashion.
+ [send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice) ||
+ _ <- lists:seq(1,Bursts)],
+ Logged = count_lines(Log),
+ ok = stop_handler(?MODULE),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [NumOfReqs*Procs*Bursts-Logged,NumOfReqs*Procs*Bursts]),
+ true = (Logged < (NumOfReqs*Procs*Bursts)),
+ true = (Logged > 0),
+ _ = file_delete(Log),
+ ok
+ end,
+ %% As it's tricky to get the timing right in only one go, we perform the
+    %% test repeatedly, hoping that one of the attempts will succeed.
+ case repeat_until_ok(Test, 10) of
+ {ok,{Failures,_Result}} ->
+ ct:log("Failed ~w times before success!", [Failures]);
+ {fails,Reason} ->
+ ct:fail(Reason)
+ end.
+op_switch_to_flush_file(cleanup, _Config) ->
+ _ = stop_handler(?MODULE).
+
+op_switch_to_flush_tty() ->
+ [{timetrap,{minutes,5}}].
+op_switch_to_flush_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+
+ %% it's important that both async and sync requests have been queued
+ %% when the flush happens (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+ %% disable drop mode
+ drop_mode_qlen => 100,
+ flush_qlen => 100,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1000,
+ Procs = 100,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ ok.
+op_switch_to_flush_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
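+%% With burst limit disabled, all events in the burst must be logged.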
+limit_burst_disabled(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => StdHConfig#{burst_limit_enable => false,
+ burst_limit_max_count => 10,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ NumOfReqs = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
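+%% With burst limit enabled, only burst_limit_max_count events must be
+%% logged within one window.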
+limit_burst_enabled_one(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => 2000,
+ drop_mode_qlen => 200,
+ flush_qlen => 300}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ReqLimit = Logged,
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_one(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
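+%% Send a burst lasting several burst limit windows and check that roughly
+%% burst_limit_max_count events are logged per window.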
+limit_burst_enabled_period(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ BurstTWin = 1000,
+ NewHConfig =
+ HConfig#{config => StdHConfig#{burst_limit_enable => true,
+ burst_limit_max_count => ReqLimit,
+ burst_limit_window_time => BurstTWin,
+ drop_mode_qlen => 20000,
+ flush_qlen => 20001}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+
+ Windows = 3,
+ Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ true = (Logged > (ReqLimit*Windows)) andalso
+ (Logged < (ReqLimit*(Windows+2))),
+ ok = file_delete(Log),
+ ok.
+limit_burst_enabled_period(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
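+%% With overload kill disabled, the handler process must survive the burst
+%% even though the kill limits are exceeded.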
+kill_disabled(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>false,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>100}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file_delete(Log),
+ true = is_pid(whereis(h_proc_name())),
+ ok.
+kill_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
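+%% Check that the handler process is terminated when the queue length limit
+%% is exceeded, and that it is restarted after overload_kill_restart_after.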
+qlen_kill_new(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_mem_size=>Mem0+50000,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+qlen_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% choke the standard handler on a remote node to verify the termination
+%% works as expected
+qlen_kill_std(_Config) ->
+ %%! HERE
+ %% Dir = ?config(priv_dir, Config),
+ %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ %% Log = filename:join(Dir, File),
+ %% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ %% ok = rpc:call(Node, logger, update_handler_config,
+ %% [?STANDARD_HANDLER, config,
+ %% #{overload_kill_enable=>true,
+ %% overload_kill_qlen=>10,
+ %% overload_kill_mem_size=>100000}]),
+ {skip,"Not done yet"}.
+
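+%% As qlen_kill_new, but trigger the termination via the memory limit
+%% (overload_kill_mem_size) instead of the queue length limit.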
+mem_kill_new(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(h_proc_name()),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+ NewHConfig =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>50000,
+ overload_kill_mem_size=>Mem0+500,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 4,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, notice),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, notice),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ file_delete(Log),
+ {ok,_} = wait_for_process_up(RestartAfter * 3),
+ ok
+ after
+ 5000 ->
+ Info = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+mem_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% choke the standard handler on a remote node to verify the termination
+%% works as expected
+mem_kill_std(_Config) ->
+ {skip,"Not done yet"}.
+
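+%% Check that a killed handler is not restarted when
+%% overload_kill_restart_after is infinity, but is restarted with a new pid
+%% when a finite restart delay is configured.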
+restart_after() ->
+ [{timetrap,{minutes,2}}].
+restart_after(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig1 =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_restart_after=>infinity}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig1),
+ MRef1 = erlang:monitor(process, whereis(h_proc_name())),
+ %% kill handler
+ send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef1, _, _, _Reason1} ->
+ file_delete(Log),
+ error = wait_for_process_up(?OVERLOAD_KILL_RESTART_AFTER * 3),
+ ok
+ after
+ 5000 ->
+ Info1 = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info1]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+
+ {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ RestartAfter = ?OVERLOAD_KILL_RESTART_AFTER,
+
+ NewHConfig2 =
+ HConfig#{config=>StdHConfig#{overload_kill_enable=>true,
+ overload_kill_qlen=>10,
+ overload_kill_restart_after=>RestartAfter}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig2),
+ Pid0 = whereis(h_proc_name()),
+ MRef2 = erlang:monitor(process, Pid0),
+ %% kill handler
+ send_burst({n,100}, {spawn,4,0}, {chars,79}, notice),
+ receive
+ {'DOWN', MRef2, _, _, _Reason2} ->
+ file_delete(Log),
+ {ok,Pid1} = wait_for_process_up(RestartAfter * 3),
+ false = (Pid1 == Pid0),
+ ok
+ after
+ 5000 ->
+ Info2 = logger_olp:info(h_proc_name()),
+ ct:pal("Handler state = ~p", [Info2]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+ ok.
+restart_after(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% send handler requests (sync, info, reset, change_config)
+%% during high load to verify that sync, dropping and flushing are
+%% handled correctly.
+handler_requests_under_load() ->
+ [{timetrap,{minutes,3}}].
+handler_requests_under_load(Config) ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{config => StdHConfig#{sync_mode_qlen => 2,
+ drop_mode_qlen => 1000,
+ flush_qlen => 2000,
+ burst_limit_enable => false}},
+ ok = logger:update_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(
+ fun() -> send_requests(1,[{logger_std_h,filesync,[?MODULE],[]},
+ {logger_olp,info,[h_proc_name()],[]},
+ {logger_olp,reset,[h_proc_name()],[]},
+ {logger,update_handler_config,
+ [?MODULE, config,
+ #{overload_kill_enable => false}],
+ []}])
+ end),
+ Sent = send_burst({t,10000}, seq, {chars,79}, notice),
+ Pid ! {self(),finish},
+ ReqResult = receive {Pid,Result} -> Result end,
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ FindError = fun(Res) ->
+ [E || E <- Res,
+ is_tuple(E) andalso (element(1,E) == error)]
+ end,
+ Errors = [{Func,FindError(Res)} || {_,Func,_,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,_,_,Res}, N) -> N + length(Res) end,
+ 0, ReqResult),
+ ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
+ ok = file_delete(Log).
+handler_requests_under_load(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
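+%% Check that the handler creates a new log file if the current one is
+%% moved away (e.g. by an external log rotation tool).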
+recreate_deleted_log(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ logger:notice("first",?domain),
+ logger_std_h:filesync(?MODULE),
+ ok = file:rename(Log,Log++".old"),
+ logger:notice("second",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,<<"first\n">>} = file:read_file(Log++".old"),
+ {ok,<<"second\n">>} = file:read_file(Log),
+ ok.
+recreate_deleted_log(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
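+%% Check that the handler detects (via a changed inode) that the log file
+%% has been replaced, and reopens it instead of writing to the old file.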
+reopen_changed_log(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ logger:notice("first",?domain),
+ logger_std_h:filesync(?MODULE),
+ ok = file:rename(Log,Log++".old"),
+ ok = file:write_file(Log,""),
+ logger:notice("second",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,<<"first\n">>} = file:read_file(Log++".old"),
+ {ok,<<"second\n">>} = file:read_file(Log),
+ ok.
+reopen_changed_log(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
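+%% Check size-based rotation: the log is rotated to Log.0, Log.1, ... when
+%% max_no_bytes is exceeded, keeping at most max_no_files archives.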
+rotate_size(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=1005}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+
+ ok.
+rotate_size(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
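+%% As rotate_size, but with compress_on_rotate=true, so that archives are
+%% gzipped (Log.0.gz, Log.1.gz).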
+rotate_size_compressed(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ #{config=>#{max_no_bytes=>1000,
+ max_no_files=>2,
+ compress_on_rotate=>true}}),
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {error,enoent} = file:read_file_info(Log++".0.gz"),
+
+ logger:notice(Str,?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {error,enoent} = file:read_file_info(Log++".1.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,51)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,50)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=1000}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ logger:notice("bbbb",?domain),
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {error,enoent} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=38}} = file:read_file_info(Log++".0.gz"),
+ {error,enoent} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=35}} = file:read_file_info(Log++".1.gz"),
+ {error,enoent} = file:read_file_info(Log++".2"),
+ {error,enoent} = file:read_file_info(Log++".2.gz"),
+
+ ok.
+rotate_size_compressed(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
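+%% Check that an existing log file is reused when the handler is restarted
+%% with the same configuration, and that rotation then continues as before.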
+rotate_size_reopen(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,#{config=>#{max_no_bytes=>1000,
+ max_no_files=>2}}),
+
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ {ok,HConfig} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(?MODULE,maps:get(module,HConfig),HConfig),
+ {ok,#file_info{size=800}} = file:read_file_info(Log),
+
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,40)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=580}} = file:read_file_info(Log),
+ {ok,#file_info{size=1020}} = file:read_file_info(Log++".0"),
+ ok.
+rotate_size_reopen(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
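+%% Check validation of the rotation options, and that changing them in
+%% runtime immediately adjusts the existing log and archive files.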
+rotation_opts(Config) ->
+ {Log,_HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ #{max_no_bytes:=infinity,
+ max_no_files:=0,
+ compress_on_rotate:=false} = StdHConfig,
+
+ %% Test bad rotation config
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_bytes=>0}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,#{max_no_files=>infinity}),
+ {error,{invalid_config,_,_}} =
+ logger:update_handler_config(?MODULE,config,
+ #{compress_on_rotate=>undefined}),
+
+
+ %% Test good rotation config - start with no rotation
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=200}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+    %% Turn on rotation, check that the existing file is rotated since its
+ %% size exceeds max_no_bytes
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0 = Log++".0",
+ {ok,#file_info{size=200}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count and check that nothing changes with existing files
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>200,
+ max_no_files=>3}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+    %% Add more log events and see that the extended size and count take effect
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".2"),
+ [_,_,_] = filelib:wildcard(Log++".*"),
+
+ %% Reduce count and check that archive files that exceed the new
+    %% count are removed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=220}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Extend size and count again, and turn on compression. Check
+ %% that archives are compressed
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2,
+ compress_on_rotate=>true}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ Log0gz = Log0++".gz",
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ [Log0gz] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,13)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log0gz),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".1.gz"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+    %% Reduce count and turn off compression. Check that archives that
+    %% exceed the new count are removed, and the rest are
+    %% uncompressed.
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_files=>1,
+ compress_on_rotate=>false}),
+ timer:sleep(100), % give some time to execute config_changed
+ {ok,#file_info{size=20}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log0),
+ [Log0] = filelib:wildcard(Log++".*"),
+
+ %% Check that config and handler state agree on the current rotation settings
+ {ok,#{config:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger:get_handler_config(?MODULE),
+ #{cb_state:=#{handler_state:=#{max_no_bytes:=100,
+ max_no_files:=1,
+ compress_on_rotate:=false}}} =
+ logger_olp:info(h_proc_name()),
+ ok.
+rotation_opts(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
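+%% Illustrative sketch (not used by the suite): the rotation options
+%% exercised above, as an application could set them on a running
+%% handler. The handler id 'default', the function name and the chosen
+%% limits are assumptions made for this example only.
+example_enable_rotation() ->
+    ok = logger:update_handler_config(
+           default, config,
+           #{max_no_bytes => 10485760,      % rotate when the log reaches ~10 MB
+             max_no_files => 5,             % keep five rotated archives
+             compress_on_rotate => true}).  % gzip the archives
+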
+rotation_opts_restart_handler(Config) ->
+ {Log,_HConfig,_StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ok = logger:update_handler_config(?MODULE,
+ config,
+ #{max_no_bytes=>100,
+ max_no_files=>2}),
+
+ %% Fill all logs
+ Str = lists:duplicate(19,$a),
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,15)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".1"),
+ [_,_] = filelib:wildcard(Log++".*"),
+
+ %% Stop/start handler and turn off rotation. Check that archives are removed.
+ {ok,#{config:=StdHConfig1}=HConfig1} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig1#{config=>StdHConfig1#{max_no_bytes=>infinity}}),
+ timer:sleep(100),
+ {ok,#file_info{size=60}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+ %% Add some log events and check that file is no longer rotated.
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=260}} = file:read_file_info(Log),
+ [] = filelib:wildcard(Log++".*"),
+
+    %% Stop/start handler and turn on rotation. Check that the file is rotated.
+ {ok,#{config:=StdHConfig2}=HConfig2} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig2#{config=>StdHConfig2#{max_no_bytes=>100,
+ max_no_files=>2}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ %% Fill all logs
+ [logger:notice(Str,?domain) || _ <- lists:seq(1,10)],
+ logger_std_h:filesync(?MODULE),
+ {ok,#file_info{size=80}} = file:read_file_info(Log),
+ {ok,#file_info{size=120}} = file:read_file_info(Log++".0"),
+ {ok,#file_info{size=260}} = file:read_file_info(Log++".1"),
+
+ %% Stop/start handler, reduce count and turn on compression. Check
+ %% that excess archives are removed, and the rest compressed.
+ {ok,#{config:=StdHConfig3}=HConfig3} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig3#{config=>StdHConfig3#{max_no_bytes=>75,
+ max_no_files=>1,
+ compress_on_rotate=>true}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=29}} = file:read_file_info(Log++".0.gz"),
+ [_] = filelib:wildcard(Log++".*"),
+
+    %% Stop/start handler and turn off compression. Check that archives
+    %% are decompressed.
+ {ok,#{config:=StdHConfig4}=HConfig4} = logger:get_handler_config(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ ok = logger:add_handler(
+ ?MODULE,logger_std_h,
+ HConfig4#{config=>StdHConfig4#{compress_on_rotate=>false}}),
+ timer:sleep(100),
+ {ok,#file_info{size=0}} = file:read_file_info(Log),
+ {ok,#file_info{size=80}} = file:read_file_info(Log++".0"),
+ [_] = filelib:wildcard(Log++".*"),
+
+ ok.
+rotation_opts_restart_handler(cleanup,_Config) ->
+ ok = stop_handler(?MODULE).
+
+%%%-----------------------------------------------------------------
+%%%
+send_requests(TO, Reqs = [{Mod,Func,Args,Res}|Rs]) ->
+ receive
+ {From,finish} ->
+ From ! {self(),Reqs}
+ after
+ TO ->
+ Result = apply(Mod,Func,Args),
+ send_requests(TO, Rs ++ [{Mod,Func,Args,[Result|Res]}])
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%%
+start_handler(Name, TTY, _Config) when TTY == standard_io;
+ TTY == standard_error->
+ ok = logger:add_handler(Name,
+ logger_std_h,
+ #{config => #{type => TTY},
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
+ formatter=>{?MODULE,op}}),
+ {ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
+ {HConfig,StdHConfig};
+
+start_handler(Name, FuncName, Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([FuncName,".log"])),
+ ct:pal("Logging to ~tp", [Log]),
+ Type = {file,Log},
+ _ = file_delete(Log),
+ ok = logger:add_handler(Name,
+ logger_std_h,
+ #{config => #{type => Type},
+ filter_default=>stop,
+ filters=>filter_only_this_domain(Name),
+ formatter=>{?MODULE,op}}),
+ {ok,HConfig = #{config := StdHConfig}} = logger:get_handler_config(Name),
+ {Log,HConfig,StdHConfig}.
+
+filter_only_this_domain(Name) ->
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,super,[Name]}}}].
+
+
+stop_handler(Name) ->
+ R = logger:remove_handler(Name),
+ ct:pal("Handler ~p stopped! Result: ~p", [Name,R]),
+ R.
+
+count_lines(File) ->
+ wait_until_written(File, -1),
+ count_lines1(File).
+
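+%% Poll the file until its size stops changing: the size must match the
+%% previous reading and still be unchanged one second later.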
+wait_until_written(File, Sz) ->
+ timer:sleep(2000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ timer:sleep(1000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ ok;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end.
+
+count_lines1(File) ->
+ {_,Dev} = file:open(File, [read]),
+ Lines = count_lines2(Dev, 0),
+ file:close(Dev),
+ Lines.
+
+count_lines2(Dev, LC) ->
+ case file:read_line(Dev) of
+ {ok,"Handler logger_std_h_SUITE " ++_} ->
+ %% Not counting handler info
+ count_lines2(Dev,LC);
+ {ok,_} ->
+ count_lines2(Dev,LC+1);
+ eof -> LC
+ end.
+
+send_burst(NorT, Type, {chars,Sz}, Class) ->
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)],
+ case NorT of
+ {n,N} ->
+ %% process_flag(priority, high),
+ send_n_burst(N, Type, Text, Class),
+ %% process_flag(priority, normal),
+ N;
+ {t,T} ->
+ ct:pal("Sending messages sequentially for ~w ms", [T]),
+ T0 = erlang:monotonic_time(millisecond),
+ send_t_burst(T0, T, Text, Class, 0)
+ end.
+
+send_n_burst(0, _, _Text, _Class) ->
+ ok;
+send_n_burst(N, seq, Text, Class) ->
+ ok = logger:Class(Text, ?domain),
+ send_n_burst(N-1, seq, Text, Class);
+send_n_burst(N, {spawn,Ps,TO}, Text, Class) ->
+ ct:pal("~w processes each sending ~w messages", [Ps,N]),
+ MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end,
+ monitor(process,spawn_link(per_proc_fun(N,Text,Class,X)))
+ end || X <- lists:seq(1,Ps)],
+ lists:foreach(fun(MRef) ->
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ end, MRefs),
+ ct:pal("Message burst sent", []),
+ ok.
+
+send_t_burst(T0, T, Text, Class, N) ->
+ T1 = erlang:monotonic_time(millisecond),
+ if (T1-T0) > T ->
+ N;
+ true ->
+ ok = logger:Class(Text, ?domain),
+ send_t_burst(T0, T, Text, Class, N+1)
+ end.
+
+per_proc_fun(N,Text,Class,X) when X rem 2 == 0 ->
+ fun() ->
+ process_flag(priority,high),
+ send_n_burst(N, seq, Text, Class)
+ end;
+per_proc_fun(N,Text,Class,_) ->
+ fun() ->
+ send_n_burst(N, seq, Text, Class)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Formatter callback
+%%% Using this to send the formatted string back to the test case
+%%% process - so it can check for logged events.
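+%%% A formatter module must export format(LogEvent,FConfig) and return
+%%% chardata. Here the formatter configuration is used to select the
+%%% behaviour (op, nl, no_nl, crash, bad_return) or to carry the pid of
+%%% the test case process that should receive the formatted string.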
+format(_,bad_return) ->
+ bad_return;
+format(_,crash) ->
+ erlang:error(formatter_crashed);
+format(#{msg:={string,String0}},no_nl) ->
+ String = unicode:characters_to_list(String0),
+ String;
+format(#{msg:={string,String0}},nl) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={string,String0}},op) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={report,#{label:={supervisor,progress}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={gen_server,terminate}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={proc_lib,crash}}}},op) ->
+ "";
+format(#{msg:={F,A}},OpOrPid) when is_list(F), is_list(A) ->
+ String = lists:flatten(io_lib:format(F,A)),
+ if is_pid(OpOrPid) -> OpOrPid ! {log,String};
+ true -> ok
+ end,
+ String++"\n";
+format(#{msg:={string,String0}},Pid) ->
+ String = unicode:characters_to_list(String0),
+ Pid ! {log,String},
+ String++"\n".
+
+add_remove_instance_nofile(Type) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{config => #{type => Type},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Pid = whereis(h_proc_name()),
+ true = is_pid(Pid),
+ group_leader(group_leader(),Pid), % to get printouts in test log
+ logger:notice(M1=?msg,?domain),
+ ?check(M1),
+ %% check that sync doesn't do damage even if not relevant
+ ok = logger_std_h:filesync(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(h_proc_name()),
+ logger:notice(?msg,?domain),
+ ?check_no_log,
+ ok.
+
+logger_std_h_remove() ->
+ logger:remove_handler(?MODULE).
+logger_std_h_remove(Id) ->
+ logger:remove_handler(Id).
+
+try_read_file(FileName, Expected, Time) when Time > 0 ->
+ case file:read_file(FileName) of
+ Expected ->
+ ok;
+ Error = {error,_Reason} ->
+ ct:pal("Can't read ~tp: ~tp", [FileName,Error]),
+ erlang:error(Error);
+ Got ->
+ ct:pal("try_read_file got ~tp", [Got]),
+ timer:sleep(500),
+ try_read_file(FileName, Expected, Time-500)
+ end;
+try_read_file(FileName, Expected, _) ->
+ ct:pal("Missing pattern ~tp in ~tp", [Expected,FileName]),
+ erlang:error({error,missing_expected_pattern}).
+
+try_match_file(FileName, Pattern, Time) ->
+ try_match_file(FileName, Pattern, Time, <<>>).
+
+try_match_file(FileName, Pattern, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ {ok, Bin} ->
+ case re:run(Bin,Pattern,[{capture,none}]) of
+ match ->
+ unicode:characters_to_list(Bin);
+ _ ->
+ timer:sleep(100),
+ try_match_file(FileName, Pattern, Time-100, Bin)
+ end;
+ Error ->
+ erlang:error(Error)
+ end;
+try_match_file(_,Pattern,_,Incorrect) ->
+ ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n",
+ [Pattern,Incorrect]),
+ erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+
+repeat_until_ok(Fun, N) ->
+ repeat_until_ok(Fun, 0, N, undefined).
+
+repeat_until_ok(_Fun, Stop, Stop, Reason) ->
+ {fails,Reason};
+
+repeat_until_ok(Fun, C, Stop, FirstReason) ->
+ if C > 0 -> timer:sleep(5000);
+ true -> ok
+ end,
+ try Fun() of
+ Result ->
+ {ok,{C,Result}}
+ catch
+ _:Reason:Stack ->
+ ct:pal("Test fails: ~p (~p)~n", [Reason,hd(Stack)]),
+ if FirstReason == undefined ->
+ repeat_until_ok(Fun, C+1, Stop, {Reason,Stack});
+ true ->
+ repeat_until_ok(Fun, C+1, Stop, FirstReason)
+ end
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%%
+start_op_trace() ->
+ TraceFun = fun({trace,_,call,{_Mod,Func,Details}}, Pid) ->
+ Pid ! {trace_call,Func,Details},
+ Pid;
+ ({trace,_,return_from,{_Mod,Func,_},RetVal}, Pid) ->
+ Pid ! {trace_return,Func,RetVal},
+ Pid
+ end,
+ TRecvPid = spawn_link(fun() -> trace_receiver(5000) end),
+ {ok,_} = dbg:tracer(process, {TraceFun, TRecvPid}),
+
+ {ok,_} = dbg:p(whereis(h_proc_name()), [c]),
+ {ok,_} = dbg:p(self(), [c]),
+
+ MS1 = dbg:fun2ms(fun([_]) -> return_trace() end),
+ {ok,_} = dbg:tpl(logger_h_common, check_load, 1, MS1),
+
+ {ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []),
+
+ MS2 = dbg:fun2ms(fun([_,mode]) -> return_trace() end),
+ {ok,_} = dbg:tpl(ets, lookup, 2, MS2),
+
+ ct:pal("Tracing started!", []),
+ TRecvPid.
+
+stop_op_trace(TRecvPid) ->
+ dbg:stop_clear(),
+ unlink(TRecvPid),
+ exit(TRecvPid, kill),
+ ok.
+
+find_mode(flush, Events) ->
+ lists:any(fun({trace_call,flush_log_requests,[_,_]}) -> true;
+ (_) -> false
+ end, Events);
+find_mode(Mode, Events) ->
+ lists:keymember([{mode,Mode}], 3, Events).
+
+%% find_switch(_From, To, Events) ->
+%% try lists:foldl(fun({trace_return,check_load,{To,_,_,_}},
+%% {trace_call,check_load,[#{mode := From}]}) ->
+%% throw(match);
+%% (Event, _) ->
+%% Event
+%% end, undefined, Events) of
+%% _ -> false
+%% catch
+%% throw:match -> true
+%% end.
+
+analyse_trace(TRecvPid, TestFun) ->
+ TRecvPid ! {test,self(),TestFun},
+ receive
+ {result,TRecvPid,Result} ->
+ Result
+ after
+ 60000 ->
+ fails
+ end.
+
+trace_receiver(IdleT) ->
+ Msgs = receive_until_idle(IdleT, 5, []),
+ ct:pal("~w trace events generated", [length(Msgs)]),
+ analyse(Msgs).
+
+receive_until_idle(IdleT, WaitN, Msgs) ->
+ receive
+ Msg = {trace_call,_,_} ->
+ receive_until_idle(IdleT, 5, [Msg | Msgs]);
+ Msg = {trace_return,_,_} ->
+ receive_until_idle(IdleT, 5, [Msg | Msgs])
+ after
+ IdleT ->
+ if WaitN == 0 ->
+ Msgs;
+ true ->
+ receive_until_idle(IdleT, WaitN-1, Msgs)
+ end
+ end.
+
+analyse(Msgs) ->
+ receive
+ {test,From,TestFun} ->
+ From ! {result,self(),TestFun(Msgs)},
+ analyse(Msgs)
+ end.
+
+start_tracer(Trace,Expected) ->
+ Pid = self(),
+ FileCtrlPid = maps:get(file_ctrl_pid,
+ maps:get(handler_state,
+ maps:get(cb_state,
+ logger_olp:info(h_proc_name())))),
+ dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
+ dbg:p(whereis(h_proc_name()),[c]),
+ dbg:p(FileCtrlPid,[c]),
+ tpl(Trace),
+ ok.
+
+tpl([{M,F,A}|Trace]) ->
+ tpl([{{M,F,A},[]}|Trace]);
+tpl([{{M,F,A},MS}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,MS),
+ case lists:keyfind(matched,1,Match) of
+ {_,_,1} ->
+ ok;
+ _ ->
+ dbg:stop_clear(),
+ throw({skip,"Can't trace "++atom_to_list(M)++":"++
+ atom_to_list(F)++"/"++integer_to_list(A)})
+ end,
+ tpl(Trace);
+tpl([]) ->
+ ok.
+
+tracer({trace,_,call,{logger_h_common,handle_cast,[Op|_]}},
+ {Pid,[{Mod,Func,Op}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
+tracer({trace,_,call,{Mod=logger_std_h,Func=write_to_dev,[Data,_]}},
+ {Pid,[{Mod,Func,Data}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Data});
+tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func});
+tracer({trace,_,call,Call}, {Pid,Expected}) ->
+ ct:log("Tracer got unexpected: ~p~nExpected: ~p~n",[Call,Expected]),
+ Pid ! {tracer_got_unexpected,Call,Expected},
+ {Pid,Expected}.
+
+maybe_tracer_done(Pid,[]=Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! {tracer_done,0},
+ {Pid,Expected};
+maybe_tracer_done(Pid,[{no_more,T}]=Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! {tracer_done,T},
+ {Pid,Expected};
+maybe_tracer_done(Pid,Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ {Pid,Expected}.
+
+check_tracer(T) ->
+ check_tracer(T,fun() -> ct:fail({timeout,tracer}) end).
+check_tracer(T,TimeoutFun) ->
+ receive
+ {tracer_done,Delay} ->
+ %% Possibly wait Delay ms to check that no unexpected
+ %% traces are received
+ check_tracer(Delay,fun() -> ok end);
+ {tracer_got_unexpected,Got,Expected} ->
+ dbg:stop_clear(),
+ ct:fail({tracer_got_unexpected,Got,Expected})
+ after T ->
+ dbg:stop_clear(),
+ TimeoutFun()
+ end.
+
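+%% Escape the regular expression metacharacter "+" in a string.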
+escape([$+|Rest]) ->
+ [$\\,$+|escape(Rest)];
+escape([H|T]) ->
+ [H|escape(T)];
+escape([]) ->
+ [].
+
+h_proc_name() ->
+ h_proc_name(?MODULE).
+h_proc_name(Name) ->
+ ?name_to_reg_name(logger_std_h,Name).
+
+wait_for_process_up(T) ->
+ wait_for_process_up(?MODULE, h_proc_name(), T).
+
+wait_for_process_up(Name, RegName, T) ->
+ N = (T div 500) + 1,
+ wait_for_process_up1(Name, RegName, N).
+
+wait_for_process_up1(_Name, _RegName, 0) ->
+ error;
+wait_for_process_up1(Name, RegName, N) ->
+ timer:sleep(500),
+ case whereis(RegName) of
+ Pid when is_pid(Pid) ->
+ case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% ct:pal("Process ~p up (~p tries left)",[Name,N]),
+ {ok,Pid};
+ _ ->
+ wait_for_process_up1(Name, RegName, N-1)
+ end;
+ undefined ->
+ %% ct:pal("Waiting for process ~p (~p tries left)",[Name,N]),
+ wait_for_process_up1(Name, RegName, N-1)
+ end.
+
+filesync_rep_int() ->
+ case (fun() -> is_atom(?FILESYNC_REPEAT_INTERVAL) end)() of
+ true -> 5500;
+ false -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end.
+
+
+file_delete(Log) ->
+ file:delete(Log).
diff --git a/lib/kernel/test/logger_stress_SUITE.erl b/lib/kernel/test/logger_stress_SUITE.erl
new file mode 100644
index 0000000000..1a278fb1b2
--- /dev/null
+++ b/lib/kernel/test/logger_stress_SUITE.erl
@@ -0,0 +1,550 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_stress_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct_event.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+
+-ifdef(SAVE_STATS).
+ -define(COLLECT_STATS(_All_,_Procs_),
+ ct:pal("~p",[stats(_All_,_Procs_)])).
+-else.
+  -define(COLLECT_STATS(_All_,_Procs_), ok).
+-endif.
+
+-define(TEST_DURATION,120). % seconds
+
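+%% logger_test_lib is installed as a ct hook (see suite/0); it adds the
+%% suite and test case names to Config and stops any slave nodes that
+%% are left running after each test case.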
+suite() ->
+ [{timetrap,{minutes,3}},
+ {ct_hooks,[logger_test_lib]}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [allow_events,
+ reject_events,
+ std_handler,
+ disk_log_handler,
+ std_handler_time,
+ std_handler_time_big,
+ disk_log_handler_time,
+ disk_log_handler_time_big,
+ emulator_events,
+ remote_events,
+ remote_to_disk_log,
+ remote_emulator_events,
+ remote_emulator_to_disk_log].
+
+%%%-----------------------------------------------------------------
+%%% Test cases
+%%%-----------------------------------------------------------------
+%% Time from log macro call to handler callback
+allow_events(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,?MODULE,#{}}]},
+ {logger_level,notice}]),
+ N = 100000,
+ {T,_} = timer:tc(fun() -> rpc:call(Node,?MODULE,nlogs,[N]) end),
+ IOPS = N * 1000/T, % log events allowed per millisecond
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+    {comment,io_lib:format("~.2f accepted events per millisecond",
+ [IOPS])}.
+
+%% Time from log macro call to reject (log level)
+reject_events(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,?MODULE,#{}}]},
+ {logger_level,error}]),
+ N = 1000000,
+ {T,_} = timer:tc(fun() -> rpc:call(Node,?MODULE,nlogs,[N]) end),
+ IOPS = N * 1000/T, % log events rejected per millisecond
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+    {comment,io_lib:format("~.2f rejected events per millisecond",
+ [IOPS])}.
+
+%% Cascading failure that produces gen_server and proc_lib reports -
+%% how many of the produced log events are actually written to a log
+%% with a logger_std_h file handler.
+std_handler(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,logger_std_h,
+ #{config=>#{type=>{file,"default.log"}}}}]}]),
+
+ cascade({Node,{logger_backend,log_allowed,2},[]},
+ {Node,{logger_std_h,write,4},[{default,logger_std_h_default}]},
+ fun otp_cascading/0).
+std_handler(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+%% Disable overload protection and just print a lot - measure time.
+%% The IOPS reported is the number of log events written per millisecond.
+std_handler_time(Config) ->
+ measure_handler_time(logger_std_h,#{type=>{file,"default.log"}},Config).
+std_handler_time(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+std_handler_time_big(Config) ->
+ measure_handler_time_big(logger_std_h,#{type=>{file,"default.log"}},Config).
+std_handler_time_big(cleanup,_Config) ->
+ _ = file:delete("default.log"),
+ ok.
+
+%% Cascading failure that produces gen_server and proc_lib reports -
+%% how many of the produced log events are actually written to a log
+%% with a logger_disk_log_h wrap file handler.
+disk_log_handler(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,logger_disk_log_h,#{}}]}]),
+ cascade({Node,{logger_backend,log_allowed,2},[]},
+ {Node,{logger_disk_log_h,write,4},
+ [{default,logger_disk_log_h_default}]},
+ fun otp_cascading/0).
+disk_log_handler(cleanup,_Config) ->
+ Files = filelib:wildcard("default.log.*"),
+ [_ = file:delete(F) || F <- Files],
+ ok.
+
+%% Disable overload protection and just print a lot - measure time.
+%% The IOPS reported is the number of log events written per millisecond.
+disk_log_handler_time(Config) ->
+ measure_handler_time(logger_disk_log_h,#{type=>halt},Config).
+disk_log_handler_time(cleanup,_Config) ->
+ _ = file:delete("default"),
+ ok.
+
+disk_log_handler_time_big(Config) ->
+ measure_handler_time_big(logger_disk_log_h,#{type=>halt},Config).
+disk_log_handler_time_big(cleanup,_Config) ->
+ _ = file:delete("default"),
+ ok.
+
+%% Cascading failure that produces log events from the emulator - how
+%% many of the produced log events pass through the proxy.
+emulator_events(Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(Config,
+ [{logger,
+ [{handler,default,?MODULE,#{}}]}]),
+ cascade({Node,{?MODULE,producer,0},[]},
+ {Node,{?MODULE,log,2},[{proxy,logger_proxy}]},
+ fun em_cascading/0).
+
+%% Cascading failure that produces gen_server and proc_lib reports on
+%% a remote node - how many of the produced log events pass through
+%% the proxy.
+remote_events(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,?MODULE,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{logger_backend,log_allowed,2},[{remote_proxy,logger_proxy}]},
+ {Node1,{?MODULE,log,2},[{local_proxy,logger_proxy}]},
+ fun otp_cascading/0).
+
+%% Cascading failure that produces gen_server and proc_lib reports on
+%% a remote node - how many of the produced log events are actually
+%% written to a log with a logger_disk_log_h wrap file handler.
+remote_to_disk_log(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,logger_disk_log_h,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{logger_backend,log_allowed,2},[{remote_proxy,logger_proxy}]},
+ {Node1,{logger_disk_log_h,write,4},
+ [{local_proxy,logger_proxy},
+ {local_default,logger_disk_log_h_default}]},
+ fun otp_cascading/0).
+remote_to_disk_log(cleanup,_Config) ->
+ Files = filelib:wildcard("default.log.*"),
+ [_ = file:delete(F) || F <- Files],
+ ok.
+
+%% Cascading failure that produces log events from the emulator on a
+%% remote node - how many of the produced log events pass through the
+%% proxy.
+remote_emulator_events(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,?MODULE,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{?MODULE,producer,0},[{remote_proxy,logger_proxy}]},
+ {Node1,{?MODULE,log,2},[{local_proxy,logger_proxy}]},
+ fun em_cascading/0).
+
+%% Cascading failure that produces log events from the emulator on a
+%% remote node - how many of the produced log events are actually
+%% written to a log with a logger_disk_log_h wrap file handler.
+remote_emulator_to_disk_log(Config) ->
+ {ok,_,Node1} =
+ logger_test_lib:setup([{postfix,1}|Config],
+ [{logger,
+ [{handler,default,logger_disk_log_h,#{}}]}]),
+ {ok,_,Node2} =
+ logger_test_lib:setup([{postfix,2}|Config],[]),
+ cascade({Node2,{?MODULE,producer,0},[{remote_proxy,logger_proxy}]},
+ {Node1,{logger_disk_log_h,write,4},
+ [{local_proxy,logger_proxy},
+ {local_default,logger_disk_log_h_default}]},
+ fun em_cascading/0).
+remote_emulator_to_disk_log(cleanup,_Config) ->
+ Files = filelib:wildcard("default.log.*"),
+ [_ = file:delete(F) || F <- Files],
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+measure_handler_time(Module,HCfg,Config) ->
+ measure_handler_time(Module,100000,small_fa(),millisecond,HCfg,#{},Config).
+
+measure_handler_time_big(Module,HCfg,Config) ->
+ FCfg = #{chars_limit=>4096, max_size=>1024},
+ measure_handler_time(Module,100,big_fa(),second,HCfg,FCfg,Config).
+
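+%% Overload protection is effectively disabled by raising all queue
+%% length limits above N and turning off the burst limit and the
+%% repeated filesync, so the figure reflects plain write throughput.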
+measure_handler_time(Module,N,FA,Unit,HCfg,FCfg,Config) ->
+ {ok,_,Node} =
+ logger_test_lib:setup(
+ Config,
+ [{logger,
+ [{handler,default,Module,
+ #{formatter => {logger_formatter,
+ maps:merge(#{legacy_header=>false,
+ single_line=>true},FCfg)},
+ config=>maps:merge(#{sync_mode_qlen => N+1,
+ drop_mode_qlen => N+1,
+ flush_qlen => N+1,
+ burst_limit_enable => false,
+ filesync_repeat_interval => no_repeat},
+ HCfg)}}]}]),
+ %% HPid = rpc:call(Node,erlang,whereis,[?name_to_reg_name(Module,default)]),
+ %% {links,[_,FCPid]} = rpc:call(Node,erlang,process_info,[HPid,links]),
+ T0 = erlang:monotonic_time(millisecond),
+ ok = rpc:call(Node,?MODULE,nlogs_wait,[N,FA,Module]),
+ %% ok = rpc:call(Node,fprof,apply,[fun ?MODULE:nlogs_wait/2,[N div 10,FA,Module],[{procs,[HPid,FCPid]}]]),
+ T1 = erlang:monotonic_time(millisecond),
+ T = T1-T0,
+ M = case Unit of
+ millisecond -> 1;
+ second -> 1000
+ end,
+ IOPS = M*N/T,
+    ct:pal("N: ~p~nT: ~p~nIOPS: ~.2f events per ~w",[N,T,IOPS,Unit]),
+ %% Stats = rpc:call(Node,logger_olp,info,[?name_to_reg_name(Module,default)]),
+ %% ct:pal("Stats: ~p",[Stats]),
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,IOPS}]}),
+    {comment,io_lib:format("~.2f events written per ~w",[IOPS,Unit])}.
+
+nlogs_wait(N,{F,A},Module) ->
+ group_leader(whereis(user),self()),
+ [?LOG_NOTICE(F,A) || _ <- lists:seq(1,N)],
+ wait(Module).
+
+wait(Module) ->
+ case Module:filesync(default) of
+ {error,handler_busy} ->
+ wait(Module);
+ ok ->
+ ok
+ end.
+
+small_fa() ->
+ Str = "\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "[\\]^_`abcdefghijklmnopqr",
+ {"~ts",[Str]}.
+
+big_fa() ->
+ {"~p",[lists:duplicate(1048576,"a")]}.
+
+nlogs(N) ->
+ group_leader(whereis(user),self()),
+ Str = "\"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+ "[\\]^_`abcdefghijklmnopqr",
+ [?LOG_NOTICE(Str) || _ <- lists:seq(1,N)],
+ ok.
+
+%% cascade(ProducerInfo,ConsumerInfo,TestFun)
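+%% Both ProducerInfo and ConsumerInfo are on the form {Node,MFA,StatProcs}.
+%% The given MFA is traced on the given node and the tracer fun counts
+%% producer and consumer calls in an ets table; after ?TEST_DURATION
+%% seconds the result is reported as the percentage of produced events
+%% that reached the consumer.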
+cascade({PNode,PMFA,_PStatProcs},{CNode,CMFA,_CStatProcs},TestFun) ->
+ Tab = ets:new(counter,[set,public]),
+ ets:insert(Tab,{producer,0}),
+ ets:insert(Tab,{consumer,0}),
+ dbg:tracer(process,{fun tracer/2,{Tab,PNode,CNode}}),
+ dbg:n(PNode),
+ dbg:n(CNode),
+ dbg:cn(node()),
+ dbg:p(all,[call,arity]),
+ dbg:tpl(PMFA,[]),
+ dbg:tpl(CMFA,[]),
+
+ Pid = rpc:call(CNode,?MODULE,wrap_test,[PNode,TestFun]),
+ MRef = erlang:monitor(process,Pid),
+ TO = ?TEST_DURATION*1000,
+ receive {'DOWN',MRef,_,_,Reason} ->
+ ct:fail({remote_pid_down,Reason})
+ after TO ->
+ All = ets:lookup_element(Tab,producer,2),
+ Written = ets:lookup_element(Tab,consumer,2),
+ dbg:stop_clear(),
+ ?COLLECT_STATS(All,
+ [{PNode,P,Id} || {Id,P} <- _PStatProcs] ++
+ [{CNode,P,Id} || {Id,P} <- _CStatProcs]),
+ Ratio = Written/All * 100,
+ ct_event:notify(#event{name = benchmark_data,
+ data = [{value,Ratio}]}),
+ {comment,io_lib:format("~p % (~p written, ~p produced)",
+ [round(Ratio),Written,All])}
+ end.
+
+wrap_test(Fun) ->
+ wrap_test(node(),Fun).
+wrap_test(Node,Fun) ->
+ reset(),
+ group_leader(whereis(user),self()),
+ rpc:call(Node,?MODULE,do_fun,[Fun]).
+
+do_fun(Fun) ->
+ reset(),
+ Fun().
+
+reset() ->
+ reset([logger_std_h_default, logger_disk_log_h_default, logger_proxy]).
+reset([P|Ps]) ->
+ is_pid(whereis(P)) andalso logger_olp:reset(P),
+ reset(Ps);
+reset([]) ->
+ ok.
+
+
+tracer({trace,_,call,{?MODULE,producer,_}},{Tab,_PNode,_CNode}=S) ->
+ ets:update_counter(Tab,producer,1),
+ S;
+tracer({trace,Pid,call,{logger_backend,log_allowed,_}},{Tab,PNode,_CNode}=S) when node(Pid)=:=PNode ->
+ ets:update_counter(Tab,producer,1),
+ S;
+tracer({trace,_,call,{?MODULE,log,_}},{Tab,_PNode,_CNode}=S) ->
+ ets:update_counter(Tab,consumer,1),
+ S;
+tracer({trace,_,call,{_,write,_}},{Tab,_PNode,_CNode}=S) ->
+ ets:update_counter(Tab,consumer,1),
+ S;
+tracer(_,S) ->
+ S.
+
+
+%%%-----------------------------------------------------------------
+%%% Collect statistics
+-define(STAT_KEYS,
+ [burst_drops,
+ calls,
+ casts,
+ drops,
+ flushed,
+ flushes,
+ freq,
+ last_qlen,
+ max_qlen,
+ time,
+ writes]).
+-define(EVENT_KEYS,
+ [calls,casts,flushed]).
+
+stats(All,Procs) ->
+ NI = [{Id,rpc:call(N,logger_olp,info,[P])} || {N,P,Id}<-Procs],
+ [{all,All}|[stats(Id,I,All) || {Id,I} <- NI]].
+
+stats(Id,Info,All) ->
+ S = maps:with(?STAT_KEYS,Info),
+ AllOnProc = lists:sum(maps:values(maps:with(?EVENT_KEYS,S))),
+ if All>0 ->
+ Writes = maps:get(writes,S),
+ {_,ActiveTime} = maps:get(time,S),
+ Rate = round(100*Writes/All),
+ RateOnProc =
+ if AllOnProc>0 ->
+ round(100*Writes/AllOnProc);
+ true ->
+ 0
+ end,
+ AvFreq =
+ if ActiveTime>0 ->
+ round(Writes/ActiveTime);
+ true ->
+ 0
+ end,
+ {Id,
+ {stats,S},
+ {rate,Rate},
+ {rate_on_proc,RateOnProc},
+ {av_freq,AvFreq}};
+ true ->
+ {Id,none}
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Spawn a lot of processes that crash repeatedly, causing a lot of
+%%% error reports from the emulator.
+em_cascading() ->
+ spawn(fun() -> super() end).
+
+super() ->
+ process_flag(trap_exit,true),
+ spawn_link(fun server/0),
+ [spawn_link(fun client/0) || _<-lists:seq(1,10000)],
+ super_loop().
+
+super_loop() ->
+ receive
+ {'EXIT',_,server} ->
+ spawn_link(fun server/0),
+ super_loop();
+ {'EXIT',_,_} ->
+ _L = lists:sum(lists:seq(1,10000)),
+ spawn_link(fun client/0),
+ super_loop()
+ end.
+
+client() ->
+ receive
+ after 1 ->
+ case whereis(server) of
+ Pid when is_pid(Pid) ->
+ ok;
+ undefined ->
+ producer(),
+ erlang:error(some_exception)
+ end
+ end,
+ client().
+
+server() ->
+ register(server,self()),
+ receive
+ after 3000 ->
+ exit(server)
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%% Create a supervisor tree with processes that crash repeatedly,
+%%% causing a lot of supervisor reports and crash reports
+otp_cascading() ->
+ {ok,Pid} = supervisor:start_link({local,otp_super}, ?MODULE, [otp_super]),
+ unlink(Pid),
+ Pid.
+
+otp_server_sup() ->
+ supervisor:start_link({local,otp_server_sup},?MODULE,[otp_server_sup]).
+
+otp_client_sup(N) ->
+ supervisor:start_link({local,otp_client_sup},?MODULE,[otp_client_sup,N]).
+
+otp_server() ->
+ gen_server:start_link({local,otp_server},?MODULE,[otp_server],[]).
+
+otp_client() ->
+ gen_server:start_link(?MODULE,[otp_client],[]).
+
+init([otp_super]) ->
+ {ok, {{one_for_one, 200, 10},
+ [{client_sup,
+ {?MODULE, otp_client_sup, [10000]},
+ permanent, 1000, supervisor, [?MODULE]},
+ {server_sup,
+ {?MODULE, otp_server_sup, []},
+ permanent, 1000, supervisor, [?MODULE]}
+ ]}};
+init([otp_server_sup]) ->
+ {ok, {{one_for_one, 2, 10},
+ [{server,
+ {?MODULE, otp_server, []},
+ permanent, 1000, worker, [?MODULE]}
+ ]}};
+init([otp_client_sup,N]) ->
+ spawn(fun() ->
+ [supervisor:start_child(otp_client_sup,[])
+ || _ <- lists:seq(1,N)]
+ end),
+ {ok, {{simple_one_for_one, N*10, 1},
+ [{client,
+ {?MODULE, otp_client, []},
+ permanent, 1000, worker, [?MODULE]}
+ ]}};
+init([otp_server]) ->
+ {ok, server, 10000};
+init([otp_client]) ->
+ {ok, client,1}.
+
+handle_info(timeout, client) ->
+ true = is_pid(whereis(otp_server)),
+ {noreply,client,1};
+handle_info(timeout, server) ->
+ exit(self(), some_error).
+
+%%%-----------------------------------------------------------------
+%%% Logger callbacks
+log(_LogEvent,_Config) ->
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Function to trace on for counting produced emulator messages
+producer() ->
+ ok.
diff --git a/lib/kernel/test/logger_test_lib.erl b/lib/kernel/test/logger_test_lib.erl
new file mode 100644
index 0000000000..be4bc427fb
--- /dev/null
+++ b/lib/kernel/test/logger_test_lib.erl
@@ -0,0 +1,88 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_test_lib).
+
+-include_lib("kernel/src/logger_internal.hrl").
+
+-export([setup/2, log/3, sync_and_read/3]).
+
+-export([init/2,
+ pre_init_per_suite/3, pre_init_per_testcase/4,
+ post_end_per_testcase/5, post_end_per_suite/3]).
+
+setup(Config,Vars) ->
+ Postfix = case proplists:get_value(postfix, Config) of
+ undefined -> "";
+ P -> ["_",P]
+ end,
+ FuncStr = lists:concat([proplists:get_value(suite, Config), "_",
+ proplists:get_value(tc, Config)|
+ Postfix]),
+ ConfigFileName = filename:join(proplists:get_value(priv_dir, Config), FuncStr),
+ file:write_file(ConfigFileName ++ ".config", io_lib:format("[{kernel, ~p}].",[Vars])),
+ Sname = lists:concat([proplists:get_value(tc,Config)|Postfix]),
+ case test_server:start_node(Sname, slave,
+ [{args, ["-pa ",filename:dirname(code:which(?MODULE)),
+ " -boot start_sasl -kernel start_timer true "
+ "-config ",ConfigFileName]}]) of
+ {ok, Node} ->
+ L = rpc:call(Node, logger, get_config, []),
+ ct:log("~p",[L]),
+ {ok, L, Node};
+ {error, Reason} ->
+ ct:log("Failed to start node: ~p",[Reason]),
+ error
+ end.
+
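+%% Example use from a test case (a sketch only; the kernel parameters
+%% are whatever configuration the caller wants the slave node to boot
+%% with):
+%%
+%%   {ok,_LoggerConfig,Node} =
+%%       logger_test_lib:setup(Config,
+%%                             [{logger,
+%%                               [{handler,default,logger_std_h,#{}}]}]),
+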
+log(Node, F, A) ->
+ log(Node, logger, F, A).
+log(Node, M, F, A) ->
+ MD = #{ gl => rpc:call(Node, erlang, whereis, [logger]) },
+ rpc:call(Node, M, F, A ++ [MD]).
+
+sync_and_read(Node,disk_log,Log) ->
+ rpc:call(Node,logger_disk_log_h,filesync,[?STANDARD_HANDLER]),
+ file:read_file(Log ++ ".1");
+sync_and_read(Node, file,Log) ->
+ ok = rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
+ file:read_file(Log).
+
+
+init(_, _) ->
+ {ok, []}.
+
+pre_init_per_suite(_Suite, Config, State) ->
+ {[{nodes, nodes()} | Config], State}.
+
+pre_init_per_testcase(Suite, TC, Config, State) ->
+ cleanup(Config),
+ {[{suite, Suite}, {tc, TC} | Config], State}.
+
+post_end_per_testcase(_, _TC, Config, Res, State) ->
+ cleanup(Config),
+ {Res, State}.
+
+post_end_per_suite(_, Config, State) ->
+ cleanup(Config),
+ {Config, State}.
+
+cleanup(Config) ->
+ [test_server:stop_node(N) || N <- nodes(),
+ not lists:member(N, proplists:get_value(nodes, Config))].
diff --git a/lib/kernel/test/multi_load_SUITE.erl b/lib/kernel/test/multi_load_SUITE.erl
index 369e25ac64..f3258ea520 100644
--- a/lib/kernel/test/multi_load_SUITE.erl
+++ b/lib/kernel/test/multi_load_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -144,14 +144,14 @@ prep_magic([H|T]) ->
prep_magic(Tuple) when is_tuple(Tuple) ->
L = prep_magic(tuple_to_list(Tuple)),
list_to_tuple(L);
-prep_magic(Bin) when is_binary(Bin) ->
- try erlang:has_prepared_code_on_load(Bin) of
+prep_magic(Ref) when is_reference(Ref) ->
+ try erlang:has_prepared_code_on_load(Ref) of
false ->
- %% Create a different kind of magic binary.
+ %% Create a different kind of magic ref.
ets:match_spec_compile([{'_',[true],['$_']}])
catch
_:_ ->
- Bin
+ Ref
end;
prep_magic(Other) ->
Other.
diff --git a/lib/kernel/test/os_SUITE.erl b/lib/kernel/test/os_SUITE.erl
index e76d6ec482..710b9b115c 100644
--- a/lib/kernel/test/os_SUITE.erl
+++ b/lib/kernel/test/os_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -22,10 +22,12 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2]).
--export([space_in_cwd/1, quoting/1, cmd_unicode/1, space_in_name/1, bad_command/1,
+-export([space_in_cwd/1, quoting/1, cmd_unicode/1,
+ null_in_command/1, space_in_name/1, bad_command/1,
find_executable/1, unix_comment_in_command/1, deep_list_command/1,
large_output_command/1, background_command/0, background_command/1,
- message_leak/1, close_stdin/0, close_stdin/1, perf_counter_api/1]).
+ message_leak/1, close_stdin/0, close_stdin/1, max_size_command/1,
+ perf_counter_api/1]).
-include_lib("common_test/include/ct.hrl").
@@ -34,10 +36,11 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [space_in_cwd, quoting, cmd_unicode, space_in_name, bad_command,
+ [space_in_cwd, quoting, cmd_unicode, null_in_command,
+ space_in_name, bad_command,
find_executable, unix_comment_in_command, deep_list_command,
large_output_command, background_command, message_leak,
- close_stdin, perf_counter_api].
+ close_stdin, max_size_command, perf_counter_api].
groups() ->
[].
@@ -125,6 +128,14 @@ cmd_unicode(Config) when is_list(Config) ->
[] = receive_all(),
ok.
+null_in_command(Config) ->
+ {Ok, Error} = case os:type() of
+ {win32,_} -> {"dir", "di\0r"};
+ _ -> {"ls", "l\0s"}
+ end,
+ true = is_list(try os:cmd(Ok) catch Class0:_ -> Class0 end),
+ error = try os:cmd(Error) catch Class1:_ -> Class1 end,
+ ok.
%% Test that program with a space in its name can be executed.
space_in_name(Config) when is_list(Config) ->
@@ -216,8 +227,8 @@ find_executable(Config) when is_list(Config) ->
DataDir = proplists:get_value(data_dir, Config),
%% Smoke test.
- case lib:progname() of
- erl ->
+ case ct:get_progname() of
+ "erl" ->
ErlPath = os:find_executable("erl"),
true = is_list(ErlPath),
true = filelib:is_regular(ErlPath);
@@ -312,6 +323,19 @@ close_stdin(Config) ->
"-1" = os:cmd(Fds).
+max_size_command(_Config) ->
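+    %% os:cmd/2 takes an option map; max_size caps the number of bytes
+    %% of command output returned by the call.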
+
+ Res20 = os:cmd("cat /dev/zero", #{ max_size => 20 }),
+ 20 = length(Res20),
+
+ Res0 = os:cmd("cat /dev/zero", #{ max_size => 0 }),
+ 0 = length(Res0),
+
+ Res32768 = os:cmd("cat /dev/zero", #{ max_size => 32768 }),
+ 32768 = length(Res32768),
+
+ ResHello = string:trim(os:cmd("echo hello", #{ max_size => 20 })),
+ 5 = length(ResHello).
%% Test that the os:perf_counter api works as expected
perf_counter_api(_Config) ->
@@ -319,31 +343,38 @@ perf_counter_api(_Config) ->
true = is_integer(os:perf_counter()),
true = os:perf_counter() > 0,
- T1 = os:perf_counter(),
+ Conv = fun(T1, T2) ->
+ erlang:convert_time_unit(T2 - T1, perf_counter, nanosecond)
+ end,
+
+ do_perf_counter_test([], Conv, 120000000, 80000000),
+ do_perf_counter_test([1000], fun(T1, T2) -> T2 - T1 end, 120, 80).
+
+do_perf_counter_test(CntArgs, Conv, Upper, Lower) ->
+ %% We run the test multiple times to try to get a somewhat
+ %% stable value... what does this test? That the
+ %% calculate_perf_counter_unit in sys_time.c works somewhat ok.
+ do_perf_counter_test(CntArgs, Conv, Upper, Lower, 10).
+
+do_perf_counter_test(CntArgs, _Conv, Upper, Lower, 0) ->
+ ct:fail("perf_counter_test ~p ~p ~p",[CntArgs, Upper, Lower]);
+do_perf_counter_test(CntArgs, Conv, Upper, Lower, Iters) ->
+
+ T1 = apply(os, perf_counter, CntArgs),
timer:sleep(100),
- T2 = os:perf_counter(),
- TsDiff = erlang:convert_time_unit(T2 - T1, perf_counter, nano_seconds),
- ct:pal("T1: ~p~n"
+ T2 = apply(os, perf_counter, CntArgs),
+ TsDiff = Conv(T1, T2),
+ ct:log("T1: ~p~n"
"T2: ~p~n"
"TsDiff: ~p~n",
[T1,T2,TsDiff]),
- %% We allow a 15% diff
- true = TsDiff < 115000000,
- true = TsDiff > 85000000,
-
- T1Ms = os:perf_counter(1000),
- timer:sleep(100),
- T2Ms = os:perf_counter(1000),
- MsDiff = T2Ms - T1Ms,
- ct:pal("T1Ms: ~p~n"
- "T2Ms: ~p~n"
- "MsDiff: ~p~n",
- [T1Ms,T2Ms,MsDiff]),
-
- %% We allow a 15% diff
- true = MsDiff < 115,
- true = MsDiff > 85.
+ if
+ TsDiff < Upper, TsDiff > Lower ->
+ ok;
+ true ->
+ do_perf_counter_test(CntArgs, Conv, Upper, Lower, Iters-1)
+ end.
%% Util functions
@@ -357,7 +388,7 @@ comp(Expected, Got) ->
ct:fail(failed)
end.
-%% Like lib:nonl/1, but strips \r as well as \n.
+%% Strips \n and \r\n from the end of a string.
strip_nl([$\r, $\n]) -> [];
strip_nl([$\n]) -> [];
diff --git a/lib/kernel/test/pdict_SUITE.erl b/lib/kernel/test/pdict_SUITE.erl
index 638d99176e..3685e51c10 100644
--- a/lib/kernel/test/pdict_SUITE.erl
+++ b/lib/kernel/test/pdict_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1999-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1999-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -32,10 +32,14 @@
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
mixed/1,
+ literals/1,
+ destructive/1,
simple/1, complicated/1, heavy/1, simple_all_keys/1, info/1]).
-export([init_per_testcase/2, end_per_testcase/2]).
-export([other_process/2]).
+-export([put_do/2, get_do/1, erase_do/1]).
+
init_per_testcase(_Case, Config) ->
Config.
@@ -48,6 +52,8 @@ suite() ->
all() ->
[simple, complicated, heavy, simple_all_keys, info,
+ literals,
+ destructive,
mixed].
groups() ->
@@ -363,6 +369,36 @@ match_keys(All) ->
ok.
+%% Test destructive put optimization of immed values
+%% does not affect get/0 or process_info.
+destructive(_Config) ->
+ Keys = lists:seq(1,100),
+ [put(Key, 17) || Key <- Keys],
+ Get1 = get(),
+ {dictionary,PI1} = process_info(self(), dictionary),
+
+ [begin
+ {Key, 17} = lists:keyfind(Key, 1, Get1),
+ {Key, 17} = lists:keyfind(Key, 1, PI1)
+ end
+ || Key <- Keys],
+
+ [17 = put(Key, 42) || Key <- Keys], % Mutate
+
+ Get2 = get(),
+ {dictionary,PI2} = process_info(self(), dictionary),
+
+ [begin
+ {Key, 17} = lists:keyfind(Key, 1, Get1),
+ {Key, 17} = lists:keyfind(Key, 1, PI1),
+ {Key, 42} = lists:keyfind(Key, 1, Get2),
+ {Key, 42} = lists:keyfind(Key, 1, PI2)
+
+ end
+ || Key <- Keys],
+
+ ok.
+
%% Do random mixed put/erase to test grow/shrink
%% Written for a temporary bug in gc during shrink
mixed(_Config) ->
@@ -418,3 +454,59 @@ do_mixed([GoalN | _]=Goals, CurrN, Array0, C, Rand0) ->
Array2 = array:resize(CurrN-1, Array1),
do_mixed(Goals, CurrN-1, Array2, C+1, Rand2)
end.
+
+%% Test hash precalculation of literal keys
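+%% The ?MODULE:put_do/get_do/erase_do indirections force the key through
+%% a variable in a remote call, so each pair below combines a literal key
+%% on one side with a non-literal key on the other.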
+literals(_Config) ->
+ %% Put literal -> get variable
+ put(1742, "1742"),
+ "1742" = ?MODULE:get_do(1742),
+ "1742" = ?MODULE:erase_do(1742),
+
+ put(-1742, "-1742"),
+ "-1742" = ?MODULE:get_do(-1742),
+ "-1742" = ?MODULE:erase_do(-1742),
+
+ put([], "NIL"),
+ "NIL" = ?MODULE:get_do([]),
+ "NIL" = ?MODULE:erase_do([]),
+
+ put(<<"binary">>, "binary"),
+ "binary" = ?MODULE:get_do(<<"binary">>),
+ "binary" = ?MODULE:erase_do(<<"binary">>),
+
+ BigBin = <<"A large binary with a lot of bytes to make it go off heap as shared and reference counted">>,
+ put(BigBin, "bigbin"),
+ "bigbin" = ?MODULE:get_do(BigBin),
+ "bigbin" = ?MODULE:erase_do(BigBin),
+
+ %% Put variable -> get literal
+ ?MODULE:put_do(4217, "4217"),
+ "4217" = get(4217),
+ "4217" = erase(4217),
+
+ ?MODULE:put_do(-4217, "-4217"),
+ "-4217" = get(-4217),
+ "-4217" = erase(-4217),
+
+ ?MODULE:put_do([], "NIL"),
+ "NIL" = get([]),
+ "NIL" = erase([]),
+
+ ?MODULE:put_do(<<"bytes">>, "bytes"),
+ "bytes" = get(<<"bytes">>),
+ "bytes" = erase(<<"bytes">>),
+
+ ?MODULE:put_do(BigBin, "BigBin"),
+ "BigBin" = get(BigBin),
+ "BigBin" = erase(BigBin),
+
+ ok.
+
+put_do(K, V) ->
+ put(K, V).
+
+get_do(K) ->
+ get(K).
+
+erase_do(K) ->
+ erase(K).
diff --git a/lib/kernel/test/pg2_SUITE.erl b/lib/kernel/test/pg2_SUITE.erl
index fdc268cb5a..acecd34ead 100644
--- a/lib/kernel/test/pg2_SUITE.erl
+++ b/lib/kernel/test/pg2_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2008-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -31,7 +31,7 @@
-export([
otp_7277/1, otp_8259/1, otp_8653/1,
- compat/1, basic/1]).
+ basic/1]).
-define(TESTCASE, testcase_name).
-define(testcase, proplists:get_value(?TESTCASE, Config)).
@@ -56,7 +56,7 @@ all() ->
groups() ->
[{tickets, [],
- [otp_7277, otp_8259, otp_8653, compat, basic]}].
+ [otp_7277, otp_8259, otp_8653, basic]}].
init_per_suite(Config) ->
Config.
@@ -218,29 +218,6 @@ loop() ->
exit(normal)
end.
-%% OTP-8259. Check that 'exchange' and 'del_member' work.
-compat(Config) when is_list(Config) ->
- case test_server:is_release_available("r13b") of
- true ->
- Pid = spawn(forever()),
- G = a,
- ok = pg2:create(G),
- ok = pg2:join(G, Pid),
- ok = pg2:join(G, Pid),
- {ok, A} = start_node_rel(r13, r13b, slave),
- pong = net_adm:ping(A),
- wait_for_ready_net(Config),
- {ok, _} = rpc:call(A, pg2, start, []),
- ?UNTIL([Pid,Pid] =:= rpc:call(A, pg2, get_members, [a])),
- true = exit(Pid, kill),
- ?UNTIL([] =:= pg2:get_members(a)),
- ?UNTIL([] =:= rpc:call(A, pg2, get_members, [a])),
- test_server:stop_node(A),
- ok;
- false ->
- {skipped, "No support for old node"}
- end.
-
%% OTP-8259. Some basic tests.
basic(Config) when is_list(Config) ->
_ = [pg2:delete(G) || G <- pg2:which_groups()],
diff --git a/lib/kernel/test/prim_file_SUITE.erl b/lib/kernel/test/prim_file_SUITE.erl
index 8be94f1e57..2f465a15bc 100644
--- a/lib/kernel/test/prim_file_SUITE.erl
+++ b/lib/kernel/test/prim_file_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2000-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2000-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,40 +19,25 @@
%%
-module(prim_file_SUITE).
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
- init_per_group/2,end_per_group/2,
- read_write_file/1]).
--export([cur_dir_0a/1, cur_dir_0b/1,
- cur_dir_1a/1, cur_dir_1b/1,
- make_del_dir_a/1, make_del_dir_b/1,
- pos1/1, pos2/1]).
--export([close/1,
- delete_a/1, delete_b/1]).
--export([ open1/1, modes/1]).
--export([
- file_info_basic_file_a/1, file_info_basic_file_b/1,
- file_info_basic_directory_a/1, file_info_basic_directory_b/1,
- file_info_bad_a/1, file_info_bad_b/1,
- file_info_times_a/1, file_info_times_b/1,
- file_write_file_info_a/1, file_write_file_info_b/1,
- file_read_file_info_opts/1, file_write_file_info_opts/1,
- file_write_read_file_info_opts/1
- ]).
--export([rename_a/1, rename_b/1,
- access/1, truncate/1, datasync/1, sync/1,
+ init_per_group/2,end_per_group/2, init_per_testcase/2, end_per_testcase/2,
+ read_write_file/1, free_memory/0]).
+-export([cur_dir_0/1, cur_dir_1/1,
+ make_del_dir/1, pos1/1, pos2/1]).
+-export([close/1, delete/1]).
+-export([open1/1, modes/1]).
+-export([file_info_basic_file/1, file_info_basic_directory/1, file_info_bad/1,
+ file_info_times/1, file_write_file_info/1,
+ file_read_file_info_opts/1, file_write_file_info_opts/1,
+ file_write_read_file_info_opts/1]).
+-export([rename/1, access/1, truncate/1, datasync/1, sync/1,
read_write/1, pread_write/1, append/1, exclusive/1]).
--export([ e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]).
+-export([e_delete/1, e_rename/1, e_make_dir/1, e_del_dir/1]).
--export([ read_not_really_compressed/1,
- read_compressed/1, write_compressed/1,
- compress_errors/1]).
-
--export([
- make_link_a/1, make_link_b/1,
- read_link_info_for_non_link/1,
- symlinks_a/1, symlinks_b/1,
- list_dir_limit/1,
- list_dir_error/1,
- list_dir/1]).
+-export([make_link/1, read_link_info_for_non_link/1,
+ symlinks/1,
+ list_dir_limit/1,
+ list_dir_error/1,
+ list_dir/1]).
-export([advise/1]).
-export([large_write/1]).
@@ -67,29 +52,16 @@
-define(PRIM_FILE, prim_file).
-%% Calls ?PRIM_FILE:F with arguments A and an optional handle H
-%% as first argument, unless the handle is [], i.e no handle.
-%% This is a macro to give the compiler and thereby
-%% the cross reference tool the possibility to interprete
-%% the call, since M, F, A (or [H | A]) can all be known at
-%% compile time.
--define(PRIM_FILE_call(F, H, A),
- case H of
- [] -> apply(?PRIM_FILE, F, A);
- _ -> apply(?PRIM_FILE, F, [H | A])
- end).
-
suite() -> [].
all() ->
[read_write_file, {group, dirs}, {group, files},
- delete_a, delete_b, rename_a, rename_b, {group, errors},
- {group, compression}, {group, links}, list_dir_limit, list_dir].
+ delete, rename, {group, errors}, {group, links},
+ list_dir_limit, list_dir].
groups() ->
[{dirs, [],
- [make_del_dir_a, make_del_dir_b, cur_dir_0a, cur_dir_0b,
- cur_dir_1a, cur_dir_1b]},
+ [make_del_dir, cur_dir_0, cur_dir_1]},
{files, [],
[{group, open}, {group, pos}, {group, file_info},
truncate, sync, datasync, advise, large_write, allocate]},
@@ -98,22 +70,26 @@ groups() ->
append, exclusive]},
{pos, [], [pos1, pos2]},
{file_info, [],
- [file_info_basic_file_a, file_info_basic_file_b,
- file_info_basic_directory_a,
- file_info_basic_directory_b, file_info_bad_a,
- file_info_bad_b, file_info_times_a, file_info_times_b,
- file_write_file_info_a, file_write_file_info_b,
- file_read_file_info_opts, file_write_file_info_opts,
- file_write_read_file_info_opts
+ [file_info_basic_file,file_info_basic_directory, file_info_bad,
+ file_info_times, file_write_file_info, file_read_file_info_opts,
+ file_write_file_info_opts, file_write_read_file_info_opts
]},
{errors, [],
[e_delete, e_rename, e_make_dir, e_del_dir]},
- {compression, [],
- [read_compressed, read_not_really_compressed,
- write_compressed, compress_errors]},
{links, [],
- [make_link_a, make_link_b, read_link_info_for_non_link,
- symlinks_a, symlinks_b, list_dir_error]}].
+ [make_link, read_link_info_for_non_link, symlinks, list_dir_error]}].
+
+init_per_testcase(large_write, Config) ->
+ {ok, Started} = application:ensure_all_started(os_mon),
+ [{started, Started}|Config];
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(large_write, Config) ->
+ [application:stop(App) || App <- lists:reverse(proplists:get_value(started, Config))],
+ ok;
+end_per_testcase(_, _Config) ->
+ ok.
init_per_group(_GroupName, Config) ->
Config.
@@ -234,39 +210,27 @@ read_write_file(Config) when is_list(Config) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-
-make_del_dir_a(Config) when is_list(Config) ->
- make_del_dir(Config, [], "_a").
-
-make_del_dir_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = make_del_dir(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- %% Just to make sure the state of the server makes a difference
- {error, einval} = ?PRIM_FILE_call(get_cwd, Handle, []),
- Result.
-
-make_del_dir(Config, Handle, Suffix) ->
+make_del_dir(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir,Config),
NewDir = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_mk-dir"++Suffix),
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
- {error, eexist} = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
- ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]),
- {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [NewDir]),
+ ++"_mk-dir"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
+ {error, eexist} = ?PRIM_FILE:make_dir(NewDir),
+ ok = ?PRIM_FILE:del_dir(NewDir),
+ {error, enoent} = ?PRIM_FILE:del_dir(NewDir),
%% Make sure we are not in a directory directly under test_server
%% as that would result in eacces errors when trying to delete '..',
%% because there are processes having that directory as current.
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
- {ok, CurrentDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
+ ok = ?PRIM_FILE:make_dir(NewDir),
+ {ok, CurrentDir} = ?PRIM_FILE:get_cwd(),
case {os:type(), length(NewDir) >= 260 } of
{{win32,_}, true} ->
io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH)\n", []),
io:format("\nNewDir = ~p\n", [NewDir]);
_ ->
- ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir])
+ ok = ?PRIM_FILE:set_cwd(NewDir)
end,
try
%% Check that we get an error when trying to create...
@@ -274,14 +238,14 @@ make_del_dir(Config, Handle, Suffix) ->
NewDir2 = filename:join(RootDir,
atom_to_list(?MODULE)
++"_mk-dir-noexist/foo"),
- {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [NewDir2]),
+ {error, enoent} = ?PRIM_FILE:make_dir(NewDir2),
%% a nameless directory
- {error, enoent} = ?PRIM_FILE_call(make_dir, Handle, [""]),
+ {error, enoent} = ?PRIM_FILE:make_dir(""),
%% a directory with illegal name
- {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, ['mk-dir']),
+ {error, badarg} = ?PRIM_FILE:make_dir('mk-dir'),
%% a directory with illegal name, even if it's a (bad) list
- {error, badarg} = ?PRIM_FILE_call(make_dir, Handle, [[1,2,3,{}]]),
+ {error, badarg} = ?PRIM_FILE:make_dir([1,2,3,{}]),
%% Maybe this isn't an error, exactly, but worth mentioning anyway:
%% ok = ?PRIM_FILE:make_dir([$f,$o,$o,0,$b,$a,$r])),
@@ -294,125 +258,101 @@ make_del_dir(Config, Handle, Suffix) ->
%% Try deleting some bad directories
%% Deleting the parent directory to the current, sounds dangerous, huh?
%% Don't worry ;-) the parent directory should never be empty, right?
- case ?PRIM_FILE_call(del_dir, Handle, [".."]) of
+ case ?PRIM_FILE:del_dir("..") of
{error, eexist} -> ok;
{error, eacces} -> ok; %OpenBSD
{error, einval} -> ok %FreeBSD
end,
- {error, enoent} = ?PRIM_FILE_call(del_dir, Handle, [""]),
- {error, badarg} = ?PRIM_FILE_call(del_dir, Handle, [[3,2,1,{}]])
+ {error, enoent} = ?PRIM_FILE:del_dir(""),
+ {error, badarg} = ?PRIM_FILE:del_dir([3,2,1,{}])
after
- ok = ?PRIM_FILE_call(set_cwd, Handle, [CurrentDir])
+ ok = ?PRIM_FILE:set_cwd(CurrentDir)
end,
ok.
-cur_dir_0a(Config) when is_list(Config) ->
- cur_dir_0(Config, []).
-
-cur_dir_0b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = cur_dir_0(Config, Handle),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-cur_dir_0(Config, Handle) ->
+cur_dir_0(Config) when is_list(Config) ->
%% Find out the current dir, and cd to it ;-)
- {ok,BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
+ {ok,BaseDir} = ?PRIM_FILE:get_cwd(),
Dir1 = BaseDir ++ "", %% Check that it's a string
- ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]),
- DirName = atom_to_list(?MODULE) ++
- case Handle of
- [] ->
- "_curdir";
- _ ->
- "_curdir_h"
- end,
+ ok = ?PRIM_FILE:set_cwd(Dir1),
+ DirName = atom_to_list(?MODULE) ++ "_curdir",
%% Make a new dir, and cd to that
RootDir = proplists:get_value(priv_dir,Config),
NewDir = filename:join(RootDir, DirName),
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
+ ok = ?PRIM_FILE:make_dir(NewDir),
case {os:type(), length(NewDir) >= 260} of
{{win32,_}, true} ->
io:format("Skip set_cwd for windows path longer than 260 (MAX_PATH):\n"),
io:format("\nNewDir = ~p\n", [NewDir]);
_ ->
io:format("cd to ~s",[NewDir]),
- ok = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]),
+ ok = ?PRIM_FILE:set_cwd(NewDir),
%% Create a file in the new current directory, and check that it
%% really is created there
UncommonName = "uncommon.fil",
{ok,Fd} = ?PRIM_FILE:open(UncommonName, [read, write]),
ok = ?PRIM_FILE:close(Fd),
- {ok,NewDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]),
+ {ok,NewDirFiles} = ?PRIM_FILE:list_dir("."),
true = lists:member(UncommonName,NewDirFiles),
%% Delete the directory and return to the old current directory
%% and check that the created file isn't there (too!)
expect({error, einval}, {error, eacces}, {error, eexist},
- ?PRIM_FILE_call(del_dir, Handle, [NewDir])),
- ?PRIM_FILE_call(delete, Handle, [UncommonName]),
- {ok,[]} = ?PRIM_FILE_call(list_dir, Handle, ["."]),
- ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]),
+ ?PRIM_FILE:del_dir(NewDir)),
+ ?PRIM_FILE:delete(UncommonName),
+ {ok,[]} = ?PRIM_FILE:list_dir("."),
+ ok = ?PRIM_FILE:set_cwd(Dir1),
io:format("cd back to ~s",[Dir1]),
- ok = ?PRIM_FILE_call(del_dir, Handle, [NewDir]),
- {error, enoent} = ?PRIM_FILE_call(set_cwd, Handle, [NewDir]),
- ok = ?PRIM_FILE_call(set_cwd, Handle, [Dir1]),
+ ok = ?PRIM_FILE:del_dir(NewDir),
+ {error, enoent} = ?PRIM_FILE:set_cwd(NewDir),
+ ok = ?PRIM_FILE:set_cwd(Dir1),
io:format("cd back to ~s",[Dir1]),
- {ok,OldDirFiles} = ?PRIM_FILE_call(list_dir, Handle, ["."]),
+ {ok,OldDirFiles} = ?PRIM_FILE:list_dir("."),
false = lists:member(UncommonName,OldDirFiles)
end,
%% Try doing some bad things
{error, badarg} =
- ?PRIM_FILE_call(set_cwd, Handle, [{foo,bar}]),
+ ?PRIM_FILE:set_cwd({foo,bar}),
{error, enoent} =
- ?PRIM_FILE_call(set_cwd, Handle, [""]),
+ ?PRIM_FILE:set_cwd(""),
{error, enoent} =
- ?PRIM_FILE_call(set_cwd, Handle, [".......a......"]),
+ ?PRIM_FILE:set_cwd(".......a......"),
{ok,BaseDir} =
- ?PRIM_FILE_call(get_cwd, Handle, []), %% Still there?
+ ?PRIM_FILE:get_cwd(), %% Still there?
%% On Windows, there should only be slashes, no backslashes,
%% in the return value of get_cwd().
%% (The test is harmless on Unix, because filenames usually
%% don't contain backslashes.)
- {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
+ {ok, BaseDir} = ?PRIM_FILE:get_cwd(),
false = lists:member($\\, BaseDir),
ok.
%% Tests ?PRIM_FILE:get_cwd/1.
-cur_dir_1a(Config) when is_list(Config) ->
- cur_dir_1(Config, []).
-
-cur_dir_1b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = cur_dir_1(Config, Handle),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-cur_dir_1(Config, Handle) ->
+cur_dir_1(Config) when is_list(Config) ->
case os:type() of
{win32, _} ->
- win_cur_dir_1(Config, Handle);
+ win_cur_dir_1(Config);
_ ->
{error, enotsup} =
- ?PRIM_FILE_call(get_cwd, Handle, ["d:"])
+ ?PRIM_FILE:get_cwd("d:")
end,
ok.
-win_cur_dir_1(_Config, Handle) ->
- {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, []),
+win_cur_dir_1(_Config) ->
+ {ok, BaseDir} = ?PRIM_FILE:get_cwd(),
%% Get the drive letter from the current directory,
%% and try to get current directory for that drive.
[Drive, $:|_] = BaseDir,
- {ok, BaseDir} = ?PRIM_FILE_call(get_cwd, Handle, [[Drive, $:]]),
+ {ok, BaseDir} = ?PRIM_FILE:get_cwd([Drive, $:]),
io:format("BaseDir = ~s\n", [BaseDir]),
%% Unfortunately, there is no way to move away from the
@@ -434,12 +374,12 @@ open1(Config) when is_list(Config) ->
Name = filename:join(NewDir, "foo1.fil"),
{ok,Fd1} = ?PRIM_FILE:open(Name, [read, write]),
{ok,Fd2} = ?PRIM_FILE:open(Name, [read]),
- Str = "{a,tuple}.\n",
- Length = length(Str),
- ?PRIM_FILE:write(Fd1,Str),
+ Bin = list_to_binary("{a,tuple}.\n"),
+ Length = byte_size(Bin),
+ ?PRIM_FILE:write(Fd1,Bin),
{ok,0} = ?PRIM_FILE:position(Fd1,bof),
- {ok, Str} = ?PRIM_FILE:read(Fd1,Length),
- {ok, Str} = ?PRIM_FILE:read(Fd2,Length),
+ {ok, Bin} = ?PRIM_FILE:read(Fd1,Length),
+ {ok, Bin} = ?PRIM_FILE:read(Fd2,Length),
ok = ?PRIM_FILE:close(Fd2),
{ok,0} = ?PRIM_FILE:position(Fd1,bof),
ok = ?PRIM_FILE:truncate(Fd1),
@@ -459,13 +399,13 @@ modes(Config) when is_list(Config) ->
++"_open_modes"),
ok = ?PRIM_FILE:make_dir(NewDir),
Name1 = filename:join(NewDir, "foo1.fil"),
- Marker = "hello, world",
- Length = length(Marker),
+ Marker = <<"hello, world">>,
+ Length = byte_size(Marker),
%% write
{ok, Fd1} = ?PRIM_FILE:open(Name1, [write]),
ok = ?PRIM_FILE:write(Fd1, Marker),
- ok = ?PRIM_FILE:write(Fd1, ".\n"),
+ ok = ?PRIM_FILE:write(Fd1, <<".\n">>),
ok = ?PRIM_FILE:close(Fd1),
%% read
@@ -484,12 +424,6 @@ modes(Config) when is_list(Config) ->
{ok, Marker} = ?PRIM_FILE:read(Fd4, Length),
ok = ?PRIM_FILE:close(Fd4),
- %% read and binary
- BinaryMarker = list_to_binary(Marker),
- {ok, Fd5} = ?PRIM_FILE:open(Name1, [read, binary]),
- {ok, BinaryMarker} = ?PRIM_FILE:read(Fd5, Length),
- ok = ?PRIM_FILE:close(Fd5),
-
ok.
close(Config) when is_list(Config) ->
@@ -516,9 +450,9 @@ access(Config) when is_list(Config) ->
Name = filename:join(RootDir,
atom_to_list(?MODULE)
++"_access.fil"),
- Str = "ABCDEFGH",
+ Bin = <<"ABCDEFGH">>,
{ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?PRIM_FILE:write(Fd1,Str),
+ ?PRIM_FILE:write(Fd1,Bin),
ok = ?PRIM_FILE:close(Fd1),
%% Check that we can't write when in read only mode
{ok,Fd2} = ?PRIM_FILE:open(Name, [read]),
@@ -530,7 +464,7 @@ access(Config) when is_list(Config) ->
end,
ok = ?PRIM_FILE:close(Fd2),
{ok, Fd3} = ?PRIM_FILE:open(Name, [read]),
- {ok, Str} = ?PRIM_FILE:read(Fd3,length(Str)),
+ {ok, Bin} = ?PRIM_FILE:read(Fd3,byte_size(Bin)),
ok = ?PRIM_FILE:close(Fd3),
ok.
@@ -552,7 +486,7 @@ read_write(Config) when is_list(Config) ->
ok.
read_write_test(File) ->
- Marker = "hello, world",
+ Marker = <<"hello, world">>,
ok = ?PRIM_FILE:write(File, Marker),
{ok, 0} = ?PRIM_FILE:position(File, 0),
{ok, Marker} = ?PRIM_FILE:read(File, 100),
@@ -578,15 +512,15 @@ pread_write(Config) when is_list(Config) ->
ok.
pread_write_test(File) ->
- Marker = "hello, world",
- Len = length(Marker),
+ Marker = <<"hello, world">>,
+ Len = byte_size(Marker),
ok = ?PRIM_FILE:write(File, Marker),
{ok, Marker} = ?PRIM_FILE:pread(File, 0, 100),
eof = ?PRIM_FILE:pread(File, 100, 1),
ok = ?PRIM_FILE:pwrite(File, Len, Marker),
{ok, Marker} = ?PRIM_FILE:pread(File, Len, 100),
eof = ?PRIM_FILE:pread(File, 100, 1),
- MM = Marker ++ Marker,
+ MM = <<Marker/binary,Marker/binary>>,
{ok, MM} = ?PRIM_FILE:pread(File, 0, 100),
ok = ?PRIM_FILE:close(File),
ok.
@@ -643,24 +577,24 @@ pos1(Config) when is_list(Config) ->
atom_to_list(?MODULE)
++"_pos1.fil"),
{ok, Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?PRIM_FILE:write(Fd1,"ABCDEFGH"),
+ ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>),
ok = ?PRIM_FILE:close(Fd1),
{ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
%% Start pos is first char
io:format("Relative positions"),
- {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 2} = ?PRIM_FILE:position(Fd2,{cur,1}),
- {ok, "C"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 0} = ?PRIM_FILE:position(Fd2,{cur,-3}),
- {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
%% Backwards from first char should be an error
{ok,0} = ?PRIM_FILE:position(Fd2,{cur,-1}),
{error, einval} = ?PRIM_FILE:position(Fd2,{cur,-1}),
%% Reset position and move again
{ok, 0} = ?PRIM_FILE:position(Fd2,0),
{ok, 2} = ?PRIM_FILE:position(Fd2,{cur,2}),
- {ok, "C"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"C">>} = ?PRIM_FILE:read(Fd2,1),
%% Go a lot forwards
{ok, 13} = ?PRIM_FILE:position(Fd2,{cur,10}),
eof = ?PRIM_FILE:read(Fd2,1),
@@ -672,27 +606,27 @@ pos1(Config) when is_list(Config) ->
{ok, 8} = ?PRIM_FILE:position(Fd2,cur),
eof = ?PRIM_FILE:read(Fd2,1),
{ok, 7} = ?PRIM_FILE:position(Fd2,7),
- {ok, "H"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 0} = ?PRIM_FILE:position(Fd2,0),
- {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 3} = ?PRIM_FILE:position(Fd2,3),
- {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 12} = ?PRIM_FILE:position(Fd2,12),
eof = ?PRIM_FILE:read(Fd2,1),
{ok, 3} = ?PRIM_FILE:position(Fd2,3),
- {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
%% Try the {bof,X} notation
{ok, 3} = ?PRIM_FILE:position(Fd2,{bof,3}),
- {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
%% Try eof positions
io:format("EOF positions"),
{ok, 8} = ?PRIM_FILE:position(Fd2,{eof,0}),
eof = ?PRIM_FILE:read(Fd2,1),
{ok, 7} = ?PRIM_FILE:position(Fd2,{eof,-1}),
- {ok, "H"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"H">>} = ?PRIM_FILE:read(Fd2,1),
{ok, 0} = ?PRIM_FILE:position(Fd2,{eof,-8}),
- {ok, "A"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"A">>} = ?PRIM_FILE:read(Fd2,1),
{error, einval} = ?PRIM_FILE:position(Fd2,{eof,-9}),
ok.
@@ -702,7 +636,7 @@ pos2(Config) when is_list(Config) ->
atom_to_list(?MODULE)
++"_pos2.fil"),
{ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
- ?PRIM_FILE:write(Fd1,"ABCDEFGH"),
+ ?PRIM_FILE:write(Fd1,<<"ABCDEFGH">>),
ok = ?PRIM_FILE:close(Fd1),
{ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
{error, einval} = ?PRIM_FILE:position(Fd2,-1),
@@ -710,35 +644,25 @@ pos2(Config) when is_list(Config) ->
%% Make sure that we still can search after an error.
{ok, 0} = ?PRIM_FILE:position(Fd2, 0),
{ok, 3} = ?PRIM_FILE:position(Fd2, {bof,3}),
- {ok, "D"} = ?PRIM_FILE:read(Fd2,1),
+ {ok, <<"D">>} = ?PRIM_FILE:read(Fd2,1),
io:format("DONE"),
ok.
-
-file_info_basic_file_a(Config) when is_list(Config) ->
- file_info_basic_file(Config, [], "_a").
-
-file_info_basic_file_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_basic_file(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_basic_file(Config, Handle, Suffix) ->
+file_info_basic_file(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir, Config),
%% Create a short file.
Name = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_basic_test"++Suffix++".fil"),
+ ++"_basic_test"".fil"),
{ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
?PRIM_FILE:write(Fd1, "foo bar"),
ok = ?PRIM_FILE:close(Fd1),
%% Test that the file has the expected attributes.
%% The times are tricky, so we will save them to a separate test case.
- {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name),
#file_info{size = Size, type = Type, access = Access,
atime = AccessTime, mtime = ModifyTime} =
FileInfo,
@@ -756,39 +680,30 @@ file_info_basic_file(Config, Handle, Suffix) ->
ok.
-file_info_basic_directory_a(Config) when is_list(Config) ->
- file_info_basic_directory(Config, []).
-
-file_info_basic_directory_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_basic_directory(Config, Handle),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_basic_directory(Config, Handle) ->
+file_info_basic_directory(Config) when is_list(Config) ->
%% Note: filename:join/1 removes any trailing slash,
%% which is essential for ?PRIM_FILE:read_file_info/1 to work on
%% platforms such as Windows95.
RootDir = filename:join([proplists:get_value(priv_dir, Config)]),
%% Test that the RootDir directory has the expected attributes.
- test_directory(RootDir, read_write, Handle),
+ test_directory(RootDir, read_write),
%% Note that on Windows file systems, "/" or "c:/" are *NOT* directories.
%% Therefore, test that ?PRIM_FILE:read_file_info/1 behaves
%% as if they were directories.
case os:type() of
{win32, _} ->
- test_directory("/", read_write, Handle),
- test_directory("c:/", read_write, Handle),
- test_directory("c:\\", read_write, Handle);
+ test_directory("/", read_write),
+ test_directory("c:/", read_write),
+ test_directory("c:\\", read_write);
_ ->
- test_directory("/", read, Handle)
+ test_directory("/", read)
end,
ok.
-test_directory(Name, ExpectedAccess, Handle) ->
- {ok, FileInfo} = ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+test_directory(Name, ExpectedAccess) ->
+ {ok, FileInfo} = ?PRIM_FILE:read_file_info(Name),
#file_info{size = Size, type = Type, access = Access,
atime = AccessTime, mtime = ModifyTime} =
FileInfo,
@@ -812,45 +727,24 @@ all_integers([]) ->
%% Try something nonexistent.
-file_info_bad_a(Config) when is_list(Config) ->
- file_info_bad(Config, []).
-
-file_info_bad_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_bad(Config, Handle),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_bad(Config, Handle) ->
+file_info_bad(Config) when is_list(Config) ->
RootDir = filename:join([proplists:get_value(priv_dir, Config)]),
- {error, enoent} =
- ?PRIM_FILE_call(
- read_file_info, Handle,
- [filename:join(RootDir,
- atom_to_list(?MODULE)++"_nonexistent")]),
+ NonExistent = filename:join(RootDir, atom_to_list(?MODULE)++"_nonexistent"),
+ {error, enoent} = ?PRIM_FILE:read_file_info(NonExistent),
ok.
%% Test that the file times behave as they should.
-file_info_times_a(Config) when is_list(Config) ->
- file_info_times(Config, [], "_a").
-
-file_info_times_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_info_times(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_info_times(Config, Handle, Suffix) ->
+file_info_times(Config) when is_list(Config) ->
%% We have to try this twice, since if the test runs across the change
%% of a month the time diff calculations will fail. But it won't happen
%% if you run it twice in succession.
test_server:m_out_of_n(
1,2,
- fun() -> file_info_int(Config, Handle, Suffix) end),
+ fun() -> file_info_int(Config) end),
ok.
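
The retry above leans on test_server:m_out_of_n/3. A minimal sketch of the assumed semantics — the fun must complete without crashing in at least M of at most N attempts; the names and the error term are illustrative only:

    %% Sketch only: succeed after M crash-free runs, give up after N attempts.
    m_out_of_n(M, N, Fun) ->
        m_out_of_n(M, N, Fun, 0).

    m_out_of_n(M, _Left, _Fun, Ok) when Ok >= M ->
        ok;
    m_out_of_n(_M, 0, _Fun, _Ok) ->
        exit(m_out_of_n_failed);
    m_out_of_n(M, Left, Fun, Ok) ->
        try Fun() of
            _ -> m_out_of_n(M, Left - 1, Fun, Ok + 1)
        catch
            _:_ -> m_out_of_n(M, Left - 1, Fun, Ok)
        end.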
-file_info_int(Config, Handle, Suffix) ->
+file_info_int(Config) ->
%% Note: filename:join/1 removes any trailing slash,
%% which is essential for ?PRIM_FILE:read_file_info/1 to work on
%% platforms such as Windows95.
@@ -860,14 +754,14 @@ file_info_int(Config, Handle, Suffix) ->
Name = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_file_info"++Suffix++".fil"),
+ ++"_file_info.fil"),
{ok,Fd1} = ?PRIM_FILE:open(Name, [write]),
?PRIM_FILE:write(Fd1,"foo"),
%% check that the file got a modify date max a few seconds away from now
{ok, #file_info{type = regular,
atime = AccTime1, mtime = ModTime1}} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ ?PRIM_FILE:read_file_info(Name),
Now = erlang:localtime(),
io:format("Now ~p",[Now]),
io:format("Open file Acc ~p Mod ~p",[AccTime1,ModTime1]),
@@ -885,7 +779,7 @@ file_info_int(Config, Handle, Suffix) ->
ok = ?PRIM_FILE:close(Fd1),
{ok, #file_info{size = Size, type = regular, access = Access,
atime = AccTime2, mtime = ModTime2}} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ ?PRIM_FILE:read_file_info(Name),
io:format("Closed file Acc ~p Mod ~p",[AccTime2,ModTime2]),
true = time_dist(ModTime1, ModTime2) >= 0,
@@ -897,7 +791,7 @@ file_info_int(Config, Handle, Suffix) ->
{ok, #file_info{size = DSize, type = directory,
access = DAccess,
atime = AccTime3, mtime = ModTime3}} =
- ?PRIM_FILE_call(read_file_info, Handle, [RootDir]),
+ ?PRIM_FILE:read_file_info(RootDir),
%% this dir was modified only a few secs ago
io:format("Dir Acc ~p; Mod ~p; Now ~p",
[AccTime3, ModTime3, Now]),
@@ -924,16 +818,7 @@ filter_atime(Atime, Config) ->
%% Test the write_file_info/2 function.
-file_write_file_info_a(Config) when is_list(Config) ->
- file_write_file_info(Config, [], "_a").
-
-file_write_file_info_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = file_write_file_info(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-file_write_file_info(Config, Handle, Suffix) ->
+file_write_file_info(Config) when is_list(Config) ->
RootDir = get_good_directory(Config),
io:format("RootDir = ~p", [RootDir]),
@@ -943,16 +828,16 @@ file_write_file_info(Config, Handle, Suffix) ->
Name = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_write_file_info_ro"++Suffix),
+ ++"_write_file_info_ro"),
ok = ?PRIM_FILE:write_file(Name, "hello"),
Time = {{1997, 01, 02}, {12, 35, 42}},
Info = #file_info{mode=8#400, atime=Time, mtime=Time, ctime=Time},
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, Info]),
+ ok = ?PRIM_FILE:write_file_info(Name, Info),
%% Read back the times.
{ok, ActualInfo} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ ?PRIM_FILE:read_file_info(Name),
#file_info{mode=_Mode, atime=ActAtime, mtime=Time,
ctime=ActCtime} = ActualInfo,
FilteredAtime = filter_atime(Time, Config),
@@ -968,14 +853,11 @@ file_write_file_info(Config, Handle, Suffix) ->
{error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"),
%% Make the file writable again.
-
- ?PRIM_FILE_call(write_file_info, Handle,
- [Name, #file_info{mode=8#600}]),
+ ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}),
ok = ?PRIM_FILE:write_file(Name, "hello again"),
%% And unwritable.
- ?PRIM_FILE_call(write_file_info, Handle,
- [Name, #file_info{mode=8#400}]),
+ ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#400}),
{error, eacces} = ?PRIM_FILE:write_file(Name, "hello again"),
%% Write the times again.
@@ -983,9 +865,9 @@ file_write_file_info(Config, Handle, Suffix) ->
NewTime = {{1997, 02, 15}, {13, 18, 20}},
NewInfo = #file_info{atime=NewTime, mtime=NewTime, ctime=NewTime},
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, NewInfo]),
+ ok = ?PRIM_FILE:write_file_info(Name, NewInfo),
{ok, ActualInfo2} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ ?PRIM_FILE:read_file_info(Name),
#file_info{atime=NewActAtime, mtime=NewTime,
ctime=NewActCtime} = ActualInfo2,
NewFilteredAtime = filter_atime(NewTime, Config),
@@ -1000,14 +882,12 @@ file_write_file_info(Config, Handle, Suffix) ->
%% Make the file writeable again, so that we can remove the
%% test suites ... :-)
- ?PRIM_FILE_call(write_file_info, Handle,
- [Name, #file_info{mode=8#600}]),
+ ?PRIM_FILE:write_file_info(Name, #file_info{mode=8#600}),
ok.
%% Test the write_file_info/3 function.
file_write_file_info_opts(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
RootDir = get_good_directory(Config),
io:format("RootDir = ~p", [RootDir]),
@@ -1016,7 +896,7 @@ file_write_file_info_opts(Config) when is_list(Config) ->
lists:foreach(fun
({FI, Opts}) ->
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts])
+ ok = ?PRIM_FILE:write_file_info(Name, FI, Opts)
end, [
{#file_info{ mode=8#600, atime = Time, mtime = Time, ctime = Time}, Opts} ||
Opts <- [[{time, posix}]],
@@ -1026,7 +906,7 @@ file_write_file_info_opts(Config) when is_list(Config) ->
%% REM: determine date range dependent on time_t = Uint32 | Sint32 | Sint64 | Uint64
%% Determine time_t on os:type()?
lists:foreach(fun ({FI, Opts}) ->
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI, Opts])
+ ok = ?PRIM_FILE:write_file_info(Name, FI, Opts)
end, [ {#file_info{ mode=8#400, atime = Time, mtime = Time, ctime = Time}, Opts} ||
Opts <- [[{time, universal}],[{time, local}]],
Time <- [
@@ -1038,11 +918,9 @@ file_write_file_info_opts(Config) when is_list(Config) ->
{{2037,2,3},{23,59,59}},
erlang:localtime()
]]),
- ok = ?PRIM_FILE:stop(Handle),
ok.
file_read_file_info_opts(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
RootDir = get_good_directory(Config),
io:format("RootDir = ~p", [RootDir]),
@@ -1051,41 +929,38 @@ file_read_file_info_opts(Config) when is_list(Config) ->
lists:foreach(fun
(Opts) ->
- {ok,_} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts])
+ {ok,_} = ?PRIM_FILE:read_file_info(Name, Opts)
end, [[{time, Type}] || Type <- [local, universal, posix]]),
- ok = ?PRIM_FILE:stop(Handle),
ok.
%% Test the write and read back *_file_info/3 functions.
file_write_read_file_info_opts(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
RootDir = get_good_directory(Config),
io:format("RootDir = ~p", [RootDir]),
Name = filename:join(RootDir, atom_to_list(?MODULE) ++"_read_write_file_info_opts"),
ok = ?PRIM_FILE:write_file(Name, "hello_opts2"),
- ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]),
- ok = file_write_read_file_info_opts(Handle, Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]),
+ ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, local}]),
+ ok = file_write_read_file_info_opts(Name, {{1989, 04, 28}, {19,30,22}}, [{time, universal}]),
%% will not work on platforms with unsigned time_t
- %ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]),
- %ok = file_write_read_file_info_opts(Handle, Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]),
- ok = file_write_read_file_info_opts(Handle, Name, 1, [{time, posix}]),
+ %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, local}]),
+ %ok = file_write_read_file_info_opts(Name, {{1930, 04, 28}, {19,30,22}}, [{time, universal}]),
+ ok = file_write_read_file_info_opts(Name, 1, [{time, posix}]),
%% will not work on platforms with unsigned time_t
- %ok = file_write_read_file_info_opts(Handle, Name, -1, [{time, posix}]),
- %ok = file_write_read_file_info_opts(Handle, Name, -300000, [{time, posix}]),
- ok = file_write_read_file_info_opts(Handle, Name, 300000, [{time, posix}]),
- ok = file_write_read_file_info_opts(Handle, Name, 0, [{time, posix}]),
+ %ok = file_write_read_file_info_opts(Name, -1, [{time, posix}]),
+ %ok = file_write_read_file_info_opts(Name, -300000, [{time, posix}]),
+ ok = file_write_read_file_info_opts(Name, 300000, [{time, posix}]),
+ ok = file_write_read_file_info_opts(Name, 0, [{time, posix}]),
- ok = ?PRIM_FILE:stop(Handle),
ok.
-file_write_read_file_info_opts(Handle, Name, Mtime, Opts) ->
- {ok, FI} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]),
+file_write_read_file_info_opts(Name, Mtime, Opts) ->
+ {ok, FI} = ?PRIM_FILE:read_file_info(Name, Opts),
FI2 = FI#file_info{ mtime = Mtime },
- ok = ?PRIM_FILE_call(write_file_info, Handle, [Name, FI2, Opts]),
- {ok, FI3} = ?PRIM_FILE_call(read_file_info, Handle, [Name, Opts]),
+ ok = ?PRIM_FILE:write_file_info(Name, FI2, Opts),
+ {ok, FI3} = ?PRIM_FILE:read_file_info(Name, Opts),
io:format("Expecting mtime = ~p, got ~p~n", [FI2#file_info.mtime, FI3#file_info.mtime]),
FI2 = FI3,
ok.
@@ -1163,8 +1038,8 @@ advise(Config) when is_list(Config) ->
atom_to_list(?MODULE)
++"_advise.fil"),
- Line1 = "Hello\n",
- Line2 = "World!\n",
+ Line1 = <<"Hello\n">>,
+ Line2 = <<"World!\n">>,
{ok, Fd} = ?PRIM_FILE:open(Advise, [write]),
ok = ?PRIM_FILE:advise(Fd, 0, 0, normal),
@@ -1214,7 +1089,7 @@ advise(Config) when is_list(Config) ->
{ok, Fd9} = ?PRIM_FILE:open(Advise, [read]),
Offset = 0,
%% same as a 0 length in some implementations
- Length = length(Line1) + length(Line2),
+ Length = byte_size(Line1) + byte_size(Line2),
ok = ?PRIM_FILE:advise(Fd9, Offset, Length, sequential),
{ok, Line1} = ?PRIM_FILE:read_line(Fd9),
{ok, Line2} = ?PRIM_FILE:read_line(Fd9),
@@ -1238,23 +1113,18 @@ do_large_write(Name) ->
Chunk = <<0:ChunkSize/unit:8>>,
Data = zip_data(lists:duplicate(Chunks, Chunk), Interleave),
Size = Chunks * ChunkSize + Chunks, % 4 G + 32
- Wordsize = erlang:system_info(wordsize),
- case prim_file:write_file(Name, Data) of
- ok when Wordsize =:= 8 ->
- {ok,#file_info{size=Size}} = file:read_file_info(Name),
- {ok,Fd} = prim_file:open(Name, [read]),
- check_large_write(Fd, ChunkSize, 0, Interleave);
- {error,einval} when Wordsize =:= 4 ->
- ok
- end.
+ ok = ?PRIM_FILE:write_file(Name, Data),
+ {ok,#file_info{size=Size}} = file:read_file_info(Name),
+ {ok,Fd} = ?PRIM_FILE:open(Name, [read]),
+ check_large_write(Fd, ChunkSize, 0, Interleave).
check_large_write(Fd, ChunkSize, Pos, [X|Interleave]) ->
Pos1 = Pos + ChunkSize,
- {ok,Pos1} = prim_file:position(Fd, {cur,ChunkSize}),
- {ok,[X]} = prim_file:read(Fd, 1),
+ {ok,Pos1} = ?PRIM_FILE:position(Fd, {cur,ChunkSize}),
+ {ok,<<X>>} = ?PRIM_FILE:read(Fd, 1),
check_large_write(Fd, ChunkSize, Pos1+1, Interleave);
check_large_write(Fd, _, _, []) ->
- eof = prim_file:read(Fd, 1),
+ eof = ?PRIM_FILE:read(Fd, 1),
ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -1326,71 +1196,53 @@ allocate_and_assert(Fd, Offset, Length) ->
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
-delete_a(Config) when is_list(Config) ->
- delete(Config, [], "_a").
-
-delete_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = delete(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-delete(Config, Handle, Suffix) ->
+delete(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir,Config),
Name = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_delete"++Suffix++".fil"),
+ ++"_delete.fil"),
{ok, Fd1} = ?PRIM_FILE:open(Name, [write]),
?PRIM_FILE:write(Fd1,"ok.\n"),
ok = ?PRIM_FILE:close(Fd1),
%% Check that the file is readable
{ok, Fd2} = ?PRIM_FILE:open(Name, [read]),
ok = ?PRIM_FILE:close(Fd2),
- ok = ?PRIM_FILE_call(delete, Handle, [Name]),
+ ok = ?PRIM_FILE:delete(Name),
%% Check that the file is not readable anymore
{error, _} = ?PRIM_FILE:open(Name, [read]),
%% Try deleting a nonexistent file
- {error, enoent} = ?PRIM_FILE_call(delete, Handle, [Name]),
+ {error, enoent} = ?PRIM_FILE:delete(Name),
ok.
-rename_a(Config) when is_list(Config) ->
- rename(Config, [], "_a").
-
-rename_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = rename(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-rename(Config, Handle, Suffix) ->
+rename(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir,Config),
- FileName1 = atom_to_list(?MODULE)++"_rename"++Suffix++".fil",
- FileName2 = atom_to_list(?MODULE)++"_rename"++Suffix++".ful",
+ FileName1 = atom_to_list(?MODULE)++"_rename.fil",
+ FileName2 = atom_to_list(?MODULE)++"_rename.ful",
Name1 = filename:join(RootDir, FileName1),
Name2 = filename:join(RootDir, FileName2),
{ok,Fd1} = ?PRIM_FILE:open(Name1, [write]),
ok = ?PRIM_FILE:close(Fd1),
%% Rename, and check that it really changed name
- ok = ?PRIM_FILE_call(rename, Handle, [Name1, Name2]),
+ ok = ?PRIM_FILE:rename(Name1, Name2),
{error, _} = ?PRIM_FILE:open(Name1, [read]),
{ok, Fd2} = ?PRIM_FILE:open(Name2, [read]),
ok = ?PRIM_FILE:close(Fd2),
%% Try renaming something to itself
- ok = ?PRIM_FILE_call(rename, Handle, [Name2, Name2]),
+ ok = ?PRIM_FILE:rename(Name2, Name2),
%% Try renaming something that doesn't exist
{error, enoent} =
- ?PRIM_FILE_call(rename, Handle, [Name1, Name2]),
+ ?PRIM_FILE:rename(Name1, Name2),
%% Try renaming to something else than a string
{error, badarg} =
- ?PRIM_FILE_call(rename, Handle, [Name1, foobar]),
+ ?PRIM_FILE:rename(Name1, foobar),
%% Move between directories
DirName1 = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_rename_dir"++Suffix),
+ ++"_rename_dir"),
DirName2 = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_second_rename_dir"++Suffix),
+ ++"_second_rename_dir"),
Name1foo = filename:join(DirName1, "foo.fil"),
Name2foo = filename:join(DirName2, "foo.fil"),
Name2bar = filename:join(DirName2, "bar.dir"),
@@ -1398,21 +1250,21 @@ rename(Config, Handle, Suffix) ->
%% The name has to include the full file name, path is not enough
expect(
{error, eexist}, {error, eisdir},
- ?PRIM_FILE_call(rename, Handle, [Name2, DirName1])),
+ ?PRIM_FILE:rename(Name2, DirName1)),
ok =
- ?PRIM_FILE_call(rename, Handle, [Name2, Name1foo]),
+ ?PRIM_FILE:rename(Name2, Name1foo),
%% Now rename the directory
- ok = ?PRIM_FILE_call(rename, Handle, [DirName1, DirName2]),
+ ok = ?PRIM_FILE:rename(DirName1, DirName2),
%% And check that the file is there now
{ok,Fd3} = ?PRIM_FILE:open(Name2foo, [read]),
ok = ?PRIM_FILE:close(Fd3),
%% Try some dirty things now: move the directory into itself
{error, Msg1} =
- ?PRIM_FILE_call(rename, Handle, [DirName2, Name2bar]),
+ ?PRIM_FILE:rename(DirName2, Name2bar),
io:format("Errmsg1: ~p",[Msg1]),
%% move dir into a file in itself
{error, Msg2} =
- ?PRIM_FILE_call(rename, Handle, [DirName2, Name2foo]),
+ ?PRIM_FILE:rename(DirName2, Name2foo),
io:format("Errmsg2: ~p",[Msg2]),
ok.
@@ -1448,13 +1300,14 @@ e_delete(Config) when is_list(Config) ->
case os:type() of
{win32, _} ->
%% Remove a character device.
- {error, eacces} = ?PRIM_FILE:delete("nul");
+ expect({error, eacces}, {error, einval},
+ ?PRIM_FILE:delete("nul"));
_ ->
?PRIM_FILE:write_file_info(
Base, #file_info {mode=0}),
{error, eacces} = ?PRIM_FILE:delete(Afile),
?PRIM_FILE:write_file_info(
- Base, #file_info {mode=8#600})
+ Base, #file_info {mode=8#700})
end,
ok.
@@ -1590,7 +1443,7 @@ e_make_dir(Config) when is_list(Config) ->
?PRIM_FILE:write_file_info(Base, #file_info {mode=0}),
{error, eacces} =
?PRIM_FILE:make_dir(filename:join(Base, "xxxx")),
- ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#600})
+ ?PRIM_FILE:write_file_info(Base, #file_info {mode=8#700})
end,
ok.
@@ -1640,170 +1493,24 @@ e_del_dir(Config) when is_list(Config) ->
?PRIM_FILE:write_file_info(Base, #file_info {mode=0}),
{error, eacces} = ?PRIM_FILE:del_dir(ADirectory),
?PRIM_FILE:write_file_info(
- Base, #file_info {mode=8#600})
+ Base, #file_info {mode=8#700})
end,
ok.
-%% Trying reading and positioning from a compressed file.
-
-read_compressed(Config) when is_list(Config) ->
- Data = proplists:get_value(data_dir, Config),
- Real = filename:join(Data, "realmen.html.gz"),
- {ok, Fd} = ?PRIM_FILE:open(Real, [read, compressed]),
- try_read_file(Fd).
-
-%% Trying reading and positioning from an uncompressed file,
-%% but with the compressed flag given.
-
-read_not_really_compressed(Config) when is_list(Config) ->
- Data = proplists:get_value(data_dir, Config),
- Priv = proplists:get_value(priv_dir, Config),
-
- %% The file realmen.html might have got CRs added (by WinZip).
- %% Remove them, or the file positions will not be correct.
-
- Real = filename:join(Data, "realmen.html"),
- RealPriv = filename:join(Priv,
- atom_to_list(?MODULE)++"_realmen.html"),
- {ok, RealDataBin} = ?PRIM_FILE:read_file(Real),
- RealData = remove_crs(binary_to_list(RealDataBin), []),
- ok = ?PRIM_FILE:write_file(RealPriv, RealData),
- {ok, Fd} = ?PRIM_FILE:open(RealPriv, [read, compressed]),
- try_read_file(Fd).
-
-remove_crs([$\r|Rest], Result) ->
- remove_crs(Rest, Result);
-remove_crs([C|Rest], Result) ->
- remove_crs(Rest, [C|Result]);
-remove_crs([], Result) ->
- lists:reverse(Result).
-
-try_read_file(Fd) ->
- %% Seek to the current position (nothing should happen).
-
- {ok, 0} = ?PRIM_FILE:position(Fd, 0),
- {ok, 0} = ?PRIM_FILE:position(Fd, {cur, 0}),
-
- %% Read a few lines from a compressed file.
-
- ShouldBe = "<TITLE>Real Programmers Don't Use PASCAL</TITLE>\n",
- {ok, ShouldBe} = ?PRIM_FILE:read(Fd, length(ShouldBe)),
-
- %% Now seek forward.
-
- {ok, 381} = ?PRIM_FILE:position(Fd, 381),
- Back = "Back in the good old days -- the \"Golden Era\" " ++
- "of computers, it was\n",
- {ok, Back} = ?PRIM_FILE:read(Fd, length(Back)),
-
- %% Try to search forward relative to the current position.
-
- {ok, CurPos} = ?PRIM_FILE:position(Fd, {cur, 0}),
- RealPos = 4273,
- {ok, RealPos} = ?PRIM_FILE:position(Fd, {cur, RealPos-CurPos}),
- RealProg = "<LI> Real Programmers aren't afraid to use GOTOs.\n",
- {ok, RealProg} = ?PRIM_FILE:read(Fd, length(RealProg)),
-
- %% Seek backward.
-
- AfterTitle = length("<TITLE>"),
- {ok, AfterTitle} = ?PRIM_FILE:position(Fd, AfterTitle),
- Title = "Real Programmers Don't Use PASCAL</TITLE>\n",
- {ok, Title} = ?PRIM_FILE:read(Fd, length(Title)),
-
- %% Done.
-
- ?PRIM_FILE:close(Fd),
- ok.
-
-write_compressed(Config) when is_list(Config) ->
- Priv = proplists:get_value(priv_dir, Config),
- MyFile = filename:join(Priv,
- atom_to_list(?MODULE)++"_test.gz"),
-
- %% Write a file.
-
- {ok, Fd} = ?PRIM_FILE:open(MyFile, [write, compressed]),
- {ok, 0} = ?PRIM_FILE:position(Fd, 0),
- Prefix = "hello\n",
- End = "end\n",
- ok = ?PRIM_FILE:write(Fd, Prefix),
- {ok, 143} = ?PRIM_FILE:position(Fd, 143),
- ok = ?PRIM_FILE:write(Fd, End),
- ok = ?PRIM_FILE:close(Fd),
-
- %% Read the file and verify the contents.
-
- {ok, Fd1} = ?PRIM_FILE:open(MyFile, [read, compressed]),
- {ok, Prefix} = ?PRIM_FILE:read(Fd1, length(Prefix)),
- Second = lists:duplicate(143-length(Prefix), 0) ++ End,
- {ok, Second} = ?PRIM_FILE:read(Fd1, length(Second)),
- ok = ?PRIM_FILE:close(Fd1),
-
- %% Ensure that the file is compressed.
-
- TotalSize = 143 + length(End),
- case ?PRIM_FILE:read_file_info(MyFile) of
- {ok, #file_info{size=Size}} when Size < TotalSize ->
- ok;
- {ok, #file_info{size=Size}} when Size == TotalSize ->
- ct:fail(file_not_compressed)
- end,
-
- %% Write again to ensure that the file is truncated.
-
- {ok, Fd2} = ?PRIM_FILE:open(MyFile, [write, compressed]),
- NewString = "aaaaaaaaaaa",
- ok = ?PRIM_FILE:write(Fd2, NewString),
- ok = ?PRIM_FILE:close(Fd2),
- {ok, Fd3} = ?PRIM_FILE:open(MyFile, [read, compressed]),
- {ok, NewString} = ?PRIM_FILE:read(Fd3, 1024),
- ok = ?PRIM_FILE:close(Fd3),
-
- ok.
-
-compress_errors(Config) when is_list(Config) ->
- Data = proplists:get_value(data_dir, Config),
- {error, enoent} = ?PRIM_FILE:open("non_existing__",
- [compressed, read]),
- {error, einval} = ?PRIM_FILE:open("non_existing__",
- [compressed, read, write]),
-
- %% Read a corrupted .gz file.
-
- Corrupted = filename:join(Data, "corrupted.gz"),
- {ok, Fd} = ?PRIM_FILE:open(Corrupted, [read, compressed]),
- {error, eio} = ?PRIM_FILE:read(Fd, 100),
- ?PRIM_FILE:close(Fd),
-
- ok.
-
-
-%% Test creating a hard link.
-make_link_a(Config) when is_list(Config) ->
- make_link(Config, [], "_a").
-
-%% Test creating a hard link.
-make_link_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = make_link(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-make_link(Config, Handle, Suffix) ->
+make_link(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir, Config),
NewDir = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_make_link"++Suffix),
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
+ ++"_make_link"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
Name = filename:join(NewDir, "a_file"),
ok = ?PRIM_FILE:write_file(Name, "some contents\n"),
Alias = filename:join(NewDir, "an_alias"),
Result =
- case ?PRIM_FILE_call(make_link, Handle, [Name, Alias]) of
+ case ?PRIM_FILE:make_link(Name, Alias) of
{error, enotsup} ->
{skipped, "Links not supported on this platform"};
ok ->
@@ -1814,12 +1521,12 @@ make_link(Config, Handle, Suffix) ->
%% since they are not used on symbolic links.
{ok, Info} =
- ?PRIM_FILE_call(read_link_info, Handle, [Name]),
+ ?PRIM_FILE:read_link_info(Name),
{ok, Info} =
- ?PRIM_FILE_call(read_link_info, Handle, [Alias]),
+ ?PRIM_FILE:read_link_info(Alias),
#file_info{links = 2, type = regular} = Info,
{error, eexist} =
- ?PRIM_FILE_call(make_link, Handle, [Name, Alias]),
+ ?PRIM_FILE:make_link(Name, Alias),
ok
end,
@@ -1831,30 +1538,19 @@ read_link_info_for_non_link(Config) when is_list(Config) ->
{ok, #file_info{type=directory}} = ?PRIM_FILE:read_link_info("."),
ok.
-%% Test operations on symbolic links (for Unix).
-symlinks_a(Config) when is_list(Config) ->
- symlinks(Config, [], "_a").
-
-%% Test operations on symbolic links (for Unix).
-symlinks_b(Config) when is_list(Config) ->
- {ok, Handle} = ?PRIM_FILE:start(),
- Result = symlinks(Config, Handle, "_b"),
- ok = ?PRIM_FILE:stop(Handle),
- Result.
-
-symlinks(Config, Handle, Suffix) ->
+symlinks(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir, Config),
NewDir = filename:join(RootDir,
atom_to_list(?MODULE)
- ++"_make_symlink"++Suffix),
- ok = ?PRIM_FILE_call(make_dir, Handle, [NewDir]),
+ ++"_make_symlink"),
+ ok = ?PRIM_FILE:make_dir(NewDir),
Name = filename:join(NewDir, "a_plain_file"),
ok = ?PRIM_FILE:write_file(Name, "some stupid content\n"),
Alias = filename:join(NewDir, "a_symlink_alias"),
Result =
- case ?PRIM_FILE_call(make_symlink, Handle, [Name, Alias]) of
+ case ?PRIM_FILE:make_symlink(Name, Alias) of
{error, enotsup} ->
{skipped, "Links not supported on this platform"};
{error, eperm} ->
@@ -1862,20 +1558,20 @@ symlinks(Config, Handle, Suffix) ->
{skipped, "Windows user not privileged to create links"};
ok ->
{ok, Info1} =
- ?PRIM_FILE_call(read_file_info, Handle, [Name]),
+ ?PRIM_FILE:read_file_info(Name),
{ok, Info1} =
- ?PRIM_FILE_call(read_file_info, Handle, [Alias]),
+ ?PRIM_FILE:read_file_info(Alias),
{ok, Info1} =
- ?PRIM_FILE_call(read_link_info, Handle, [Name]),
+ ?PRIM_FILE:read_link_info(Name),
#file_info{links = 1, type = regular} = Info1,
{ok, Info2} =
- ?PRIM_FILE_call(read_link_info, Handle, [Alias]),
+ ?PRIM_FILE:read_link_info(Alias),
#file_info{links=1, type=symlink} = Info2,
{ok, Name} =
- ?PRIM_FILE_call(read_link, Handle, [Alias]),
+ ?PRIM_FILE:read_link(Alias),
{ok, Name} =
- ?PRIM_FILE_call(read_link_all, Handle, [Alias]),
+ ?PRIM_FILE:read_link_all(Alias),
%% If all is good, delete dir again (avoid hanging dir on windows)
rm_rf(?PRIM_FILE,NewDir),
ok
@@ -1895,10 +1591,9 @@ list_dir_limit(Config) when is_list(Config) ->
RootDir = proplists:get_value(priv_dir, Config),
NewDir = filename:join(RootDir,
atom_to_list(?MODULE)++"_list_dir_limit"),
- {ok, Handle1} = ?PRIM_FILE:start(),
- ok = ?PRIM_FILE_call(make_dir, Handle1, [NewDir]),
+ ok = ?PRIM_FILE:make_dir(NewDir),
Ref = erlang:start_timer(MaxTime*1000, self(), []),
- Result = list_dir_limit_loop(NewDir, Handle1, Ref, MaxNumber, 0),
+ Result = list_dir_limit_loop(NewDir, Ref, MaxNumber, 0),
Time = case erlang:cancel_timer(Ref) of
false -> MaxTime;
T -> MaxTime - (T div 1000)
@@ -1908,21 +1603,18 @@ list_dir_limit(Config) when is_list(Config) ->
{error, _Reason, N} -> N;
_ -> 0
end,
- {ok, Handle2} = ?PRIM_FILE:start(),
- list_dir_limit_cleanup(NewDir, Handle2, Number, 0),
- ok = ?PRIM_FILE:stop(Handle1),
- ok = ?PRIM_FILE:stop(Handle2),
+ list_dir_limit_cleanup(NewDir, Number, 0),
{ok, Number} = Result,
{comment,
"Created " ++ integer_to_list(Number) ++ " files in "
++ integer_to_list(Time) ++ " seconds."}.
-list_dir_limit_loop(Dir, Handle, _Ref, N, Cnt) when Cnt >= N ->
- list_dir_check(Dir, Handle, Cnt);
-list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) ->
+list_dir_limit_loop(Dir, _Ref, N, Cnt) when Cnt >= N ->
+ list_dir_check(Dir, Cnt);
+list_dir_limit_loop(Dir, Ref, N, Cnt) ->
receive
{timeout, Ref, []} ->
- list_dir_check(Dir, Handle, Cnt)
+ list_dir_check(Dir, Cnt)
after 0 ->
Name = integer_to_list(Cnt),
case ?PRIM_FILE:write_file(filename:join(Dir, Name), Name) of
@@ -1930,23 +1622,23 @@ list_dir_limit_loop(Dir, Handle, Ref, N, Cnt) ->
Next = Cnt + 1,
case Cnt rem 100 of
0 ->
- case list_dir_check(Dir, Handle, Next) of
+ case list_dir_check(Dir, Next) of
{ok, Next} ->
list_dir_limit_loop(
- Dir, Handle, Ref, N, Next);
+ Dir, Ref, N, Next);
Other ->
Other
end;
_ ->
- list_dir_limit_loop(Dir, Handle, Ref, N, Next)
+ list_dir_limit_loop(Dir, Ref, N, Next)
end;
{error, Reason} ->
{error, Reason, Cnt}
end
end.
-list_dir_check(Dir, Handle, Cnt) ->
- case ?PRIM_FILE:list_dir(Handle, Dir) of
+list_dir_check(Dir, Cnt) ->
+ case ?PRIM_FILE:list_dir(Dir) of
{ok, ListDir} ->
case length(ListDir) of
Cnt ->
@@ -1963,18 +1655,18 @@ list_dir_check(Dir, Handle, Cnt) ->
%% Deletes N files while ignoring errors, then continues deleting
%% as long as they exist.
-list_dir_limit_cleanup(Dir, Handle, N, Cnt) when Cnt >= N ->
+list_dir_limit_cleanup(Dir, N, Cnt) when Cnt >= N ->
Name = integer_to_list(Cnt),
- case ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)) of
+ case ?PRIM_FILE:delete(filename:join(Dir, Name)) of
ok ->
- list_dir_limit_cleanup(Dir, Handle, N, Cnt+1);
+ list_dir_limit_cleanup(Dir, N, Cnt+1);
_ ->
ok
end;
-list_dir_limit_cleanup(Dir, Handle, N, Cnt) ->
+list_dir_limit_cleanup(Dir, N, Cnt) ->
Name = integer_to_list(Cnt),
- ?PRIM_FILE:delete(Handle, filename:join(Dir, Name)),
- list_dir_limit_cleanup(Dir, Handle, N, Cnt+1).
+ ?PRIM_FILE:delete(filename:join(Dir, Name)),
+ list_dir_limit_cleanup(Dir, N, Cnt+1).
%%%
%%% Test list_dir() on a non-existing pathname.
@@ -1983,7 +1675,7 @@ list_dir_limit_cleanup(Dir, Handle, N, Cnt) ->
list_dir_error(Config) ->
Priv = proplists:get_value(priv_dir, Config),
NonExisting = filename:join(Priv, "non-existing-dir"),
- {error,enoent} = prim_file:list_dir(NonExisting),
+ {error,enoent} = ?PRIM_FILE:list_dir(NonExisting),
ok.
%%%
@@ -2022,11 +1714,13 @@ run_large_file_test(Config, Run, Name) ->
{{unix,sunos},OsVersion} when OsVersion < {5,5,1} ->
{skip,"Only supported on Win32, Unix or SunOS >= 5.5.1"};
{{unix,_},_} ->
- N = unix_free(proplists:get_value(priv_dir, Config)),
- io:format("Free disk: ~w KByte~n", [N]),
- if N < 5 bsl 20 ->
+ DiscFree = unix_free(proplists:get_value(priv_dir, Config)),
+ MemFree = free_memory(),
+ io:format("Free disk: ~w KByte~n", [DiscFree]),
+ io:format("Free mem: ~w MByte~n", [MemFree]),
+ if DiscFree < 5 bsl 20; MemFree < 5 bsl 10 ->
%% Less than 5 GByte free
- {skip,"Less than 5 GByte free disk"};
+ {skip,"Less than 5 GByte free disk/mem"};
true ->
do_run_large_file_test(Config, Run, Name)
end;
@@ -2049,7 +1743,7 @@ do_run_large_file_test(Config, Run, Name0) ->
{'DOWN',Mref,_,_,_} -> ok;
{Tester,done} -> ok
end,
- prim_file:delete(Name)
+ ?PRIM_FILE:delete(Name)
end),
%% Run the test case.
@@ -2079,6 +1773,40 @@ zip_data([], Bs) ->
zip_data(As, []) ->
As.
+%% Stolen from emulator -> alloc_SUITE
+free_memory() ->
+ %% Free memory in MB.
+ try
+ SMD = memsup:get_system_memory_data(),
+ {value, {free_memory, Free}} = lists:keysearch(free_memory, 1, SMD),
+ TotFree = (Free +
+ case lists:keysearch(cached_memory, 1, SMD) of
+ {value, {cached_memory, Cached}} -> Cached;
+ false -> 0
+ end +
+ case lists:keysearch(buffered_memory, 1, SMD) of
+ {value, {buffered_memory, Buffed}} -> Buffed;
+ false -> 0
+ end),
+ usable_mem(TotFree) div (1024*1024)
+ catch
+ error : undef ->
+ ct:fail({"os_mon not built"})
+ end.
+
+usable_mem(Memory) ->
+ case test_server:is_valgrind() of
+ true ->
+ %% Valgrind uses extra memory for the V- and A-bits.
+ %% http://valgrind.org/docs/manual/mc-manual.html#mc-manual.value
+ %% Docs says it uses "compression to represent the V bits compactly"
+ %% but let's be conservative and cut usable memory in half.
+ Memory div 2;
+ false ->
+ Memory
+ end.
+
+
%%%-----------------------------------------------------------------
%%% Utilities
rm_rf(Mod,Dir) ->
diff --git a/lib/kernel/test/sendfile_SUITE.erl b/lib/kernel/test/sendfile_SUITE.erl
index 2673c38494..ad060aa05c 100644
--- a/lib/kernel/test/sendfile_SUITE.erl
+++ b/lib/kernel/test/sendfile_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2011-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2011-2017. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -23,30 +23,41 @@
-include_lib("common_test/include/ct.hrl").
-include_lib("kernel/include/file.hrl").
--compile(export_all).
-
-all() -> [{group,async_threads},
- {group,no_async_threads}].
-
-groups() ->
- [{async_threads,[],tcs()},
- {no_async_threads,[],tcs()}].
-
-tcs() ->
- [t_sendfile_small
- ,t_sendfile_big_all
- ,t_sendfile_big_size
- ,t_sendfile_many_small
- ,t_sendfile_partial
- ,t_sendfile_offset
- ,t_sendfile_sendafter
- ,t_sendfile_recvafter
- ,t_sendfile_recvafter_remoteclose
- ,t_sendfile_sendduring
- ,t_sendfile_recvduring
- ,t_sendfile_closeduring
- ,t_sendfile_crashduring
- ].
+-export([all/0, init_per_suite/1, end_per_suite/1, init_per_testcase/2]).
+
+-export([sendfile_server/2, sendfile_do_recv/2, init/1, handle_event/2]).
+
+-export(
+ [t_sendfile_small/1,
+ t_sendfile_big_all/1,
+ t_sendfile_big_size/1,
+ t_sendfile_many_small/1,
+ t_sendfile_partial/1,
+ t_sendfile_offset/1,
+ t_sendfile_sendafter/1,
+ t_sendfile_recvafter/1,
+ t_sendfile_recvafter_remoteclose/1,
+ t_sendfile_sendduring/1,
+ t_sendfile_recvduring/1,
+ t_sendfile_closeduring/1,
+ t_sendfile_crashduring/1,
+ t_sendfile_arguments/1]).
+
+all() ->
+ [t_sendfile_small,
+ t_sendfile_big_all,
+ t_sendfile_big_size,
+ t_sendfile_many_small,
+ t_sendfile_partial,
+ t_sendfile_offset,
+ t_sendfile_sendafter,
+ t_sendfile_recvafter,
+ t_sendfile_recvafter_remoteclose,
+ t_sendfile_sendduring,
+ t_sendfile_recvduring,
+ t_sendfile_closeduring,
+ t_sendfile_crashduring,
+ t_sendfile_arguments].
init_per_suite(Config) ->
case {os:type(),os:version()} of
@@ -72,43 +83,32 @@ init_per_suite(Config) ->
end_per_suite(Config) ->
file:delete(proplists:get_value(big_file, Config)).
-init_per_group(async_threads,Config) ->
- case erlang:system_info(thread_pool_size) of
- 0 ->
- {skip,"No async threads"};
- _ ->
- [{sendfile_opts,[{use_threads,true}]}|Config]
- end;
-init_per_group(no_async_threads,Config) ->
- [{sendfile_opts,[{use_threads,false}]}|Config].
-
-end_per_group(_,_Config) ->
- ok.
-
init_per_testcase(TC,Config) when TC == t_sendfile_recvduring;
TC == t_sendfile_sendduring ->
Filename = proplists:get_value(small_file, Config),
Send = fun(Sock) ->
{_Size, Data} = sendfile_file_info(Filename),
- {ok,D} = file:open(Filename, [raw,binary,read]),
- prim_file:sendfile(D, Sock, 0, 0, 0,
- [],[],[]),
+ {ok,Fd} = file:open(Filename, [raw,binary,read]),
+ %% Determine whether the driver has native support by
+ %% hitting the raw module directly; file:sendfile/5 will
+ %% land in the fallback if it doesn't.
+ RawModule = Fd#file_descriptor.module,
+ {ok, _Ignored} = RawModule:sendfile(Fd,Sock,0,0,0,[],[],[]),
Data
end,
%% Check if sendfile is supported on this platform
case catch sendfile_send(Send) of
ok ->
- Config;
+ init_per_testcase(t_sendfile, Config);
Error ->
ct:log("Error: ~p",[Error]),
{skip,"Not supported"}
end;
-init_per_testcase(_Tc,Config) ->
+init_per_testcase(_TC,Config) ->
Config.
-
t_sendfile_small(Config) when is_list(Config) ->
Filename = proplists:get_value(small_file, Config),
@@ -124,7 +124,7 @@ t_sendfile_small(Config) when is_list(Config) ->
t_sendfile_many_small(Config) when is_list(Config) ->
Filename = proplists:get_value(small_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
error_logger:add_report_handler(?MODULE,[self()]),
@@ -151,7 +151,7 @@ t_sendfile_many_small(Config) when is_list(Config) ->
t_sendfile_big_all(Config) when is_list(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -165,7 +165,7 @@ t_sendfile_big_all(Config) when is_list(Config) ->
t_sendfile_big_size(Config) ->
Filename = proplists:get_value(big_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
SendAll = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -180,7 +180,7 @@ t_sendfile_big_size(Config) ->
t_sendfile_partial(Config) ->
Filename = proplists:get_value(small_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
SendSingle = fun(Sock) ->
{_Size, <<Data:5/binary,_/binary>>} =
@@ -217,7 +217,7 @@ t_sendfile_partial(Config) ->
t_sendfile_offset(Config) ->
Filename = proplists:get_value(small_file, Config),
FileOpts = proplists:get_value(file_opts, Config, []),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{_Size, <<_:5/binary,Data:3/binary,_/binary>> = AllData} =
@@ -233,7 +233,7 @@ t_sendfile_offset(Config) ->
t_sendfile_sendafter(Config) ->
Filename = proplists:get_value(small_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{Size, Data} = sendfile_file_info(Filename),
@@ -246,7 +246,7 @@ t_sendfile_sendafter(Config) ->
t_sendfile_recvafter(Config) ->
Filename = proplists:get_value(small_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{Size, Data} = sendfile_file_info(Filename),
@@ -279,7 +279,7 @@ t_sendfile_recvafter_remoteclose(Config) ->
t_sendfile_sendduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -296,7 +296,7 @@ t_sendfile_sendduring(Config) ->
t_sendfile_recvduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock) ->
{ok, #file_info{size = Size}} =
@@ -315,7 +315,7 @@ t_sendfile_recvduring(Config) ->
t_sendfile_closeduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
Send = fun(Sock,SFServPid) ->
spawn_link(fun() ->
@@ -341,11 +341,25 @@ t_sendfile_closeduring(Config) ->
-1
end,
- ok = sendfile_send({127,0,0,1}, Send, 0).
+ ok = sendfile_send({127,0,0,1}, Send, 0, [{active,false}]),
+ [] = flush(),
+ ok = sendfile_send({127,0,0,1}, Send, 0, [{active,true}]),
+ [] = flush(),
+ ok.
+
+flush() ->
+ lists:reverse(flush([])).
+
+flush(Acc) ->
+ receive M ->
+ flush([M | Acc])
+ after 0 ->
+ Acc
+ end.
t_sendfile_crashduring(Config) ->
Filename = proplists:get_value(big_file, Config),
- SendfileOpts = proplists:get_value(sendfile_opts, Config),
+ SendfileOpts = proplists:get_value(sendfile_opts, Config, []),
error_logger:add_report_handler(?MODULE,[self()]),
@@ -373,18 +387,52 @@ t_sendfile_crashduring(Config) ->
end
end.
+t_sendfile_arguments(Config) ->
+ Filename = proplists:get_value(small_file, Config),
+
+ {ok, Listener} = gen_tcp:listen(0,
+ [{packet, 0}, {active, false}, {reuseaddr, true}]),
+ {ok, Port} = inet:port(Listener),
+
+ ErrorCheck =
+ fun(Reason, Offset, Length, Opts) ->
+ {ok, Sender} = gen_tcp:connect({127, 0, 0, 1}, Port,
+ [{packet, 0}, {active, false}]),
+ {ok, Receiver} = gen_tcp:accept(Listener),
+ {ok, Fd} = file:open(Filename, [read, raw]),
+ {error, Reason} = file:sendfile(Fd, Sender, Offset, Length, Opts),
+ gen_tcp:close(Receiver),
+ gen_tcp:close(Sender),
+ file:close(Fd)
+ end,
+
+ ErrorCheck(einval, -1, 0, []),
+ ErrorCheck(einval, 0, -1, []),
+ ErrorCheck(badarg, gurka, 0, []),
+ ErrorCheck(badarg, 0, gurka, []),
+ ErrorCheck(badarg, 0, 0, gurka),
+ ErrorCheck(badarg, 0, 0, [{chunk_size, gurka}]),
+
+ gen_tcp:close(Listener),
+
+ ok.
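
The new t_sendfile_arguments case only exercises the error paths of file:sendfile/5. For contrast, a minimal happy-path call has this shape (sketch only; the chunk size and the helper name send_whole_file are illustrative, not part of the suite):

    %% Offset 0 and Bytes 0 mean "send everything from the start of the file".
    send_whole_file(Path, Sock) ->
        {ok, Fd} = file:open(Path, [read, raw, binary]),
        try
            {ok, _BytesSent} = file:sendfile(Fd, Sock, 0, 0, [{chunk_size, 65536}])
        after
            ok = file:close(Fd)
        end.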
+
%% Generic sendfile server code
sendfile_send(Send) ->
sendfile_send({127,0,0,1},Send).
sendfile_send(Host, Send) ->
sendfile_send(Host, Send, []).
sendfile_send(Host, Send, Orig) ->
+ sendfile_send(Host, Send, Orig, [{active,false}]).
+
+sendfile_send(Host, Send, Orig, SockOpts) ->
+
SFServer = spawn_link(?MODULE, sendfile_server, [self(), Orig]),
receive
{server, Port} ->
- {ok, Sock} = gen_tcp:connect(Host, Port,
- [binary,{packet,0},
- {active,false}]),
+ Opts = [binary,{packet,0}|SockOpts],
+ io:format("connect with opts = ~p\n", [Opts]),
+ {ok, Sock} = gen_tcp:connect(Host, Port, Opts),
Data = case proplists:get_value(arity,erlang:fun_info(Send)) of
1 ->
Send(Sock);
diff --git a/lib/kernel/test/seq_trace_SUITE.erl b/lib/kernel/test/seq_trace_SUITE.erl
index be23a1933f..83a94ab087 100644
--- a/lib/kernel/test/seq_trace_SUITE.erl
+++ b/lib/kernel/test/seq_trace_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -19,13 +19,18 @@
%%
-module(seq_trace_SUITE).
+%% label_capability_mismatch needs to run a part of the test on an OTP 20 node.
+-compile(r20).
+
-export([all/0, suite/0,groups/0,init_per_suite/1, end_per_suite/1,
init_per_group/2,end_per_group/2,
init_per_testcase/2,end_per_testcase/2]).
-export([token_set_get/1, tracer_set_get/1, print/1,
+ old_heap_token/1,
send/1, distributed_send/1, recv/1, distributed_recv/1,
trace_exit/1, distributed_exit/1, call/1, port/1,
- match_set_seq_token/1, gc_seq_token/1]).
+ match_set_seq_token/1, gc_seq_token/1, label_capability_mismatch/1,
+ send_literal/1]).
%% internal exports
-export([simple_tracer/2, one_time_receiver/0, one_time_receiver/1,
@@ -44,10 +49,11 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [token_set_get, tracer_set_get, print, send,
+ [token_set_get, tracer_set_get, print, send, send_literal,
distributed_send, recv, distributed_recv, trace_exit,
+ old_heap_token,
distributed_exit, call, port, match_set_seq_token,
- gc_seq_token].
+ gc_seq_token, label_capability_mismatch].
groups() ->
[].
@@ -90,8 +96,8 @@ do_token_set_get(TsType) ->
%% Test that initial seq_trace is disabled
[] = seq_trace:get_token(),
%% Test setting and reading the different fields
- 0 = seq_trace:set_token(label,17),
- {label,17} = seq_trace:get_token(label),
+ 0 = seq_trace:set_token(label,{my_label,1}),
+ {label,{my_label,1}} = seq_trace:get_token(label),
false = seq_trace:set_token(print,true),
{print,true} = seq_trace:get_token(print),
false = seq_trace:set_token(send,true),
@@ -101,12 +107,12 @@ do_token_set_get(TsType) ->
false = seq_trace:set_token(TsType,true),
{TsType,true} = seq_trace:get_token(TsType),
%% Check the whole token
- {Flags,17,0,Self,0} = seq_trace:get_token(), % all flags are set
+ {Flags,{my_label,1},0,Self,0} = seq_trace:get_token(), % all flags are set
%% Test setting and reading the 'serial' field
{0,0} = seq_trace:set_token(serial,{3,5}),
{serial,{3,5}} = seq_trace:get_token(serial),
%% Check the whole token, test that a whole token can be set and get
- {Flags,17,5,Self,3} = seq_trace:get_token(),
+ {Flags,{my_label,1},5,Self,3} = seq_trace:get_token(),
seq_trace:set_token({Flags,19,7,Self,5}),
{Flags,19,7,Self,5} = seq_trace:get_token(),
%% Check that receive timeout does not reset token
@@ -145,34 +151,66 @@ tracer_set_get(Config) when is_list(Config) ->
ok.
print(Config) when is_list(Config) ->
- lists:foreach(fun do_print/1, ?TIMESTAMP_MODES).
+ [do_print(TsType, Label) || TsType <- ?TIMESTAMP_MODES,
+ Label <- [17, "label"]].
-do_print(TsType) ->
+do_print(TsType, Label) ->
start_tracer(),
+ seq_trace:set_token(label, Label),
set_token_flags([print, TsType]),
- seq_trace:print(0,print1),
+ seq_trace:print(Label,print1),
seq_trace:print(1,print2),
seq_trace:print(print3),
seq_trace:reset_trace(),
- [{0,{print,_,_,[],print1}, Ts0},
- {0,{print,_,_,[],print3}, Ts1}] = stop_tracer(2),
+ [{Label,{print,_,_,[],print1}, Ts0},
+ {Label,{print,_,_,[],print3}, Ts1}] = stop_tracer(2),
check_ts(TsType, Ts0),
check_ts(TsType, Ts1).
-
+
send(Config) when is_list(Config) ->
lists:foreach(fun do_send/1, ?TIMESTAMP_MODES).
do_send(TsType) ->
+ do_send(TsType, send).
+
+do_send(TsType, Msg) ->
seq_trace:reset_trace(),
start_tracer(),
Receiver = spawn(?MODULE,one_time_receiver,[]),
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
set_token_flags([send, TsType]),
- Receiver ! send,
+ Receiver ! Msg,
Self = self(),
seq_trace:reset_trace(),
- [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
+ [{Label,{send,_,Self,Receiver,Msg}, Ts}] = stop_tracer(1),
check_ts(TsType, Ts).
+%% This testcase tests that we do not segfault when we have a
+%% literal as the message and the message is copied onto the
+%% heap during a GC.
+send_literal(Config) when is_list(Config) ->
+ lists:foreach(fun do_send_literal/1,
+ [atom, make_ref(), ets:new(hej,[]), 1 bsl 64,
+ "gurka", {tuple,test,with,#{}}, #{}]).
+
+do_send_literal(Msg) ->
+ N = 10000,
+ seq_trace:reset_trace(),
+ start_tracer(),
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
+ set_token_flags([send, 'receive', no_timestamp]),
+ Receiver = spawn_link(fun() -> receive ok -> ok end end),
+ [Receiver ! Msg || _ <- lists:seq(1, N)],
+ erlang:garbage_collect(Receiver),
+ [Receiver ! Msg || _ <- lists:seq(1, N)],
+ erlang:garbage_collect(Receiver),
+ Self = self(),
+ seq_trace:reset_trace(),
+ [{Label,{send,_,Self,Receiver,Msg}, Ts} | _] = stop_tracer(N),
+ check_ts(no_timestamp, Ts).
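
Besides the GC/literal regression test, the hunks above cover the behavioural change this suite now targets: seq_trace labels are no longer limited to small integers but may be any term (tuples, references, strings). A stand-alone sketch of that usage, with a helper process acting as system tracer much like the suite's own simple_tracer (helper and function names are illustrative):

    %% Sketch: tag one send with a composite label and collect the trace event.
    trace_one_send(Receiver) ->
        Caller = self(),
        Tracer = spawn(fun() ->
                           receive
                               {seq_trace, Label, Info} ->
                                   Caller ! {trace, Label, Info}
                           end
                       end),
        seq_trace:set_system_tracer(Tracer),
        seq_trace:set_token(label, {request, make_ref()}),
        seq_trace:set_token(send, true),
        Receiver ! hello,
        seq_trace:reset_trace(),
        receive
            {trace, {request, _} = Label, {send, _Serial, _From, _To, hello}} ->
                {ok, Label}
        after 1000 ->
            timeout
        end.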
+
distributed_send(Config) when is_list(Config) ->
lists:foreach(fun do_distributed_send/1, ?TIMESTAMP_MODES).
@@ -184,14 +222,19 @@ do_distributed_send(TsType) ->
seq_trace:reset_trace(),
start_tracer(),
Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% Make sure complex labels survive the trip.
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
set_token_flags([send,TsType]),
+
Receiver ! send,
Self = self(),
seq_trace:reset_trace(),
stop_node(Node),
- [{0,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
+ [{Label,{send,_,Self,Receiver,send}, Ts}] = stop_tracer(1),
check_ts(TsType, Ts).
-
+
recv(Config) when is_list(Config) ->
lists:foreach(fun do_recv/1, ?TIMESTAMP_MODES).
@@ -220,7 +263,12 @@ do_distributed_recv(TsType) ->
seq_trace:reset_trace(),
rpc:call(Node,?MODULE,start_tracer,[]),
Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% Make sure complex labels survive the trip.
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
set_token_flags(['receive',TsType]),
+
Receiver ! 'receive',
%% let the other process receive the message:
receive after 1 -> ok end,
@@ -229,7 +277,7 @@ do_distributed_recv(TsType) ->
Result = rpc:call(Node,?MODULE,stop_tracer,[1]),
stop_node(Node),
ok = io:format("~p~n",[Result]),
- [{0,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result,
+ [{Label,{'receive',_,Self,Receiver,'receive'}, Ts}] = Result,
check_ts(TsType, Ts).
trace_exit(Config) when is_list(Config) ->
@@ -240,7 +288,12 @@ do_trace_exit(TsType) ->
start_tracer(),
Receiver = spawn_link(?MODULE, one_time_receiver, [exit]),
process_flag(trap_exit, true),
+
+ %% Make sure complex labels survive the trip.
+ Label = make_ref(),
+ seq_trace:set_token(label,Label),
set_token_flags([send, TsType]),
+
Receiver ! {before, exit},
%% let the other process receive the message:
receive
@@ -254,8 +307,8 @@ do_trace_exit(TsType) ->
Result = stop_tracer(2),
seq_trace:reset_trace(),
ok = io:format("~p~n", [Result]),
- [{0, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0},
- {0, {send, {1,2}, Receiver, Self,
+ [{Label, {send, {0,1}, Self, Receiver, {before, exit}}, Ts0},
+ {Label, {send, {1,2}, Receiver, Self,
{'EXIT', Receiver, {exit, {before, exit}}}}, Ts1}] = Result,
check_ts(TsType, Ts0),
check_ts(TsType, Ts1).
@@ -291,6 +344,74 @@ do_distributed_exit(TsType) ->
{'EXIT', Receiver, {exit, {before, exit}}}}, Ts}] = Result,
check_ts(TsType, Ts).
+label_capability_mismatch(Config) when is_list(Config) ->
+ Releases = ["20_latest"],
+ Available = [Rel || Rel <- Releases, test_server:is_release_available(Rel)],
+ case Available of
+ [] -> {skipped, "No incompatible releases available"};
+ _ ->
+ lists:foreach(fun do_incompatible_labels/1, Available),
+ lists:foreach(fun do_compatible_labels/1, Available),
+ ok
+ end.
+
+do_incompatible_labels(Rel) ->
+ Cookie = atom_to_list(erlang:get_cookie()),
+ {ok, Node} = test_server:start_node(
+ list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer,
+ [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]),
+
+ {_,Dir} = code:is_loaded(?MODULE),
+ Mdir = filename:dirname(Dir),
+ true = rpc:call(Node,code,add_patha,[Mdir]),
+ seq_trace:reset_trace(),
+ true = is_pid(rpc:call(Node,?MODULE,start_tracer,[])),
+ Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% This node does not support arbitrary labels, so it must fail with a
+ %% timeout as the token is dropped silently.
+ seq_trace:set_token(label,make_ref()),
+ seq_trace:set_token('receive',true),
+
+ Receiver ! 'receive',
+ %% let the other process receive the message:
+ receive after 10 -> ok end,
+ seq_trace:reset_trace(),
+
+ {error,timeout} = rpc:call(Node,?MODULE,stop_tracer,[1]),
+ stop_node(Node),
+ ok.
+
+do_compatible_labels(Rel) ->
+ Cookie = atom_to_list(erlang:get_cookie()),
+ {ok, Node} = test_server:start_node(
+ list_to_atom(atom_to_list(?MODULE)++"_"++Rel), peer,
+ [{args, " -setcookie "++Cookie}, {erl, [{release, Rel}]}]),
+
+ {_,Dir} = code:is_loaded(?MODULE),
+ Mdir = filename:dirname(Dir),
+ true = rpc:call(Node,code,add_patha,[Mdir]),
+ seq_trace:reset_trace(),
+ true = is_pid(rpc:call(Node,?MODULE,start_tracer,[])),
+ Receiver = spawn(Node,?MODULE,one_time_receiver,[]),
+
+ %% This node does not support arbitrary labels, but small integers should
+ %% still work.
+ Label = 1234,
+ seq_trace:set_token(label,Label),
+ seq_trace:set_token('receive',true),
+
+ Receiver ! 'receive',
+ %% let the other process receive the message:
+ receive after 10 -> ok end,
+ Self = self(),
+ seq_trace:reset_trace(),
+ Result = rpc:call(Node,?MODULE,stop_tracer,[1]),
+ stop_node(Node),
+ ok = io:format("~p~n",[Result]),
+ [{Label,{'receive',_,Self,Receiver,'receive'}, _}] = Result,
+ ok.
+
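%% The two cases above boil down to one rule: a node from a release without
%% support for arbitrary seq_trace labels silently drops a token whose label
%% it cannot represent (hence the tracer timeout), while small integer labels
%% keep working. A sender that must interoperate with such nodes can simply
%% stick to an integer label, e.g.:
%%
%%   seq_trace:set_token(label, 1234).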
call(doc) ->
"Tests special forms {is_seq_trace} and {get_seq_token} "
"in trace match specs.";
@@ -446,6 +567,24 @@ get_port_message(Port) ->
end.
+%% OTP-15849 ERL-700
+%% Verify changing label on existing token when it resides on old heap.
+%% Bug caused faulty ref from old to new heap.
+old_heap_token(Config) when is_list(Config) ->
+ seq_trace:set_token(label, 1),
+ erlang:garbage_collect(self(), [{type, minor}]),
+ erlang:garbage_collect(self(), [{type, minor}]),
+ %% Now token tuple should be on old-heap.
+ %% Set a new non-literal label which should reside on new-heap.
+ NewLabel = {self(), "new label"},
+ 1 = seq_trace:set_token(label, NewLabel),
+
+    %% If the bug is present, we now have a ref from the old heap to the new
+    %% heap. Yet another minor gc will turn that into a ref to deallocated memory.
+ erlang:garbage_collect(self(), [{type, minor}]),
+ {label,NewLabel} = seq_trace:get_token(label),
+ ok.
+
match_set_seq_token(doc) ->
["Tests that match spec function set_seq_token does not "
@@ -698,6 +837,24 @@ do_shrink(N) ->
erlang:garbage_collect(),
do_shrink(N-1).
+%% Test that messages from a port do not clear the token
+port_clean_token(Config) when is_list(Config) ->
+ seq_trace:reset_trace(),
+ Label = make_ref(),
+ seq_trace:set_token(label, Label),
+ {label,Label} = seq_trace:get_token(label),
+
+    %% Create a port and get messages from it.
+    %% We use os:cmd as a convenience, since it does open_port,
+    %% port_command and port_close, and receives the replies.
+    %% Relying on the internal implementation of os:cmd is not
+    %% ideal, but it will have to do.
+ os:cmd("ls"),
+
+ %% Make sure that the seq_trace token is still there
+ {label,Label} = seq_trace:get_token(label),
+
+ ok.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
diff --git a/lib/kernel/test/wrap_log_reader_SUITE.erl b/lib/kernel/test/wrap_log_reader_SUITE.erl
index 40a016aed0..59b088ca73 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -428,13 +428,14 @@ stop() ->
ok = wrap_log_test:stop(),
dl_wait().
-%% Give disk logs opened by 'logger' and 'wlt' time to close after
+%% Give disk logs opened by 'wlr_logger' and 'wlt' time to close after
%% receiving EXIT signals.
dl_wait() ->
case disk_log:accessible_logs() of
{[], []} ->
ok;
- _ ->
+ _X ->
+ erlang:display(_X),
timer:sleep(100),
dl_wait()
end.
@@ -507,27 +508,27 @@ add_ext(Name, Ext) ->
%% disk_log.
open(Log, File, Where) ->
- logger ! {open, self(), Log, File},
+ wlr_logger ! {open, self(), Log, File},
rec1(ok, Where).
open_ext(Log, File, Where) ->
- logger ! {open_ext, self(), Log, File},
+ wlr_logger ! {open_ext, self(), Log, File},
rec1(ok, Where).
close(Log) ->
- logger ! {close, self(), Log},
+ wlr_logger ! {close, self(), Log},
rec(ok, ?LINE).
sync(Log) ->
- logger ! {sync, self(), Log},
+ wlr_logger ! {sync, self(), Log},
rec(ok, ?LINE).
log_terms(File, Terms) ->
- logger ! {log_terms, self(), File, Terms},
+ wlr_logger ! {log_terms, self(), File, Terms},
rec(ok, ?LINE).
blog_terms(File, Terms) ->
- logger ! {blog_terms, self(), File, Terms},
+ wlr_logger ! {blog_terms, self(), File, Terms},
rec(ok, ?LINE).
rec1(M, Where) ->
diff --git a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
index 38449b6bb3..d2bac40192 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -36,9 +36,9 @@
-endif.
init() ->
- spawn(fun() -> start(logger) end),
+ spawn(fun() -> start(wlr_logger) end),
spawn(fun() -> start2(wlt) end),
- wait_registered(logger),
+ wait_registered(wlr_logger),
wait_registered(wlt),
ok.
@@ -52,9 +52,9 @@ wait_registered(Name) ->
end.
stop() ->
- catch logger ! exit,
+ catch wlr_logger ! exit,
catch wlt ! exit,
- wait_unregistered(logger),
+ wait_unregistered(wlr_logger),
wait_unregistered(wlt),
ok.
@@ -82,47 +82,47 @@ loop() ->
{open, Pid, Name, File} ->
R = disk_log:open([{name, Name}, {type, wrap}, {file, File},
{size, {?fsize, ?fno}}]),
- ?format("logger: open ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: open ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{open_ext, Pid, Name, File} ->
R = disk_log:open([{name, Name}, {type, wrap}, {file, File},
{format, external}, {size, {?fsize, ?fno}}]),
- ?format("logger: open ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: open ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{close, Pid, Name} ->
R = disk_log:close(Name),
- ?format("logger: close ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: close ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{sync, Pid, Name} ->
R = disk_log:sync(Name),
- ?format("logger: sync ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: sync ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{log_terms, Pid, Name, Terms} ->
R = disk_log:log_terms(Name, Terms),
- ?format("logger: log_terms ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: log_terms ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{blog_terms, Pid, Name, Terms} ->
R = disk_log:blog_terms(Name, Terms),
- ?format("logger: blog_terms ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: blog_terms ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
exit ->
- ?format("Stopping logger~n", []),
+ ?format("Stopping wlr_logger~n", []),
exit(normal);
_Else ->
- ?format("logger: ignored: ~p~n", [_Else]),
+ ?format("wlr_logger: ignored: ~p~n", [_Else]),
loop()
end.
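%% The registered 'wlr_logger' process above is driven by plain message
%% passing from wrap_log_reader_SUITE; a typical request/reply exchange looks
%% like this sketch (Log and File are chosen by the caller):
%%
%%   wlr_logger ! {open, self(), Log, File},
%%   receive Reply -> Reply end.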
diff --git a/lib/kernel/test/zlib_SUITE.erl b/lib/kernel/test/zlib_SUITE.erl
index f8257bd20c..52ae1b3ae6 100644
--- a/lib/kernel/test/zlib_SUITE.erl
+++ b/lib/kernel/test/zlib_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 2005-2016. All Rights Reserved.
+%% Copyright Ericsson AB 2005-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,60 +21,56 @@
-module(zlib_SUITE).
-include_lib("common_test/include/ct.hrl").
-
--compile(export_all).
-
--define(error(Format,Args),
- put(test_server_loc,{?MODULE,?LINE}),
- error(Format,Args,?MODULE,?LINE)).
-
-%% Learn erts team how to really write tests ;-)
--define(m(ExpectedRes,Expr),
- fun() ->
- ACtual1 = (catch (Expr)),
- try case ACtual1 of
- ExpectedRes -> ACtual1
- end
- catch
- error:{case_clause,ACtuAl} ->
- ?error("Not Matching Actual result was:~n ~p ~n",
- [ACtuAl]),
- ACtuAl
- end
- end()).
-
--define(BARG, {'EXIT',{badarg,[{zlib,_,_,_}|_]}}).
--define(DATA_ERROR, {'EXIT',{data_error,[{zlib,_,_,_}|_]}}).
-
-init_per_testcase(_Func, Config) ->
- Config.
-
-end_per_testcase(_Func, _Config) ->
- ok.
-
-error(Format, Args, File, Line) ->
- io:format("~p:~p: ERROR: " ++ Format, [File,Line|Args]),
- group_leader() ! {failed, File, Line}.
-
-%% Hopefully I don't need this to get it to work with the testserver..
-%% Fail = #'REASON'{file = filename:basename(File),
-%% line = Line,
-%% desc = Args},
-%% case global:whereis_name(mnesia_test_case_sup) of
-%% undefined ->
-%% ignore;
-%% Pid ->
-%% Pid ! Fail
-%% %% global:send(mnesia_test_case_sup, Fail),
-%% end,
-%% log("<>ERROR<>~n" ++ Format, Args, File, Line).
+-include_lib("common_test/include/ct_event.hrl").
+
+-export([suite/0, all/0, groups/0]).
+
+%% API group
+-export([api_open_close/1]).
+-export([api_deflateInit/1, api_deflateSetDictionary/1, api_deflateReset/1,
+ api_deflateParams/1, api_deflate/1, api_deflateEnd/1]).
+-export([api_inflateInit/1, api_inflateReset/1, api_inflate2/1, api_inflate3/1,
+ api_inflateChunk/1, api_safeInflate/1, api_inflateEnd/1]).
+-export([api_inflateSetDictionary/1, api_inflateGetDictionary/1]).
+-export([api_crc32/1, api_adler32/1]).
+-export([api_un_compress/1, api_un_zip/1, api_g_un_zip/1]).
+
+%% Examples group
+-export([intro/1]).
+
+%% Usage group
+-export([zip_usage/1, gz_usage/1, gz_usage2/1, compress_usage/1,
+ dictionary_usage/1, large_deflate/1, crc/1, adler/1,
+ only_allow_owner/1, sub_heap_binaries/1]).
+
+%% Bench group
+-export([inflate_bench_zeroed/1, inflate_bench_rand/1,
+ deflate_bench_zeroed/1, deflate_bench_rand/1,
+ chunk_bench_zeroed/1, chunk_bench_rand/1]).
+
+%% Others
+-export([smp/1, otp_9981/1, otp_7359/1]).
+
+-define(m(Guard, Expression),
+ fun() ->
+ Actual = (catch (Expression)),
+ case Actual of
+ Guard -> Actual;
+ _Other ->
+ ct:fail("Failed to match ~p, actual result was ~p",
+ [??Guard, Actual])
+ end
+ end()).
+
+-define(EXIT(Reason), {'EXIT',{Reason,[{_,_,_,_}|_]}}).
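%% A short usage note on the macros above: ?m/2 takes a match pattern
%% (optionally with a guard) and an expression, evaluates the expression
%% under catch, and fails the test case with the stringified pattern
%% (via ??Guard) and the actual term if they do not match; ?EXIT/1 builds
%% the {'EXIT',{Reason,StackTrace}} shape expected from calls that raise.
%% For instance (illustrative):
%%
%%   ?m(ok, zlib:close(zlib:open())),
%%   ?m(?EXIT(badarg), zlib:compress(not_a_binary)).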
suite() ->
[{ct_hooks,[ts_install_cth]},
{timetrap,{minutes,1}}].
all() ->
- [{group, api}, {group, examples}, {group, func}, smp,
+ [{group, api}, {group, examples}, {group, func},
+ {group, bench}, smp,
otp_9981,
otp_7359].
@@ -83,29 +79,20 @@ groups() ->
[api_open_close, api_deflateInit,
api_deflateSetDictionary, api_deflateReset,
api_deflateParams, api_deflate, api_deflateEnd,
- api_inflateInit, api_inflateSetDictionary,
- api_inflateSync, api_inflateReset, api_inflate, api_inflateChunk,
- api_inflateEnd, api_setBufsz, api_getBufsz, api_crc32,
- api_adler32, api_getQSize, api_un_compress, api_un_zip,
+ api_inflateInit, api_inflateSetDictionary, api_inflateGetDictionary,
+ api_inflateReset, api_inflate2, api_inflate3, api_inflateChunk,
+ api_safeInflate, api_inflateEnd, api_crc32,
+ api_adler32, api_un_compress, api_un_zip,
api_g_un_zip]},
{examples, [], [intro]},
{func, [],
[zip_usage, gz_usage, gz_usage2, compress_usage,
- dictionary_usage, large_deflate, crc, adler]}].
-
-init_per_suite(Config) ->
- Config.
-
-end_per_suite(_Config) ->
- ok.
-
-init_per_group(_GroupName, Config) ->
- Config.
-
-end_per_group(_GroupName, Config) ->
- Config.
-
-
+ dictionary_usage, large_deflate, crc, adler,
+ only_allow_owner, sub_heap_binaries]},
+ {bench,
+ [inflate_bench_zeroed, inflate_bench_rand,
+ deflate_bench_zeroed, deflate_bench_rand,
+ chunk_bench_zeroed, chunk_bench_rand]}].
%% Test open/0 and close/1.
api_open_close(Config) when is_list(Config) ->
@@ -113,7 +100,7 @@ api_open_close(Config) when is_list(Config) ->
Fd2 = zlib:open(),
?m(false,Fd1 == Fd2),
?m(ok,zlib:close(Fd1)),
- ?m(?BARG, zlib:close(Fd1)),
+ ?m(?EXIT(not_initialized), zlib:close(Fd1)),
?m(ok,zlib:close(Fd2)),
%% Make sure that we don't get any EXIT messages if trap_exit is enabled.
@@ -128,9 +115,11 @@ api_open_close(Config) when is_list(Config) ->
%% Test deflateInit/2 and /6.
api_deflateInit(Config) when is_list(Config) ->
Z1 = zlib:open(),
- ?m(?BARG, zlib:deflateInit(gurka, none)),
- ?m(?BARG, zlib:deflateInit(gurka, gurka)),
- ?m(?BARG, zlib:deflateInit(Z1, gurka)),
+
+ ?m(?EXIT(badarg), zlib:deflateInit(gurka, none)),
+
+ ?m(?EXIT(bad_compression_level), zlib:deflateInit(gurka, gurka)),
+ ?m(?EXIT(bad_compression_level), zlib:deflateInit(Z1, gurka)),
Levels = [none, default, best_speed, best_compression] ++ lists:seq(0,9),
lists:foreach(fun(Level) ->
Z = zlib:open(),
@@ -138,20 +127,30 @@ api_deflateInit(Config) when is_list(Config) ->
?m(ok,zlib:close(Z))
end, Levels),
%% /6
- ?m(?BARG, zlib:deflateInit(Z1,gurka,deflated,-15,8,default)),
-
- ?m(?BARG, zlib:deflateInit(Z1,default,undefined,-15,8,default)),
-
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,48,8,default)),
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-20,8,default)),
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-7,8,default)),
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,7,8,default)),
-
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,0,default)),
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,10,default)),
-
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,8,0)),
- ?m(?BARG, zlib:deflateInit(Z1,default,deflated,-15,8,undefined)),
+ ?m(?EXIT(bad_compression_level),
+ zlib:deflateInit(Z1,gurka,deflated,-15,8,default)),
+
+ ?m(?EXIT(bad_compression_method),
+ zlib:deflateInit(Z1,default,undefined,-15,8,default)),
+
+ ?m(?EXIT(bad_compression_strategy),
+ zlib:deflateInit(Z1,default,deflated,-15,8,0)),
+ ?m(?EXIT(bad_compression_strategy),
+ zlib:deflateInit(Z1,default,deflated,-15,8,undefined)),
+
+ ?m(?EXIT(bad_windowbits),
+ zlib:deflateInit(Z1,default,deflated,48,8,default)),
+ ?m(?EXIT(bad_windowbits),
+ zlib:deflateInit(Z1,default,deflated,-20,8,default)),
+ ?m(?EXIT(bad_windowbits),
+ zlib:deflateInit(Z1,default,deflated,-7,8,default)),
+ ?m(?EXIT(bad_windowbits),
+ zlib:deflateInit(Z1,default,deflated,7,8,default)),
+
+ ?m(?EXIT(bad_memlevel),
+ zlib:deflateInit(Z1,default,deflated,-15,0,default)),
+ ?m(?EXIT(bad_memlevel),
+ zlib:deflateInit(Z1,default,deflated,-15,10,default)),
lists:foreach(fun(Level) ->
Z = zlib:open(),
@@ -167,7 +166,7 @@ api_deflateInit(Config) when is_list(Config) ->
?m(ok, zlib:deflateInit(Z12,default,deflated,-Wbits,8,default)),
?m(ok,zlib:close(Z11)),
?m(ok,zlib:close(Z12))
- end, lists:seq(8, 15)),
+ end, lists:seq(9, 15)),
lists:foreach(fun(MemLevel) ->
Z = zlib:open(),
@@ -183,7 +182,11 @@ api_deflateInit(Config) when is_list(Config) ->
?m(ok,zlib:close(Z))
end, Strategies),
?m(ok, zlib:deflateInit(Z1,default,deflated,-15,8,default)),
- ?m({'EXIT',_}, zlib:deflateInit(Z1,none,deflated,-15,8,default)), %% ??
+
+ %% Let it crash for any reason; we don't care about the order in which the
+ %% parameters are checked.
+ ?m(?EXIT(_), zlib:deflateInit(Z1,none,deflated,-15,8,default)),
+
?m(ok, zlib:close(Z1)).
%% Test deflateSetDictionary.
@@ -192,17 +195,17 @@ api_deflateSetDictionary(Config) when is_list(Config) ->
?m(ok, zlib:deflateInit(Z1, default)),
?m(Id when is_integer(Id), zlib:deflateSetDictionary(Z1, <<1,1,2,3,4,5,1>>)),
?m(Id when is_integer(Id), zlib:deflateSetDictionary(Z1, [1,1,2,3,4,5,1])),
- ?m(?BARG, zlib:deflateSetDictionary(Z1, gurka)),
- ?m(?BARG, zlib:deflateSetDictionary(Z1, 128)),
- ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)),
- ?m({'EXIT',{stream_error,_}},zlib:deflateSetDictionary(Z1,<<1,1,2,3,4,5,1>>)),
+ ?m(?EXIT(badarg), zlib:deflateSetDictionary(Z1, gurka)),
+ ?m(?EXIT(badarg), zlib:deflateSetDictionary(Z1, 128)),
+ ?m(L when is_list(L), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)),
+ ?m(?EXIT(stream_error), zlib:deflateSetDictionary(Z1,<<1,1,2,3,4,5,1>>)),
?m(ok, zlib:close(Z1)).
%% Test deflateReset.
api_deflateReset(Config) when is_list(Config) ->
Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1, default)),
- ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)),
+ ?m(L when is_list(L), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)),
?m(ok, zlib:deflateReset(Z1)),
?m(ok, zlib:deflateReset(Z1)),
%% FIXME how do I make this go wrong??
@@ -210,12 +213,46 @@ api_deflateReset(Config) when is_list(Config) ->
%% Test deflateParams.
api_deflateParams(Config) when is_list(Config) ->
+ Levels = [none, default, best_speed, best_compression] ++ lists:seq(0, 9),
+ Strategies = [filtered, huffman_only, rle, default],
+
Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1, default)),
- ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, none)),
- ?m(ok, zlib:deflateParams(Z1, best_compression, huffman_only)),
- ?m(_, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, sync)),
- ?m(ok, zlib:close(Z1)).
+
+ ApiTest =
+ fun(Level, Strategy) ->
+ ?m(ok, zlib:deflateParams(Z1, Level, Strategy)),
+ ?m(ok, zlib:deflateReset(Z1))
+ end,
+
+ [ ApiTest(Level, Strategy) || Level <- Levels, Strategy <- Strategies ],
+
+ ?m(ok, zlib:close(Z1)),
+
+ FlushTest =
+ fun FlushTest(Size, Level, Strategy) ->
+ Z = zlib:open(),
+ ok = zlib:deflateInit(Z, default),
+ Data = gen_determ_rand_bytes(Size),
+ case zlib:deflate(Z, Data, none) of
+ [<<120, 156>>] ->
+ %% All data is present in the internal zlib state, and will
+ %% be flushed on deflateParams.
+
+ ok = zlib:deflateParams(Z, Level, Strategy),
+ Compressed = [<<120, 156>>, zlib:deflate(Z, <<>>, finish)],
+ Data = zlib:uncompress(Compressed),
+ zlib:close(Z),
+
+ FlushTest(Size + (1 bsl 10), Level, Strategy);
+ _Other ->
+ ok
+ end
+ end,
+
+ [ FlushTest(1, Level, Strategy) || Level <- Levels, Strategy <- Strategies ],
+
+ ok.
%% Test deflate.
api_deflate(Config) when is_list(Config) ->
@@ -231,11 +268,13 @@ api_deflate(Config) when is_list(Config) ->
?m(B when is_list(B), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, full)),
?m(B when is_list(B), zlib:deflate(Z1, <<>>, finish)),
- ?m(?BARG, zlib:deflate(gurka, <<1,1,1,1,1,1,1,1,1>>, full)),
- ?m(?BARG, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, asdj)),
- ?m(?BARG, zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, 198)),
+ ?m(?EXIT(badarg), zlib:deflate(gurka, <<1,1,1,1,1,1,1,1,1>>, full)),
+
+ ?m(?EXIT(bad_flush_mode), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, asdj)),
+ ?m(?EXIT(bad_flush_mode), zlib:deflate(Z1, <<1,1,1,1,1,1,1,1,1>>, 198)),
+
%% Causes problems ERROR REPORT
- ?m(?BARG, zlib:deflate(Z1, [asdj,asd], none)),
+ ?m(?EXIT(badarg), zlib:deflate(Z1, [asdj,asd], none)),
?m(ok, zlib:close(Z1)).
@@ -244,11 +283,11 @@ api_deflateEnd(Config) when is_list(Config) ->
Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1, default)),
?m(ok, zlib:deflateEnd(Z1)),
- ?m({'EXIT', {einval,_}}, zlib:deflateEnd(Z1)), %% ??
- ?m(?BARG, zlib:deflateEnd(gurka)),
+ ?m(?EXIT(not_initialized), zlib:deflateEnd(Z1)),
+ ?m(?EXIT(badarg), zlib:deflateEnd(gurka)),
?m(ok, zlib:deflateInit(Z1, default)),
?m(B when is_list(B), zlib:deflate(Z1, <<"Kilroy was here">>)),
- ?m({'EXIT', {data_error,_}}, zlib:deflateEnd(Z1)),
+ ?m(?EXIT(data_error), zlib:deflateEnd(Z1)),
?m(ok, zlib:deflateInit(Z1, default)),
?m(B when is_list(B), zlib:deflate(Z1, <<"Kilroy was here">>)),
?m(B when is_list(B), zlib:deflate(Z1, <<"Kilroy was here">>, finish)),
@@ -259,9 +298,9 @@ api_deflateEnd(Config) when is_list(Config) ->
%% Test inflateInit /1 and /2.
api_inflateInit(Config) when is_list(Config) ->
Z1 = zlib:open(),
- ?m(?BARG, zlib:inflateInit(gurka)),
+ ?m(?EXIT(badarg), zlib:inflateInit(gurka)),
?m(ok, zlib:inflateInit(Z1)),
- ?m({'EXIT',{einval,_}}, zlib:inflateInit(Z1, 15)), %% ??
+ ?m(?EXIT(already_initialized), zlib:inflateInit(Z1, 15)),
lists:foreach(fun(Wbits) ->
Z11 = zlib:open(),
?m(ok, zlib:inflateInit(Z11,Wbits)),
@@ -270,64 +309,90 @@ api_inflateInit(Config) when is_list(Config) ->
?m(ok,zlib:close(Z11)),
?m(ok,zlib:close(Z12))
end, lists:seq(8,15)),
- ?m(?BARG, zlib:inflateInit(gurka, -15)),
- ?m(?BARG, zlib:inflateInit(Z1, 7)),
- ?m(?BARG, zlib:inflateInit(Z1, -7)),
- ?m(?BARG, zlib:inflateInit(Z1, 48)),
- ?m(?BARG, zlib:inflateInit(Z1, -16)),
+ ?m(?EXIT(badarg), zlib:inflateInit(gurka, -15)),
+ ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, 7)),
+ ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, -7)),
+ ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, 48)),
+ ?m(?EXIT(bad_windowbits), zlib:inflateInit(Z1, -16)),
?m(ok, zlib:close(Z1)).
%% Test inflateSetDictionary.
api_inflateSetDictionary(Config) when is_list(Config) ->
Z1 = zlib:open(),
?m(ok, zlib:inflateInit(Z1)),
- ?m(?BARG, zlib:inflateSetDictionary(gurka,<<1,1,1,1,1>>)),
- ?m(?BARG, zlib:inflateSetDictionary(Z1,102)),
- ?m(?BARG, zlib:inflateSetDictionary(Z1,gurka)),
+ ?m(?EXIT(badarg), zlib:inflateSetDictionary(gurka,<<1,1,1,1,1>>)),
+ ?m(?EXIT(badarg), zlib:inflateSetDictionary(Z1,102)),
+ ?m(?EXIT(badarg), zlib:inflateSetDictionary(Z1,gurka)),
Dict = <<1,1,1,1,1>>,
- ?m({'EXIT',{stream_error,_}}, zlib:inflateSetDictionary(Z1,Dict)),
+ ?m(?EXIT(stream_error), zlib:inflateSetDictionary(Z1,Dict)),
?m(ok, zlib:close(Z1)).
-%% Test inflateSync.
-api_inflateSync(Config) when is_list(Config) ->
- {skip,"inflateSync/1 sucks"}.
-%% Z1 = zlib:open(),
-%% ?m(ok, zlib:deflateInit(Z1)),
-%% B1list0 = zlib:deflate(Z1, "gurkan gurra ger galna tunnor", full),
-%% B2 = zlib:deflate(Z1, "grodan boll", finish),
-%% io:format("~p\n", [B1list0]),
-%% io:format("~p\n", [B2]),
-%% ?m(ok, zlib:deflateEnd(Z1)),
-%% B1 = clobber(14, list_to_binary(B1list0)),
-%% Compressed = list_to_binary([B1,B2]),
-%% io:format("~p\n", [Compressed]),
-
-%% ?m(ok, zlib:inflateInit(Z1)),
-%% ?m(?BARG, zlib:inflateSync(gurka)),
-%% ?m({'EXIT',{data_error,_}}, zlib:inflate(Z1, Compressed)),
-%% ?m(ok, zlib:inflateSync(Z1)),
-%% Ubs = zlib:inflate(Z1, []),
-%% <<"grodan boll">> = list_to_binary(Ubs),
-%% ?m(ok, zlib:close(Z1)).
-
-clobber(N, Bin) when is_binary(Bin) ->
- T = list_to_tuple(binary_to_list(Bin)),
- Byte = case element(N, T) of
- 255 -> 254;
- B -> B+1
- end,
- list_to_binary(tuple_to_list(setelement(N, T, Byte))).
+%% Test inflateGetDictionary.
+api_inflateGetDictionary(Config) when is_list(Config) ->
+ Z1 = zlib:open(),
+ zlib:inflateInit(Z1),
+ IsOperationSupported =
+ case catch zlib:inflateGetDictionary(Z1) of
+ ?EXIT(not_supported) -> false;
+ _ -> true
+ end,
+ zlib:close(Z1),
+ api_inflateGetDictionary_if_supported(IsOperationSupported).
+
+api_inflateGetDictionary_if_supported(false) ->
+ {skip, "inflateGetDictionary/1 unsupported in current setup"};
+api_inflateGetDictionary_if_supported(true) ->
+ % Compress payload using custom dictionary
+ Z1 = zlib:open(),
+ ?m(ok, zlib:deflateInit(Z1)),
+ Dict = <<"foobar barfoo foo bar far boo">>,
+ Checksum = zlib:deflateSetDictionary(Z1, Dict),
+ Payload = <<"foobarbarbar">>,
+ Compressed = zlib:deflate(Z1, Payload, finish),
+ ?m(ok, zlib:close(Z1)),
+
+ % Decompress and test dictionary extraction with inflate/2
+ Z2 = zlib:open(),
+ ?m(ok, zlib:inflateInit(Z2)),
+ ?m(<<>>, iolist_to_binary(zlib:inflateGetDictionary(Z2))),
+ ?m(?EXIT(stream_error), zlib:inflateSetDictionary(Z2, Dict)),
+ ?m(?EXIT({need_dictionary,Checksum}), zlib:inflate(Z2, Compressed)),
+ ?m(ok, zlib:inflateSetDictionary(Z2, Dict)),
+ ?m(Dict, iolist_to_binary(zlib:inflateGetDictionary(Z2))),
+ Payload = iolist_to_binary(zlib:inflate(Z2, [])),
+ ?m(ok, zlib:close(Z2)),
+ ?m(?EXIT(not_initialized), zlib:inflateSetDictionary(Z2, Dict)),
+
+ %% ... And do the same for inflate/3
+ Z3 = zlib:open(),
+ ?m(ok, zlib:inflateInit(Z3)),
+ ?m(<<>>, iolist_to_binary(zlib:inflateGetDictionary(Z3))),
+ ?m(?EXIT(stream_error), zlib:inflateSetDictionary(Z3, Dict)),
+
+ {need_dictionary, Checksum, _Output = []} =
+ zlib:inflate(Z3, Compressed, [{exception_on_need_dict, false}]),
+
+ ?m(ok, zlib:inflateSetDictionary(Z3, Dict)),
+ ?m(Dict, iolist_to_binary(zlib:inflateGetDictionary(Z3))),
+
+ Payload = iolist_to_binary(
+ zlib:inflate(Z3, [], [{exception_on_need_dict, false}])),
+
+ ?m(ok, zlib:close(Z3)),
+ ?m(?EXIT(not_initialized), zlib:inflateSetDictionary(Z3, Dict)),
+
+ ok.
%% Test inflateReset.
api_inflateReset(Config) when is_list(Config) ->
Z1 = zlib:open(),
?m(ok, zlib:inflateInit(Z1)),
- ?m(?BARG, zlib:inflateReset(gurka)),
+ ?m(?EXIT(badarg), zlib:inflateReset(gurka)),
?m(ok, zlib:inflateReset(Z1)),
?m(ok, zlib:close(Z1)).
-%% Test inflate.
-api_inflate(Config) when is_list(Config) ->
+%% Test inflate/2
+api_inflate2(Config) when is_list(Config) ->
Data = [<<1,2,2,3,3,3,4,4,4,4>>],
Compressed = zlib:compress(Data),
Z1 = zlib:open(),
@@ -337,12 +402,32 @@ api_inflate(Config) when is_list(Config) ->
?m(ok, zlib:inflateEnd(Z1)),
?m(ok, zlib:inflateInit(Z1)),
?m(Data, zlib:inflate(Z1, Compressed)),
- ?m(?BARG, zlib:inflate(gurka, Compressed)),
- ?m(?BARG, zlib:inflate(Z1, 4384)),
- ?m(?BARG, zlib:inflate(Z1, [atom_list])),
+ ?m(?EXIT(badarg), zlib:inflate(gurka, Compressed)),
+ ?m(?EXIT(badarg), zlib:inflate(Z1, 4384)),
+ ?m(?EXIT(badarg), zlib:inflate(Z1, [atom_list])),
?m(ok, zlib:inflateEnd(Z1)),
?m(ok, zlib:inflateInit(Z1)),
- ?m({'EXIT',{data_error,_}}, zlib:inflate(Z1, <<2,1,2,1,2>>)),
+ ?m(?EXIT(data_error), zlib:inflate(Z1, <<2,1,2,1,2>>)),
+ ?m(ok, zlib:close(Z1)).
+
+%% Test inflate/3; same as inflate/2 but with the default options inverted.
+api_inflate3(Config) when is_list(Config) ->
+ Data = [<<1,2,2,3,3,3,4,4,4,4>>],
+ Options = [{exception_on_need_dict, false}],
+ Compressed = zlib:compress(Data),
+ Z1 = zlib:open(),
+ ?m(ok, zlib:inflateInit(Z1)),
+ ?m([], zlib:inflate(Z1, <<>>, Options)),
+ ?m(Data, zlib:inflate(Z1, Compressed)),
+ ?m(ok, zlib:inflateEnd(Z1)),
+ ?m(ok, zlib:inflateInit(Z1)),
+ ?m(Data, zlib:inflate(Z1, Compressed, Options)),
+ ?m(?EXIT(badarg), zlib:inflate(gurka, Compressed, Options)),
+ ?m(?EXIT(badarg), zlib:inflate(Z1, 4384, Options)),
+ ?m(?EXIT(badarg), zlib:inflate(Z1, [atom_list], Options)),
+ ?m(ok, zlib:inflateEnd(Z1)),
+ ?m(ok, zlib:inflateInit(Z1)),
+ ?m(?EXIT(data_error), zlib:inflate(Z1, <<2,1,2,1,2>>, Options)),
?m(ok, zlib:close(Z1)).
%% Test inflateChunk.
@@ -352,69 +437,109 @@ api_inflateChunk(Config) when is_list(Config) ->
Part1 = binary:part(Data, 0, ChunkSize),
Part2 = binary:part(Data, ChunkSize, ChunkSize),
Part3 = binary:part(Data, ChunkSize * 2, ChunkSize),
+
Compressed = zlib:compress(Data),
Z1 = zlib:open(),
+
zlib:setBufSize(Z1, ChunkSize),
+
?m(ok, zlib:inflateInit(Z1)),
- ?m([], zlib:inflateChunk(Z1, <<>>)),
- ?m({more, Part1}, zlib:inflateChunk(Z1, Compressed)),
- ?m({more, Part2}, zlib:inflateChunk(Z1)),
- ?m(Part3, zlib:inflateChunk(Z1)),
- ?m(ok, zlib:inflateEnd(Z1)),
+ 0 = iolist_size(zlib:inflateChunk(Z1, <<>>)),
+
+ {more, Part1AsIOList} = zlib:inflateChunk(Z1, Compressed),
+ {more, Part2AsIOList} = zlib:inflateChunk(Z1),
+ {more, Part3AsIOList} = zlib:inflateChunk(Z1),
+
+ [] = zlib:inflateChunk(Z1),
+ [] = zlib:inflateChunk(Z1),
+ [] = zlib:inflateChunk(Z1),
+
+ ?m(Part1, iolist_to_binary(Part1AsIOList)),
+ ?m(Part2, iolist_to_binary(Part2AsIOList)),
+ ?m(Part3, iolist_to_binary(Part3AsIOList)),
+
+ ?m(ok, zlib:inflateEnd(Z1)),
?m(ok, zlib:inflateInit(Z1)),
- ?m({more, Part1}, zlib:inflateChunk(Z1, Compressed)),
+
+ ?m({more, Part1AsIOList}, zlib:inflateChunk(Z1, Compressed)),
?m(ok, zlib:inflateReset(Z1)),
- zlib:setBufSize(Z1, size(Data)),
- ?m(Data, zlib:inflateChunk(Z1, Compressed)),
- ?m(ok, zlib:inflateEnd(Z1)),
+ zlib:setBufSize(Z1, byte_size(Data) + 1),
+ DataAsIOList = zlib:inflateChunk(Z1, Compressed),
+ ?m(Data, iolist_to_binary(DataAsIOList)),
+
+ ?m(ok, zlib:inflateEnd(Z1)),
?m(ok, zlib:inflateInit(Z1)),
- ?m(?BARG, zlib:inflateChunk(gurka, Compressed)),
- ?m(?BARG, zlib:inflateChunk(Z1, 4384)),
- ?m({'EXIT',{data_error,_}}, zlib:inflateEnd(Z1)),
+
+ ?m(?EXIT(badarg), zlib:inflateChunk(gurka, Compressed)),
+ ?m(?EXIT(badarg), zlib:inflateChunk(Z1, 4384)),
+
+ ?m(?EXIT(data_error), zlib:inflateEnd(Z1)),
+
?m(ok, zlib:close(Z1)).
-%% Test inflateEnd.
-api_inflateEnd(Config) when is_list(Config) ->
+%% Test safeInflate as a mirror of inflateChunk, but ignore the stuff about
+%% exact chunk sizes.
+api_safeInflate(Config) when is_list(Config) ->
+ Data = << <<(I rem 150)>> || I <- lists:seq(1, 20 bsl 10) >>,
+ Compressed = zlib:compress(Data),
Z1 = zlib:open(),
- ?m({'EXIT',{einval,_}}, zlib:inflateEnd(Z1)),
- ?m(ok, zlib:inflateInit(Z1)),
- ?m(?BARG, zlib:inflateEnd(gurka)),
- ?m({'EXIT',{data_error,_}}, zlib:inflateEnd(Z1)),
- ?m({'EXIT',{einval,_}}, zlib:inflateEnd(Z1)),
+
?m(ok, zlib:inflateInit(Z1)),
- ?m(B when is_list(B), zlib:inflate(Z1, zlib:compress("abc"))),
+
+ SafeInflateLoop =
+ fun
+ Loop({continue, Chunk}, Output) ->
+ Loop(zlib:safeInflate(Z1, []), [Output, Chunk]);
+ Loop({finished, Chunk}, Output) ->
+ [Output, Chunk]
+ end,
+
+ Decompressed = SafeInflateLoop(zlib:safeInflate(Z1, Compressed), []),
+ Data = iolist_to_binary(Decompressed),
+
?m(ok, zlib:inflateEnd(Z1)),
- ?m(ok, zlib:close(Z1)).
+ ?m(ok, zlib:inflateInit(Z1)),
-%% Test getBufsz.
-api_getBufsz(Config) when is_list(Config) ->
- Z1 = zlib:open(),
- ?m(Val when is_integer(Val), zlib:getBufSize(Z1)),
- ?m(?BARG, zlib:getBufSize(gurka)),
- ?m(ok, zlib:close(Z1)).
+ {continue, Partial} = zlib:safeInflate(Z1, Compressed),
+ PBin = iolist_to_binary(Partial),
+ PSize = byte_size(PBin),
+ <<PBin:PSize/binary, Rest/binary>> = Data,
-%% Test setBufsz.
-api_setBufsz(Config) when is_list(Config) ->
- Z1 = zlib:open(),
- ?m(?BARG, zlib:setBufSize(Z1, gurka)),
- ?m(?BARG, zlib:setBufSize(gurka, 1232330)),
- Sz = ?m( Val when is_integer(Val), zlib:getBufSize(Z1)),
- ?m(ok, zlib:setBufSize(Z1, Sz*2)),
- DSz = Sz*2,
- ?m(DSz, zlib:getBufSize(Z1)),
+ ?m(ok, zlib:inflateReset(Z1)),
+
+ {continue, Partial} = zlib:safeInflate(Z1, Compressed),
+ PBin = iolist_to_binary(Partial),
+ PSize = byte_size(PBin),
+ <<PBin:PSize/binary, Rest/binary>> = Data,
+
+ ?m(ok, zlib:inflateReset(Z1)),
+
+ SafeInflateLoop(zlib:safeInflate(Z1, Compressed), []),
+
+ ?m({finished, []}, zlib:safeInflate(Z1, Compressed)),
+ ?m({finished, []}, zlib:safeInflate(Z1, Compressed)),
+
+ ?m(ok, zlib:inflateReset(Z1)),
+ ?m(?EXIT(badarg), zlib:safeInflate(gurka, Compressed)),
+ ?m(?EXIT(badarg), zlib:safeInflate(Z1, 4384)),
+ ?m(?EXIT(data_error), zlib:inflateEnd(Z1)),
?m(ok, zlib:close(Z1)).
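%% The loop above generalizes into a small helper; a minimal sketch (not part
%% of the suite) that fully inflates Compressed on an already inflateInit:ed
%% stream Z:

safe_inflate_all(Z, Compressed) ->
    safe_inflate_loop(Z, zlib:safeInflate(Z, Compressed), []).

safe_inflate_loop(Z, {continue, Chunk}, Acc) ->
    safe_inflate_loop(Z, zlib:safeInflate(Z, []), [Acc, Chunk]);
safe_inflate_loop(_Z, {finished, Chunk}, Acc) ->
    iolist_to_binary([Acc, Chunk]).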
-%%% Debug function ??
-%% Test getQSize.
-api_getQSize(Config) when is_list(Config) ->
+%% Test inflateEnd.
+api_inflateEnd(Config) when is_list(Config) ->
Z1 = zlib:open(),
- Q = ?m(Val when is_integer(Val), zlib:getQSize(Z1)),
- io:format("QSize ~p ~n", [Q]),
- ?m(?BARG, zlib:getQSize(gurka)),
+ ?m(?EXIT(not_initialized), zlib:inflateEnd(Z1)),
+ ?m(ok, zlib:inflateInit(Z1)),
+ ?m(?EXIT(badarg), zlib:inflateEnd(gurka)),
+ ?m(?EXIT(data_error), zlib:inflateEnd(Z1)),
+ ?m(?EXIT(not_initialized), zlib:inflateEnd(Z1)),
+ ?m(ok, zlib:inflateInit(Z1)),
+ ?m(B when is_list(B), zlib:inflate(Z1, zlib:compress("abc"))),
+ ?m(ok, zlib:inflateEnd(Z1)),
?m(ok, zlib:close(Z1)).
%% Test crc32.
@@ -422,8 +547,8 @@ api_crc32(Config) when is_list(Config) ->
Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1,best_speed,deflated,-15,8,default)),
Bin = <<1,1,1,1,1,1,1,1,1>>,
- Compressed1 = ?m(_, zlib:deflate(Z1, Bin, none)),
- Compressed2 = ?m(_, zlib:deflate(Z1, <<>>, finish)),
+ Compressed1 = ?m(L when is_list(L), zlib:deflate(Z1, Bin, none)),
+ Compressed2 = ?m(L when is_list(L), zlib:deflate(Z1, <<>>, finish)),
Compressed = list_to_binary(Compressed1 ++ Compressed2),
CRC1 = ?m( CRC1 when is_integer(CRC1), zlib:crc32(Z1)),
?m(CRC1 when is_integer(CRC1), zlib:crc32(Z1,Bin)),
@@ -431,15 +556,15 @@ api_crc32(Config) when is_list(Config) ->
?m(CRC2 when is_integer(CRC2), zlib:crc32(Z1,Compressed)),
CRC2 = ?m(CRC2 when is_integer(CRC2), zlib:crc32(Z1,0,Compressed)),
?m(CRC3 when CRC2 /= CRC3, zlib:crc32(Z1,234,Compressed)),
- ?m(?BARG, zlib:crc32(gurka)),
- ?m(?BARG, zlib:crc32(Z1, not_a_binary)),
- ?m(?BARG, zlib:crc32(gurka, <<1,1,2,4,4>>)),
- ?m(?BARG, zlib:crc32(Z1, 2298929, not_a_binary)),
- ?m(?BARG, zlib:crc32(Z1, not_an_int, <<123,123,123,35,231>>)),
- ?m(?BARG, zlib:crc32_combine(Z1, not_an_int, 123123, 123)),
- ?m(?BARG, zlib:crc32_combine(Z1, noint, 123123, 123)),
- ?m(?BARG, zlib:crc32_combine(Z1, 123123, noint, 123)),
- ?m(?BARG, zlib:crc32_combine(Z1, 123123, 123, noint)),
+ ?m(?EXIT(badarg), zlib:crc32(gurka)),
+ ?m(?EXIT(badarg), zlib:crc32(Z1, not_a_binary)),
+ ?m(?EXIT(badarg), zlib:crc32(gurka, <<1,1,2,4,4>>)),
+ ?m(?EXIT(badarg), zlib:crc32(Z1, 2298929, not_a_binary)),
+ ?m(?EXIT(badarg), zlib:crc32(Z1, not_an_int, <<123,123,123,35,231>>)),
+ ?m(?EXIT(badarg), zlib:crc32_combine(Z1, not_an_int, 123123, 123)),
+ ?m(?EXIT(badarg), zlib:crc32_combine(Z1, noint, 123123, 123)),
+ ?m(?EXIT(badarg), zlib:crc32_combine(Z1, 123123, noint, 123)),
+ ?m(?EXIT(badarg), zlib:crc32_combine(Z1, 123123, 123, noint)),
?m(ok, zlib:deflateEnd(Z1)),
?m(ok, zlib:close(Z1)).
@@ -448,74 +573,129 @@ api_adler32(Config) when is_list(Config) ->
Z1 = zlib:open(),
?m(ok, zlib:deflateInit(Z1,best_speed,deflated,-15,8,default)),
Bin = <<1,1,1,1,1,1,1,1,1>>,
- Compressed1 = ?m(_, zlib:deflate(Z1, Bin, none)),
- Compressed2 = ?m(_, zlib:deflate(Z1, <<>>, finish)),
+ Compressed1 = ?m(L when is_list(L), zlib:deflate(Z1, Bin, none)),
+ Compressed2 = ?m(L when is_list(L), zlib:deflate(Z1, <<>>, finish)),
Compressed = list_to_binary(Compressed1 ++ Compressed2),
?m(ADLER1 when is_integer(ADLER1), zlib:adler32(Z1,Bin)),
?m(ADLER1 when is_integer(ADLER1), zlib:adler32(Z1,binary_to_list(Bin))),
ADLER2 = ?m(ADLER2 when is_integer(ADLER2), zlib:adler32(Z1,Compressed)),
?m(ADLER2 when is_integer(ADLER2), zlib:adler32(Z1,1,Compressed)),
?m(ADLER3 when ADLER2 /= ADLER3, zlib:adler32(Z1,234,Compressed)),
- ?m(?BARG, zlib:adler32(Z1, not_a_binary)),
- ?m(?BARG, zlib:adler32(gurka, <<1,1,2,4,4>>)),
- ?m(?BARG, zlib:adler32(Z1, 2298929, not_a_binary)),
- ?m(?BARG, zlib:adler32(Z1, not_an_int, <<123,123,123,35,231>>)),
- ?m(?BARG, zlib:adler32_combine(Z1, noint, 123123, 123)),
- ?m(?BARG, zlib:adler32_combine(Z1, 123123, noint, 123)),
- ?m(?BARG, zlib:adler32_combine(Z1, 123123, 123, noint)),
+ ?m(?EXIT(badarg), zlib:adler32(Z1, not_a_binary)),
+ ?m(?EXIT(badarg), zlib:adler32(gurka, <<1,1,2,4,4>>)),
+ ?m(?EXIT(badarg), zlib:adler32(Z1, 2298929, not_a_binary)),
+ ?m(?EXIT(badarg), zlib:adler32(Z1, not_an_int, <<123,123,123,35,231>>)),
+ ?m(?EXIT(badarg), zlib:adler32_combine(Z1, noint, 123123, 123)),
+ ?m(?EXIT(badarg), zlib:adler32_combine(Z1, 123123, noint, 123)),
+ ?m(?EXIT(badarg), zlib:adler32_combine(Z1, 123123, 123, noint)),
?m(ok, zlib:deflateEnd(Z1)),
?m(ok, zlib:close(Z1)).
%% Test compress.
api_un_compress(Config) when is_list(Config) ->
- ?m(?BARG,zlib:compress(not_a_binary)),
+ ?m(?EXIT(badarg),zlib:compress(not_a_binary)),
Bin = <<1,11,1,23,45>>,
Comp = zlib:compress(Bin),
- ?m(?BARG,zlib:uncompress(not_a_binary)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<171,171,171,171,171>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120,156>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120,156,3>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<120,156,3,0>>)),
- ?m({'EXIT',{data_error,_}}, zlib:uncompress(<<0,156,3,0,0,0,0,1>>)),
+ ?m(?EXIT(badarg),zlib:uncompress(not_a_binary)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<171,171,171,171,171>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<120>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<120,156>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<120,156,3>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<120,156,3,0>>)),
+ ?m(?EXIT(data_error), zlib:uncompress(<<0,156,3,0,0,0,0,1>>)),
?m(Bin, zlib:uncompress(binary_to_list(Comp))),
?m(Bin, zlib:uncompress(Comp)).
%% Test zip.
api_un_zip(Config) when is_list(Config) ->
- ?m(?BARG,zlib:zip(not_a_binary)),
+ ?m(?EXIT(badarg),zlib:zip(not_a_binary)),
Bin = <<1,11,1,23,45>>,
Comp = zlib:zip(Bin),
?m(Comp, zlib:zip(binary_to_list(Bin))),
- ?m(?BARG,zlib:unzip(not_a_binary)),
- ?m({'EXIT',{data_error,_}}, zlib:unzip(<<171,171,171,171,171>>)),
- ?m({'EXIT',{data_error,_}}, zlib:unzip(<<>>)),
+ ?m(?EXIT(badarg),zlib:unzip(not_a_binary)),
+ ?m(?EXIT(data_error), zlib:unzip(<<171,171,171,171,171>>)),
+ ?m(?EXIT(data_error), zlib:unzip(<<>>)),
?m(Bin, zlib:unzip(Comp)),
?m(Bin, zlib:unzip(binary_to_list(Comp))),
%% OTP-6396
- B = <<131,104,19,100,0,13,99,95,99,105,100,95,99,115,103,115,110,95,50,97,1,107,0,4,208,161,246,29,107,0,3,237,166,224,107,0,6,66,240,153,0,2,10,1,0,8,97,116,116,97,99,104,101,100,104,2,100,0,22,117,112,100,97,116,101,95,112,100,112,95,99,111,110,116,101,120,116,95,114,101,113,107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,12,3,197,31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,104,116,73,64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,5,0,33,4,4,10,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,1,114,45,97,112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,50,52,48,4,103,112,114,115,8,0,104,2,104,2,100,0,8,97,99,116,105,118,97,116,101,104,23,100,0,11,112,100,112,95,99,111,110,116,1,120,116,100,0,7,112,114,105,109,97,114,121,97,1,100,0,9,117,110,100,101,102,105,110,101,100,97,1,97,4,97,4,97,7,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,10100,100,0,9,117,110,100,101,102,105,110,101,100,100,0,5,102,97,108,115,101,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,1,101,100,97,0,100,0,9,117,110,100,101,102,105,110,101,100,107,0,4,16,0,1,144,107,0,4,61,139,186,181,107,0,4,10,8,201,49,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,0,101,100,100,0,9,117,110,100,101,102,105,110,101,100,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,16,97,21,106,108,0,0,0,3,104,2,97,1,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,167,20,104,2,97,4,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,16,97,21,104,2,97,10,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,16,97,26,106,100,0,5,118,101,114,57,57,100,0,9,117,110,0,101,102,105,110,101,100,107,0,2,0,244,107,0,4,10,6,102,195,107,0,4,10,6,102,195,100,0,9,117,110,100,101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,107,0,125,248,143,0,203,25115,157,116,65,185,65,172,55,87,164,88,225,50,203,251,115,157,116,65,185,65,172,55,87,164,88,225,50,0,0,82,153,50,0,200,98,87,148,237,193,185,65,149,167,69,144,14,16,153,50,3,81,70,94,13,109,193,1,120,5,181,113,198,118,50,3,81,70,94,13,109,193,185,120,5,181,113,198,118,153,3,81,70,94,13,109,193,185,120,5,181,113,198,118,153,50,16,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,113,92,2,119,128,0,0,108,0,0,1,107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,12,3,11,97,31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,104,116,73,64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,0,33,4,4,10,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,101,114,45,97,112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,50,52,48,4,103,112,114,115,8,0,106>>,
+ B =
+ <<131,104,19,100,0,13,99,95,99,105,100,95,99,115,103,115,110,95,50,97,
+ 1,107,0,4,208,161,246,29,107,0,3,237,166,224,107,0,6,66,240,153,0,2,
+ 10,1,0,8,97,116,116,97,99,104,101,100,104,2,100,0,22,117,112,100,97,
+ 116,101,95,112,100,112,95,99,111,110,116,101,120,116,95,114,101,113,
+ 107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,12,3,197,
+ 31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,104,116,73,
+ 64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,5,0,33,4,4,10
+ ,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,1,114,45,97,
+ 112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,50,52,48,4,
+ 103,112,114,115,8,0,104,2,104,2,100,0,8,97,99,116,105,118,97,116,101,
+ 104,23,100,0,11,112,100,112,95,99,111,110,116,1,120,116,100,0,7,112,
+ 114,105,109,97,114,121,97,1,100,0,9,117,110,100,101,102,105,110,101,
+ 100,97,1,97,4,97,4,97,7,100,0,9,117,110,100,101,102,105,110,101,100,
+ 100,0,9,117,110,100,101,102,105,110,10100,100,0,9,117,110,100,101,
+ 102,105,110,101,100,100,0,5,102,97,108,115,101,100,0,9,117,110,100,
+ 101,102,105,110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,
+ 100,0,9,117,110,100,101,102,105,1,101,100,97,0,100,0,9,117,110,100,
+ 101,102,105,110,101,100,107,0,4,16,0,1,144,107,0,4,61,139,186,181,
+ 107,0,4,10,8,201,49,100,0,9,117,110,100,101,102,105,110,101,100,100,
+ 0,9,117,110,100,101,102,105,0,101,100,100,0,9,117,110,100,101,102,
+ 105,110,101,100,104,2,104,3,98,0,0,7,214,97,11,97,20,104,3,97,17,97,
+ 16,97,21,106,108,0,0,0,3,104,2,97,1,104,2,104,3,98,0,0,7,214,97,11,
+ 97,20,104,3,97,17,97,167,20,104,2,97,4,104,2,104,3,98,0,0,7,214,97,
+ 11,97,20,104,3,97,17,97,16,97,21,104,2,97,10,104,2,104,3,98,0,0,7,
+ 214,97,11,97,20,104,3,97,17,97,16,97,26,106,100,0,5,118,101,114,57,
+ 57,100,0,9,117,110,0,101,102,105,110,101,100,107,0,2,0,244,107,0,4,
+ 10,6,102,195,107,0,4,10,6,102,195,100,0,9,117,110,100,101,102,105,
+ 110,101,100,100,0,9,117,110,100,101,102,105,110,101,100,107,0,125,
+ 248,143,0,203,25115,157,116,65,185,65,172,55,87,164,88,225,50,203,
+ 251,115,157,116,65,185,65,172,55,87,164,88,225,50,0,0,82,153,50,0,
+ 200,98,87,148,237,193,185,65,149,167,69,144,14,16,153,50,3,81,70,94,
+ 13,109,193,1,120,5,181,113,198,118,50,3,81,70,94,13,109,193,185,120,
+ 5,181,113,198,118,153,3,81,70,94,13,109,193,185,120,5,181,113,198,
+ 118,153,50,16,1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,113,92,2,119,128,0,0,
+ 108,0,0,1,107,0,114,69,3,12,1,11,97,31,113,150,64,104,132,61,64,104,
+ 12,3,11,97,31,113,150,64,104,132,61,64,104,12,1,11,97,31,115,150,64,
+ 104,116,73,64,104,0,0,0,0,0,0,65,149,16,61,65,149,16,61,1,241,33,4,0,
+ 33,4,4,10,6,10,181,4,10,6,10,181,38,15,99,111,109,109,97,110,100,101,
+ 114,45,97,112,110,45,49,3,99,111,109,5,109,110,99,57,57,6,109,99,99,
+ 50,52,48,4,103,112,114,115,8,0,106>>,
+
Z = zlib:zip(B),
?m(B, zlib:unzip(Z)).
%% Test gunzip.
api_g_un_zip(Config) when is_list(Config) ->
- ?m(?BARG,zlib:gzip(not_a_binary)),
+ ?m(?EXIT(badarg),zlib:gzip(not_a_binary)),
Bin = <<1,11,1,23,45>>,
Comp = zlib:gzip(Bin),
+
?m(Comp, zlib:gzip(binary_to_list(Bin))),
- ?m(?BARG, zlib:gunzip(not_a_binary)),
- ?m(?DATA_ERROR, zlib:gunzip(<<171,171,171,171,171>>)),
- ?m(?DATA_ERROR, zlib:gunzip(<<>>)),
+ ?m(?EXIT(badarg), zlib:gunzip(not_a_binary)),
+ ?m(?EXIT(data_error), zlib:gunzip(<<171,171,171,171,171>>)),
+ ?m(?EXIT(data_error), zlib:gunzip(<<>>)),
?m(Bin, zlib:gunzip(Comp)),
?m(Bin, zlib:gunzip(binary_to_list(Comp))),
+ %% RFC 1952:
+ %%
+ %% "A gzip file consists of a series of "members" (compressed data
+ %% sets). [...] The members simply appear one after another in the file,
+ %% with no additional information before, between, or after them."
+ Concatenated = <<Bin/binary, Bin/binary>>,
+ ?m(Concatenated, zlib:gunzip([Comp, Comp])),
+
+ %% Don't explode if the uncompressed size is a perfect multiple of the
+ %% internal inflate chunk size.
+ ChunkSizedData = <<0:16384/unit:8>>,
+ ?m(ChunkSizedData, zlib:gunzip(zlib:gzip(ChunkSizedData))),
+
%% Bad CRC; bad length.
BadCrc = bad_crc_data(),
- ?m({'EXIT',{data_error,_}},(catch zlib:gunzip(BadCrc))),
+ ?m(?EXIT(data_error),(catch zlib:gunzip(BadCrc))),
BadLen = bad_len_data(),
- ?m({'EXIT',{data_error,_}},(catch zlib:gunzip(BadLen))),
+ ?m(?EXIT(data_error),(catch zlib:gunzip(BadLen))),
ok.
bad_crc_data() ->
@@ -558,30 +738,15 @@ intro(Config) when is_list(Config) ->
large_deflate(Config) when is_list(Config) ->
large_deflate_do().
large_deflate_do() ->
- Z = zlib:open(),
- Plain = rand_bytes(zlib:getBufSize(Z)*5),
- ok = zlib:deflateInit(Z),
- _ZlibHeader = zlib:deflate(Z, [], full),
- Deflated = zlib:deflate(Z, Plain, full),
- ?m(ok, zlib:close(Z)),
- ?m(Plain, zlib:unzip(list_to_binary([Deflated, 3, 0]))).
-
-rand_bytes(Sz) ->
- L = <<8,2,3,6,1,2,3,2,3,4,8,7,3,7,2,3,4,7,5,8,9,3>>,
- rand_bytes(erlang:md5(L),Sz).
-
-rand_bytes(Bin, Sz) when byte_size(Bin) >= Sz ->
- <<Res:Sz/binary, _/binary>> = Bin,
- Res;
-rand_bytes(Bin, Sz) ->
- rand_bytes(<<(erlang:md5(Bin))/binary, Bin/binary>>, Sz).
-
+ Plain = gen_determ_rand_bytes(64 bsl 10),
+ Deflated = zlib:zip(Plain),
+ ?m(Plain, zlib:unzip(Deflated)).
%% Test a standard compressed zip file.
zip_usage(Config) when is_list(Config) ->
zip_usage(zip_usage({get_arg,Config}));
zip_usage({get_arg,Config}) ->
- Out = conf(data_dir,Config),
+ Out = get_data_dir(Config),
{ok,ZIP} = file:read_file(filename:join(Out,"zipdoc.zip")),
{ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")),
{run,ZIP,ORIG};
@@ -636,13 +801,13 @@ zip_usage({run,ZIP,ORIG}) ->
?m(ok, zlib:deflateInit(Z, default, deflated, -15, 8, default)),
C2 = zlib:deflate(Z, ORIG, finish),
- ?m(true, C1 == list_to_binary(C2)),
+ ?m(ORIG, zlib:unzip(C2)),
?m(ok, zlib:deflateEnd(Z)),
?m(ok, zlib:deflateInit(Z, none, deflated, -15, 8, filtered)),
?m(ok, zlib:deflateParams(Z, default, default)),
C3 = zlib:deflate(Z, ORIG, finish),
- ?m(true, C1 == list_to_binary(C3)),
+ ?m(ORIG, zlib:unzip(C3)),
?m(ok, zlib:deflateEnd(Z)),
ok = zlib:close(Z),
@@ -652,7 +817,7 @@ zip_usage({run,ZIP,ORIG}) ->
gz_usage(Config) when is_list(Config) ->
gz_usage(gz_usage({get_arg,Config}));
gz_usage({get_arg,Config}) ->
- Out = conf(data_dir,Config),
+ Out = get_data_dir(Config),
{ok,GZIP} = file:read_file(filename:join(Out,"zipdoc.1.gz")),
{ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")),
{ok,GZIP2} = file:read_file(filename:join(Out,"zipdoc.txt.gz")),
@@ -673,7 +838,7 @@ gz_usage2(Config) ->
case os:find_executable("gzip") of
Name when is_list(Name) ->
Z = zlib:open(),
- Out = conf(data_dir,Config),
+ Out = get_data_dir(Config),
{ok,ORIG} = file:read_file(filename:join(Out,"zipdoc")),
Compressed = zlib:gzip(ORIG),
GzOutFile = filename:join(Out,"out.gz"),
@@ -701,7 +866,7 @@ gz_usage2(Config) ->
compress_usage(Config) when is_list(Config) ->
compress_usage(compress_usage({get_arg,Config}));
compress_usage({get_arg,Config}) ->
- Out = conf(data_dir,Config),
+ Out = get_data_dir(Config),
{ok,C1} = file:read_file(filename:join(Out,"png-compressed.zlib")),
{run,C1};
compress_usage({run,C1}) ->
@@ -756,7 +921,7 @@ compress_usage({run,C1}) ->
crc(Config) when is_list(Config) ->
crc(crc({get_arg,Config}));
crc({get_arg,Config}) ->
- Out = conf(data_dir,Config),
+ Out = get_data_dir(Config),
{ok,C1} = file:read_file(filename:join(Out,"zipdoc")),
{run,C1};
crc({run,C1}) ->
@@ -785,7 +950,7 @@ crc({run,C1}) ->
adler(Config) when is_list(Config) ->
adler(adler({get_arg,Config}));
adler({get_arg,Config}) ->
- Out = conf(data_dir,Config),
+ Out = get_data_dir(Config),
File1 = filename:join(Out,"zipdoc"),
{ok,C1} = file:read_file(File1),
{run,C1};
@@ -833,10 +998,14 @@ dictionary_usage({run}) ->
%% Now uncompress.
Z2 = zlib:open(),
?m(ok, zlib:inflateInit(Z2)),
- {'EXIT',{{need_dictionary,DictID},_}} = (catch zlib:inflate(Z2, Compressed)),
+
+ ?m(?EXIT({need_dictionary, DictID}), zlib:inflate(Z2, Compressed)),
+
?m(ok, zlib:inflateSetDictionary(Z2, Dict)),
?m(ok, zlib:inflateSetDictionary(Z2, binary_to_list(Dict))),
+
Uncompressed = ?m(B when is_list(B), zlib:inflate(Z2, [])),
+
?m(ok, zlib:inflateEnd(Z2)),
?m(ok, zlib:close(Z2)),
?m(Data, list_to_binary(Uncompressed)).
@@ -846,33 +1015,73 @@ split_bin(<<Part:1997/binary,Rest/binary>>, Acc) ->
split_bin(Last,Acc) ->
lists:reverse([Last|Acc]).
+only_allow_owner(Config) when is_list(Config) ->
+ Z = zlib:open(),
+ Owner = self(),
+
+ ?m(ok, zlib:inflateInit(Z)),
+ ?m(ok, zlib:inflateReset(Z)),
+
+ {Pid, Ref} = spawn_monitor(
+ fun() ->
+ ?m(?EXIT(not_on_controlling_process), zlib:inflateReset(Z)),
+ Owner ! '$transfer_ownership',
+ receive
+ '$ownership_transferred' ->
+ ?m(ok, zlib:inflateReset(Z))
+ after 200 ->
+ ct:fail("Never received transfer signal.")
+ end
+ end),
+ ownership_transfer_check(Z, Pid, Ref).
+
+ownership_transfer_check(Z, WorkerPid, Ref) ->
+ receive
+ '$transfer_ownership' ->
+ zlib:set_controlling_process(Z, WorkerPid),
+ WorkerPid ! '$ownership_transferred',
+ ownership_transfer_check(Z, WorkerPid, Ref);
+ {'DOWN', Ref, process, WorkerPid, normal} ->
+ ok;
+ {'DOWN', Ref, process, WorkerPid, Reason} ->
+ ct:fail("Spawned worker crashed with reason ~p.", [Reason])
+ after 200 ->
+ ct:fail("Spawned worker timed out.")
+ end.
+
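%% The handshake above is needed because a zlib stream may only be used by
%% its controlling process; handing a stream over to another process follows
%% the usual pattern (sketch with assumed message names):

hand_over(Z, NewOwner) ->
    zlib:set_controlling_process(Z, NewOwner),
    NewOwner ! {zlib_stream, Z},
    ok.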
+sub_heap_binaries(Config) when is_list(Config) ->
+ Compressed = zlib:compress(<<"gurka">>),
+ ConfLen = erlang:length(Config),
+
+ HeapBin = <<ConfLen:8/integer, Compressed/binary>>,
+ <<_:8/integer, SubHeapBin/binary>> = HeapBin,
+
+ ?m(<<"gurka">>, zlib:uncompress(SubHeapBin)),
+ ok.
%% Check concurrent access to zlib driver.
smp(Config) ->
- case erlang:system_info(smp_support) of
- true ->
- NumOfProcs = lists:min([8,erlang:system_info(schedulers)]),
- io:format("smp starting ~p workers\n",[NumOfProcs]),
-
- %% Tests to run in parallel.
- Funcs = [zip_usage, gz_usage, compress_usage, dictionary_usage,
- crc, adler],
-
- %% We get all function arguments here to avoid repeated parallel
- %% file read access.
- FnAList = lists:map(fun(F) -> {F,?MODULE:F({get_arg,Config})}
- end, Funcs),
-
- Pids = [spawn_link(?MODULE, worker, [rand:uniform(9999),
- list_to_tuple(FnAList),
- self()])
- || _ <- lists:seq(1,NumOfProcs)],
- wait_pids(Pids);
+ NumOfProcs = lists:min([8,erlang:system_info(schedulers)]),
+ io:format("smp starting ~p workers\n",[NumOfProcs]),
- false ->
- {skipped,"No smp support"}
- end.
+ %% Tests to run in parallel.
+ Funcs =
+ [zip_usage, gz_usage, compress_usage, dictionary_usage,
+ crc, adler],
+
+ %% We get all function arguments here to avoid repeated parallel
+ %% file read access.
+ UsageArgs =
+ list_to_tuple([{F, ?MODULE:F({get_arg,Config})} || F <- Funcs]),
+ Parent = self(),
+ WorkerFun =
+ fun() ->
+ worker(rand:uniform(9999), UsageArgs, Parent)
+ end,
+
+ Pids = [spawn_link(WorkerFun) || _ <- lists:seq(1, NumOfProcs)],
+ wait_pids(Pids).
worker(Seed, FnATpl, Parent) ->
io:format("smp worker ~p, seed=~p~n",[self(),Seed]),
@@ -963,43 +1172,98 @@ otp_9981(Config) when is_list(Config) ->
Ports = lists:sort(erlang:ports()),
ok.
+-define(BENCH_SIZE, (16 bsl 20)).
+
+-define(DECOMPRESS_BENCH(Name, What, Data),
+ Name(Config) when is_list(Config) ->
+ Uncompressed = Data,
+ Compressed = zlib:compress(Uncompressed),
+ What(Compressed, byte_size(Uncompressed))).
+-define(COMPRESS_BENCH(Name, What, Data),
+ Name(Config) when is_list(Config) ->
+ Compressed = Data,
+ What(Compressed, byte_size(Compressed))).
+
+?DECOMPRESS_BENCH(inflate_bench_zeroed, throughput_bench_inflate,
+ <<0:(8 * ?BENCH_SIZE)>>).
+?DECOMPRESS_BENCH(inflate_bench_rand, throughput_bench_inflate,
+ gen_determ_rand_bytes(?BENCH_SIZE)).
+
+?DECOMPRESS_BENCH(chunk_bench_zeroed, throughput_bench_chunk,
+ <<0:(8 * ?BENCH_SIZE)>>).
+?DECOMPRESS_BENCH(chunk_bench_rand, throughput_bench_chunk,
+ gen_determ_rand_bytes(?BENCH_SIZE)).
+
+?COMPRESS_BENCH(deflate_bench_zeroed, throughput_bench_deflate,
+ <<0:(8 * ?BENCH_SIZE)>>).
+?COMPRESS_BENCH(deflate_bench_rand, throughput_bench_deflate,
+ gen_determ_rand_bytes(?BENCH_SIZE)).
+
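%% Each ?DECOMPRESS_BENCH/?COMPRESS_BENCH line above expands to a complete
%% test case; for example, the first one is roughly equivalent to:
%%
%%   inflate_bench_zeroed(Config) when is_list(Config) ->
%%       Uncompressed = <<0:(8 * ?BENCH_SIZE)>>,
%%       Compressed = zlib:compress(Uncompressed),
%%       throughput_bench_inflate(Compressed, byte_size(Uncompressed)).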
+throughput_bench_inflate(Compressed, Size) ->
+ Z = zlib:open(),
+ zlib:inflateInit(Z),
+
+ submit_throughput_results(Size,
+ fun() ->
+ zlib:inflate(Z, Compressed)
+ end).
+
+throughput_bench_deflate(Uncompressed, Size) ->
+ Z = zlib:open(),
+ zlib:deflateInit(Z),
+
+ submit_throughput_results(Size,
+ fun() ->
+ zlib:deflate(Z, Uncompressed, finish)
+ end).
+
+throughput_bench_chunk(Compressed, Size) ->
+ Z = zlib:open(),
+ zlib:inflateInit(Z),
+
+ ChunkLoop =
+ fun
+ Loop({more, _}) -> Loop(zlib:inflateChunk(Z));
+ Loop(_) -> ok
+ end,
+
+ submit_throughput_results(Size,
+ fun() ->
+ ChunkLoop(zlib:inflateChunk(Z, Compressed))
+ end).
+
+submit_throughput_results(Size, Fun) ->
+ TimeTaken = measure_perf_counter(Fun, millisecond),
+
+ KBPS = trunc((Size bsr 10) / (TimeTaken / 1000)),
+ ct_event:notify(#event{ name = benchmark_data, data = [{value,KBPS}] }),
+ {comment, io_lib:format("~p ms, ~p KBPS", [TimeTaken, KBPS])}.
+
+measure_perf_counter(Fun, Unit) ->
+ Start = os:perf_counter(Unit),
+ Fun(),
+ os:perf_counter(Unit) - Start.
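%% Example use of the helpers above (illustrative): time one compression run
%% and report it as throughput.
%%
%%   Data = gen_determ_rand_bytes(1 bsl 20),
%%   submit_throughput_results(byte_size(Data),
%%                             fun() -> zlib:compress(Data) end).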
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%%% Helps with testing directly %%%%%%%%%%%%%
-conf(What,Config) ->
- try proplists:get_value(What,Config) of
- undefined ->
- "./zlib_SUITE_data";
- Dir ->
- Dir
+get_data_dir(Config) ->
+ try proplists:get_value(data_dir,Config) of
+ undefined ->
+ "./zlib_SUITE_data";
+ Dir ->
+ Dir
catch
- _:_ -> "./zlib_SUITE_data"
+ _:_ -> "./zlib_SUITE_data"
end.
-t() -> t([all]).
-
-t(What) when not is_list(What) ->
- t([What]);
-t(What) ->
- lists:foreach(fun(T) ->
- try ?MODULE:T([])
- catch _E:_R ->
- Line = get(test_server_loc),
- io:format("Failed ~p:~p ~p ~p ~p~n",
- [T,Line,_E,_R, erlang:get_stacktrace()])
- end
- end, expand(What)).
-
-expand(All) ->
- lists:reverse(expand(All,[])).
-expand([H|T], Acc) ->
- case ?MODULE:H(suite) of
- [] -> expand(T,[H|Acc]);
- Cs ->
- R = expand(Cs, Acc),
- expand(T, R)
- end;
-expand([], Acc) -> Acc.
-
+%% Deterministically generates pseudo-random bytes, using the size as the seed.
+gen_determ_rand_bytes(Size) ->
+ gen_determ_rand_bytes(Size, erlang:md5_init(), <<>>).
+gen_determ_rand_bytes(Size, _Context, Acc) when Size =< 0 ->
+ Acc;
+gen_determ_rand_bytes(Size, Context0, Acc) when Size > 0 ->
+ Context = erlang:md5_update(Context0, <<Size/integer>>),
+ Checksum = erlang:md5_final(Context),
+ gen_determ_rand_bytes(Size - 16, Context, <<Acc/binary, Checksum/binary>>).
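%% Since the generator is keyed only on Size, repeated calls with the same
%% argument return identical data, which keeps benchmark inputs reproducible.
%% The result is rounded up to a whole number of 16-byte MD5 blocks:
%%
%%   true = gen_determ_rand_bytes(100) =:= gen_determ_rand_bytes(100),
%%   112 = byte_size(gen_determ_rand_bytes(100)).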
diff --git a/lib/kernel/test/zzz_SUITE.erl b/lib/kernel/test/zzz_SUITE.erl
new file mode 100644
index 0000000000..59c7fd7404
--- /dev/null
+++ b/lib/kernel/test/zzz_SUITE.erl
@@ -0,0 +1,37 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2006-2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(zzz_SUITE).
+
+%% The sole purpose of this test suite is to run the things we want to run
+%% last, just before the VM terminates.
+
+-export([all/0]).
+
+-export([lc_graph/1]).
+
+
+all() ->
+ [lc_graph].
+
+lc_graph(_Config) ->
+    %% Create an "lc_graph" file in the current working directory
+    %% if the lock checker is enabled.
+ erts_debug:lc_graph(),
+ ok.