-rw-r--r--  bootstrap/bin/no_dot_erlang.boot  bin 5951 -> 6292 bytes
-rw-r--r--  bootstrap/bin/start.boot  bin 5951 -> 6292 bytes
-rw-r--r--  bootstrap/bin/start_clean.boot  bin 5951 -> 6292 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/application_controller.beam  bin 30684 -> 31272 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/code_server.beam  bin 23996 -> 24140 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/error_logger.beam  bin 6280 -> 5412 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/kernel.app  15
-rw-r--r--  bootstrap/lib/kernel/ebin/kernel.appup  2
-rw-r--r--  bootstrap/lib/kernel/ebin/kernel.beam  bin 3920 -> 3672 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger.beam  bin 0 -> 11448 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger_backend.beam  bin 0 -> 2600 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger_config.beam  bin 0 -> 2940 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger_disk_log_h.beam  bin 0 -> 8508 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger_filters.beam  bin 0 -> 1760 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger_formatter.beam  bin 0 -> 5100 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger_h_common.beam  bin 0 -> 5156 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger_server.beam  bin 0 -> 7200 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger_simple.beam  bin 0 -> 4524 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger_std_h.beam  bin 0 -> 10132 bytes
-rw-r--r--  bootstrap/lib/kernel/ebin/logger_sup.beam  bin 0 -> 524 bytes
-rw-r--r--  bootstrap/lib/kernel/include/logger.hrl  49
-rw-r--r--  bootstrap/lib/stdlib/ebin/gen_event.beam  bin 13432 -> 13984 bytes
-rw-r--r--  bootstrap/lib/stdlib/ebin/gen_fsm.beam  bin 11048 -> 11636 bytes
-rw-r--r--  bootstrap/lib/stdlib/ebin/gen_server.beam  bin 14284 -> 15008 bytes
-rw-r--r--  bootstrap/lib/stdlib/ebin/gen_statem.beam  bin 20136 -> 20528 bytes
-rw-r--r--  bootstrap/lib/stdlib/ebin/proc_lib.beam  bin 11568 -> 12436 bytes
-rw-r--r--  bootstrap/lib/stdlib/ebin/supervisor.beam  bin 21880 -> 23348 bytes
-rw-r--r--  bootstrap/lib/stdlib/ebin/supervisor_bridge.beam  bin 2008 -> 2412 bytes
-rw-r--r--  erts/configure.in  13
-rw-r--r--  erts/doc/src/absform.xml  10
-rw-r--r--  erts/emulator/beam/atom.names  1
-rw-r--r--  erts/emulator/beam/beam_bif_load.c  6
-rw-r--r--  erts/emulator/beam/beam_debug.c  2
-rw-r--r--  erts/emulator/beam/dist.c  16
-rw-r--r--  erts/emulator/beam/erl_db.c  2
-rw-r--r--  erts/emulator/beam/erl_gc.c  34
-rw-r--r--  erts/emulator/beam/erl_message.c  148
-rw-r--r--  erts/emulator/beam/erl_message.h  27
-rw-r--r--  erts/emulator/beam/erl_nif.c  7
-rw-r--r--  erts/emulator/beam/erl_port.h  16
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.c  235
-rw-r--r--  erts/emulator/beam/erl_proc_sig_queue.h  12
-rw-r--r--  erts/emulator/beam/erl_process.c  23
-rw-r--r--  erts/emulator/beam/erl_process.h  7
-rw-r--r--  erts/emulator/beam/erl_term.h  48
-rw-r--r--  erts/emulator/beam/erl_trace.c  12
-rw-r--r--  erts/emulator/beam/utils.c  162
-rw-r--r--  erts/emulator/test/dgawd_handler.erl  4
-rw-r--r--  erts/emulator/test/statistics_SUITE.erl  7
-rw-r--r--  erts/preloaded/ebin/erl_prim_loader.beam  bin 54148 -> 54476 bytes
-rw-r--r--  erts/preloaded/ebin/init.beam  bin 50572 -> 51452 bytes
-rw-r--r--  erts/preloaded/ebin/prim_file.beam  bin 27428 -> 27768 bytes
-rw-r--r--  erts/preloaded/src/erl_prim_loader.erl  8
-rw-r--r--  erts/preloaded/src/init.erl  58
-rw-r--r--  erts/preloaded/src/prim_file.erl  9
-rw-r--r--  erts/test/upgrade_SUITE.erl  44
-rw-r--r--  lib/common_test/src/Makefile  2
-rw-r--r--  lib/common_test/src/cth_log_redirect.erl  241
-rw-r--r--  lib/common_test/test/ct_hooks_SUITE.erl  21
-rw-r--r--  lib/common_test/test/ct_hooks_SUITE_data/cth/tests/cth_log_SUITE.erl  2
-rw-r--r--  lib/common_test/test/ct_pre_post_test_io_SUITE.erl  4
-rw-r--r--  lib/compiler/src/beam_validator.erl  2
-rw-r--r--  lib/compiler/src/sys_core_fold.erl  68
-rw-r--r--  lib/compiler/test/core_fold_SUITE.erl  18
-rw-r--r--  lib/compiler/test/fun_SUITE.erl  11
-rw-r--r--  lib/crypto/c_src/crypto.c  175
-rw-r--r--  lib/crypto/doc/src/crypto.xml  1
-rw-r--r--  lib/crypto/test/crypto_SUITE.erl  21
-rw-r--r--  lib/diameter/doc/src/diameter.xml  6
-rw-r--r--  lib/diameter/doc/src/diameter_codec.xml  2
-rw-r--r--  lib/diameter/doc/src/diameter_dict.xml  6
-rw-r--r--  lib/diameter/doc/src/diameter_soc.xml  2
-rw-r--r--  lib/diameter/doc/src/notes.xml  2
-rw-r--r--  lib/diameter/src/diameter.appup.src  6
-rw-r--r--  lib/diameter/vsn.mk  2
-rw-r--r--  lib/edoc/src/edoc_doclet.erl  4
-rw-r--r--  lib/kernel/doc/src/Makefile  22
-rw-r--r--  lib/kernel/doc/src/book.xml  3
-rw-r--r--  lib/kernel/doc/src/error_logger.xml  19
-rw-r--r--  lib/kernel/doc/src/introduction_chapter.xml  64
-rw-r--r--  lib/kernel/doc/src/kernel_app.xml  154
-rw-r--r--  lib/kernel/doc/src/logger.xml  478
-rw-r--r--  lib/kernel/doc/src/logger_arch.png  bin 0 -> 31459 bytes
-rw-r--r--  lib/kernel/doc/src/logger_chapter.xml  815
-rw-r--r--  lib/kernel/doc/src/logger_disk_log_h.xml  146
-rw-r--r--  lib/kernel/doc/src/logger_filters.xml  191
-rw-r--r--  lib/kernel/doc/src/logger_formatter.xml  157
-rw-r--r--  lib/kernel/doc/src/logger_std_h.xml  133
-rw-r--r--  lib/kernel/doc/src/part.xml  40
-rw-r--r--  lib/kernel/doc/src/ref_man.xml  5
-rw-r--r--  lib/kernel/doc/src/specs.xml  5
-rw-r--r--  lib/kernel/include/logger.hrl  49
-rw-r--r--  lib/kernel/src/Makefile  29
-rw-r--r--  lib/kernel/src/application_controller.erl  35
-rw-r--r--  lib/kernel/src/code_server.erl  14
-rw-r--r--  lib/kernel/src/error_logger.erl  545
-rw-r--r--  lib/kernel/src/kernel.app.src  15
-rw-r--r--  lib/kernel/src/kernel.appup.src  10
-rw-r--r--  lib/kernel/src/kernel.erl  26
-rw-r--r--  lib/kernel/src/logger.erl  803
-rw-r--r--  lib/kernel/src/logger_backend.erl  133
-rw-r--r--  lib/kernel/src/logger_config.erl  151
-rw-r--r--  lib/kernel/src/logger_disk_log_h.erl  694
-rw-r--r--  lib/kernel/src/logger_filters.erl  123
-rw-r--r--  lib/kernel/src/logger_formatter.erl  295
-rw-r--r--  lib/kernel/src/logger_h_common.erl  301
-rw-r--r--  lib/kernel/src/logger_h_common.hrl  262
-rw-r--r--  lib/kernel/src/logger_internal.hrl  98
-rw-r--r--  lib/kernel/src/logger_server.erl  440
-rw-r--r--  lib/kernel/src/logger_simple.erl  236
-rw-r--r--  lib/kernel/src/logger_std_h.erl  799
-rw-r--r--  lib/kernel/src/logger_sup.erl  53
-rw-r--r--  lib/kernel/test/Makefile  14
-rw-r--r--  lib/kernel/test/application_SUITE.erl  3
-rw-r--r--  lib/kernel/test/error_logger_SUITE.erl  46
-rw-r--r--  lib/kernel/test/init_SUITE.erl  6
-rw-r--r--  lib/kernel/test/kernel.spec  1
-rw-r--r--  lib/kernel/test/logger.cover  14
-rw-r--r--  lib/kernel/test/logger.spec  11
-rw-r--r--  lib/kernel/test/logger_SUITE.erl  828
-rw-r--r--  lib/kernel/test/logger_bench_SUITE.erl  500
-rw-r--r--  lib/kernel/test/logger_bench_SUITE_data/Emakefile  1
-rw-r--r--  lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl  73
-rw-r--r--  lib/kernel/test/logger_disk_log_h_SUITE.erl  1417
-rw-r--r--  lib/kernel/test/logger_env_var_SUITE.erl  451
-rw-r--r--  lib/kernel/test/logger_filters_SUITE.erl  214
-rw-r--r--  lib/kernel/test/logger_formatter_SUITE.erl  558
-rw-r--r--  lib/kernel/test/logger_legacy_SUITE.erl  282
-rw-r--r--  lib/kernel/test/logger_simple_SUITE.erl  247
-rw-r--r--  lib/kernel/test/logger_std_h_SUITE.erl  1396
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE.erl  17
-rw-r--r--  lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl  24
-rw-r--r--  lib/public_key/doc/src/public_key.xml  6
-rw-r--r--  lib/reltool/src/reltool_target.erl  2
-rw-r--r--  lib/runtime_tools/src/appmon_info.erl  2
-rw-r--r--  lib/sasl/doc/src/appup.xml  14
-rw-r--r--  lib/sasl/doc/src/sasl_app.xml  183
-rw-r--r--  lib/sasl/src/sasl.app.src  3
-rw-r--r--  lib/sasl/src/sasl.appup.src  8
-rw-r--r--  lib/sasl/src/sasl.erl  90
-rw-r--r--  lib/sasl/src/systools_make.erl  8
-rw-r--r--  lib/sasl/test/sasl_SUITE.erl  14
-rw-r--r--  lib/sasl/test/sasl_report_SUITE.erl  18
-rw-r--r--  lib/ssh/doc/specs/.gitignore  1
-rw-r--r--  lib/ssh/doc/src/Makefile  23
-rw-r--r--  lib/ssh/doc/src/configure_algos.xml  5
-rw-r--r--  lib/ssh/doc/src/introduction.xml  2
-rw-r--r--  lib/ssh/doc/src/ref_man.xml  3
-rw-r--r--  lib/ssh/doc/src/specs.xml  13
-rw-r--r--  lib/ssh/doc/src/ssh.xml  1791
-rw-r--r--  lib/ssh/doc/src/ssh_app.xml  3
-rw-r--r--  lib/ssh/doc/src/ssh_client_channel.xml (renamed from lib/ssh/doc/src/ssh_channel.xml)  163
-rw-r--r--  lib/ssh/doc/src/ssh_client_key_api.xml  49
-rw-r--r--  lib/ssh/doc/src/ssh_connection.xml  110
-rw-r--r--  lib/ssh/doc/src/ssh_protocol.xml  6
-rw-r--r--  lib/ssh/doc/src/ssh_server_channel.xml  176
-rw-r--r--  lib/ssh/doc/src/ssh_server_key_api.xml  43
-rw-r--r--  lib/ssh/doc/src/ssh_sftp.xml  4
-rw-r--r--  lib/ssh/doc/src/ssh_sftpd.xml  3
-rw-r--r--  lib/ssh/doc/src/using_ssh.xml  5
-rw-r--r--  lib/ssh/src/Makefile  44
-rw-r--r--  lib/ssh/src/ssh.app.src  4
-rw-r--r--  lib/ssh/src/ssh.erl  195
-rw-r--r--  lib/ssh/src/ssh.hrl  281
-rw-r--r--  lib/ssh/src/ssh_acceptor_sup.erl  2
-rw-r--r--  lib/ssh/src/ssh_channel.erl  388
-rw-r--r--  lib/ssh/src/ssh_cli.erl  21
-rw-r--r--  lib/ssh/src/ssh_client_channel.erl  456
-rw-r--r--  lib/ssh/src/ssh_client_key_api.erl  35
-rw-r--r--  lib/ssh/src/ssh_connect.hrl  4
-rw-r--r--  lib/ssh/src/ssh_connection.erl  128
-rw-r--r--  lib/ssh/src/ssh_connection_handler.erl  63
-rw-r--r--  lib/ssh/src/ssh_daemon_channel.erl  36
-rw-r--r--  lib/ssh/src/ssh_file.erl  21
-rw-r--r--  lib/ssh/src/ssh_info.erl  8
-rw-r--r--  lib/ssh/src/ssh_options.erl  27
-rw-r--r--  lib/ssh/src/ssh_server_channel.erl  55
-rw-r--r--  lib/ssh/src/ssh_server_channel_sup.erl (renamed from lib/ssh/src/ssh_channel_sup.erl)  8
-rw-r--r--  lib/ssh/src/ssh_server_key_api.erl  12
-rw-r--r--  lib/ssh/src/ssh_sftp.erl  18
-rw-r--r--  lib/ssh/src/ssh_sftpd.erl  18
-rw-r--r--  lib/ssh/src/ssh_shell.erl  21
-rw-r--r--  lib/ssh/src/ssh_subsystem_sup.erl  12
-rw-r--r--  lib/ssh/test/property_test/ssh_eqc_subsys.erl  2
-rw-r--r--  lib/ssh/test/ssh_bench_dev_null.erl  2
-rw-r--r--  lib/ssh/test/ssh_echo_server.erl  2
-rw-r--r--  lib/ssh/test/ssh_options_SUITE.erl  10
-rw-r--r--  lib/ssh/test/ssh_peername_sockname_server.erl  2
-rw-r--r--  lib/ssh/test/ssh_sup_SUITE.erl  16
-rw-r--r--  lib/ssl/src/ssl_connection.erl  8
-rw-r--r--  lib/ssl/src/ssl_connection.hrl  2
-rw-r--r--  lib/ssl/src/ssl_handshake.erl  6
-rw-r--r--  lib/ssl/test/ssl_certificate_verify_SUITE.erl  92
-rw-r--r--  lib/stdlib/doc/src/calendar.xml  88
-rw-r--r--  lib/stdlib/doc/src/io_lib.xml  39
-rw-r--r--  lib/stdlib/doc/src/sys.xml  10
-rw-r--r--  lib/stdlib/src/Makefile  7
-rw-r--r--  lib/stdlib/src/calendar.erl  185
-rw-r--r--  lib/stdlib/src/erl_lint.erl  14
-rw-r--r--  lib/stdlib/src/gen_event.erl  87
-rw-r--r--  lib/stdlib/src/gen_fsm.erl  53
-rw-r--r--  lib/stdlib/src/gen_server.erl  110
-rw-r--r--  lib/stdlib/src/gen_statem.erl  125
-rw-r--r--  lib/stdlib/src/io_lib.erl  147
-rw-r--r--  lib/stdlib/src/io_lib_format.erl  242
-rw-r--r--  lib/stdlib/src/io_lib_pretty.erl  680
-rw-r--r--  lib/stdlib/src/proc_lib.erl  125
-rw-r--r--  lib/stdlib/src/shell.erl  2
-rw-r--r--  lib/stdlib/src/stdlib.appup.src  6
-rw-r--r--  lib/stdlib/src/supervisor.erl  55
-rw-r--r--  lib/stdlib/src/supervisor_bridge.erl  27
-rw-r--r--  lib/stdlib/src/sys.erl  59
-rw-r--r--  lib/stdlib/test/calendar_SUITE.erl  161
-rw-r--r--  lib/stdlib/test/erl_eval_SUITE.erl  56
-rw-r--r--  lib/stdlib/test/error_logger_h_SUITE.erl  5
-rw-r--r--  lib/stdlib/test/io_SUITE.erl  156
-rw-r--r--  lib/stdlib/test/proc_lib_SUITE.erl  11
-rw-r--r--  lib/stdlib/test/sys_SUITE.erl  26
-rw-r--r--  system/doc/design_principles/spec_proc.xml  2
219 files changed, 18811 insertions, 3433 deletions
diff --git a/bootstrap/bin/no_dot_erlang.boot b/bootstrap/bin/no_dot_erlang.boot
index e425b58f12..fe11c1d256 100644
--- a/bootstrap/bin/no_dot_erlang.boot
+++ b/bootstrap/bin/no_dot_erlang.boot
Binary files differ
diff --git a/bootstrap/bin/start.boot b/bootstrap/bin/start.boot
index e425b58f12..fe11c1d256 100644
--- a/bootstrap/bin/start.boot
+++ b/bootstrap/bin/start.boot
Binary files differ
diff --git a/bootstrap/bin/start_clean.boot b/bootstrap/bin/start_clean.boot
index e425b58f12..fe11c1d256 100644
--- a/bootstrap/bin/start_clean.boot
+++ b/bootstrap/bin/start_clean.boot
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/application_controller.beam b/bootstrap/lib/kernel/ebin/application_controller.beam
index e060d2bd9a..869c46939b 100644
--- a/bootstrap/lib/kernel/ebin/application_controller.beam
+++ b/bootstrap/lib/kernel/ebin/application_controller.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/code_server.beam b/bootstrap/lib/kernel/ebin/code_server.beam
index c42c641c17..736190c08c 100644
--- a/bootstrap/lib/kernel/ebin/code_server.beam
+++ b/bootstrap/lib/kernel/ebin/code_server.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/error_logger.beam b/bootstrap/lib/kernel/ebin/error_logger.beam
index 4490480184..752c0f2bb1 100644
--- a/bootstrap/lib/kernel/ebin/error_logger.beam
+++ b/bootstrap/lib/kernel/ebin/error_logger.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/kernel.app b/bootstrap/lib/kernel/ebin/kernel.app
index 2e09684f73..b93a65e8fb 100644
--- a/bootstrap/lib/kernel/ebin/kernel.app
+++ b/bootstrap/lib/kernel/ebin/kernel.app
@@ -60,6 +60,17 @@
kernel_refc,
local_tcp,
local_udp,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_filters,
+ logger_formatter,
+ logger_h_common,
+ logger_server,
+ logger_simple,
+ logger_std_h,
+ logger_sup,
net,
net_adm,
net_kernel,
@@ -117,6 +128,8 @@
kernel_config,
kernel_refc,
kernel_sup,
+ logger,
+ logger_sup,
net_kernel,
net_sup,
rex,
@@ -127,7 +140,7 @@
inet_db,
pg2]},
{applications, []},
- {env, [{error_logger, tty}]},
+ {env, []},
{mod, {kernel, []}},
{runtime_dependencies, ["erts-10.0", "stdlib-3.5", "sasl-3.0"]}
]
diff --git a/bootstrap/lib/kernel/ebin/kernel.appup b/bootstrap/lib/kernel/ebin/kernel.appup
index aad99fd30b..a55db0a55f 100644
--- a/bootstrap/lib/kernel/ebin/kernel.appup
+++ b/bootstrap/lib/kernel/ebin/kernel.appup
@@ -16,7 +16,7 @@
%% limitations under the License.
%%
%% %CopyrightEnd%
-{"5.4.2",
+{"5.4.3",
%% Up from - max one major revision back
[{<<"5\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-19.*, OTP-20.0
{<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-20.1+
diff --git a/bootstrap/lib/kernel/ebin/kernel.beam b/bootstrap/lib/kernel/ebin/kernel.beam
index 3b99f49822..15dfd19ff8 100644
--- a/bootstrap/lib/kernel/ebin/kernel.beam
+++ b/bootstrap/lib/kernel/ebin/kernel.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger.beam b/bootstrap/lib/kernel/ebin/logger.beam
new file mode 100644
index 0000000000..698505d097
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_backend.beam b/bootstrap/lib/kernel/ebin/logger_backend.beam
new file mode 100644
index 0000000000..ccc03d104c
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_backend.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_config.beam b/bootstrap/lib/kernel/ebin/logger_config.beam
new file mode 100644
index 0000000000..0fc49960b4
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_config.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_disk_log_h.beam b/bootstrap/lib/kernel/ebin/logger_disk_log_h.beam
new file mode 100644
index 0000000000..c8f20360fd
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_disk_log_h.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_filters.beam b/bootstrap/lib/kernel/ebin/logger_filters.beam
new file mode 100644
index 0000000000..0e8c97c98e
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_filters.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_formatter.beam b/bootstrap/lib/kernel/ebin/logger_formatter.beam
new file mode 100644
index 0000000000..ce8308c503
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_formatter.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_h_common.beam b/bootstrap/lib/kernel/ebin/logger_h_common.beam
new file mode 100644
index 0000000000..ad27475332
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_h_common.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_server.beam b/bootstrap/lib/kernel/ebin/logger_server.beam
new file mode 100644
index 0000000000..61f50c8bef
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_server.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_simple.beam b/bootstrap/lib/kernel/ebin/logger_simple.beam
new file mode 100644
index 0000000000..be8017391f
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_simple.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_std_h.beam b/bootstrap/lib/kernel/ebin/logger_std_h.beam
new file mode 100644
index 0000000000..6258544c15
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_std_h.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/ebin/logger_sup.beam b/bootstrap/lib/kernel/ebin/logger_sup.beam
new file mode 100644
index 0000000000..9bef3c861c
--- /dev/null
+++ b/bootstrap/lib/kernel/ebin/logger_sup.beam
Binary files differ
diff --git a/bootstrap/lib/kernel/include/logger.hrl b/bootstrap/lib/kernel/include/logger.hrl
new file mode 100644
index 0000000000..2143ccd297
--- /dev/null
+++ b/bootstrap/lib/kernel/include/logger.hrl
@@ -0,0 +1,49 @@
+-ifndef(LOGGER_HRL).
+-define(LOGGER_HRL,true).
+-define(LOG_EMERGENCY(A),?DO_LOG(emergency,[A])).
+-define(LOG_EMERGENCY(A,B),?DO_LOG(emergency,[A,B])).
+-define(LOG_EMERGENCY(A,B,C),?DO_LOG(emergency,[A,B,C])).
+
+-define(LOG_ALERT(A),?DO_LOG(alert,[A])).
+-define(LOG_ALERT(A,B),?DO_LOG(alert,[A,B])).
+-define(LOG_ALERT(A,B,C),?DO_LOG(alert,[A,B,C])).
+
+-define(LOG_CRITICAL(A),?DO_LOG(critical,[A])).
+-define(LOG_CRITICAL(A,B),?DO_LOG(critical,[A,B])).
+-define(LOG_CRITICAL(A,B,C),?DO_LOG(critical,[A,B,C])).
+
+-define(LOG_ERROR(A),?DO_LOG(error,[A])).
+-define(LOG_ERROR(A,B),?DO_LOG(error,[A,B])).
+-define(LOG_ERROR(A,B,C),?DO_LOG(error,[A,B,C])).
+
+-define(LOG_WARNING(A),?DO_LOG(warning,[A])).
+-define(LOG_WARNING(A,B),?DO_LOG(warning,[A,B])).
+-define(LOG_WARNING(A,B,C),?DO_LOG(warning,[A,B,C])).
+
+-define(LOG_NOTICE(A),?DO_LOG(notice,[A])).
+-define(LOG_NOTICE(A,B),?DO_LOG(notice,[A,B])).
+-define(LOG_NOTICE(A,B,C),?DO_LOG(notice,[A,B,C])).
+
+-define(LOG_INFO(A),?DO_LOG(info,[A])).
+-define(LOG_INFO(A,B),?DO_LOG(info,[A,B])).
+-define(LOG_INFO(A,B,C),?DO_LOG(info,[A,B,C])).
+
+-define(LOG_DEBUG(A),?DO_LOG(debug,[A])).
+-define(LOG_DEBUG(A,B),?DO_LOG(debug,[A,B])).
+-define(LOG_DEBUG(A,B,C),?DO_LOG(debug,[A,B,C])).
+
+-define(LOCATION,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ line=>?LINE,
+ file=>?FILE}).
+
+%%%-----------------------------------------------------------------
+%%% Internal, i.e. not intended for direct use in code - use above
+%%% macros instead!
+-define(DO_LOG(Level,Args),
+ case logger:allow(Level,?MODULE) of
+ true ->
+ apply(logger,macro_log,[?LOCATION,Level|Args]);
+ false ->
+ ok
+ end).
+-endif.
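
For illustration only (not part of this change): a minimal sketch of how a module would use the macros above, assuming the logger:allow/2 and logger:macro_log/3..5 functions introduced elsewhere in this commit. The module and function names below are hypothetical.

    %% logger_demo.erl -- hypothetical example module
    -module(logger_demo).
    -include_lib("kernel/include/logger.hrl").
    -export([handle/1]).

    handle(Req) ->
        ?LOG_INFO("handling request"),             %% one argument: format string or report
        ?LOG_ERROR("bad request: ~p", [Req]),      %% two arguments: format string + args
        ok.

Each macro expands to a ?DO_LOG/2 call, which first checks logger:allow(Level, ?MODULE) so that disabled levels cost only that check.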
diff --git a/bootstrap/lib/stdlib/ebin/gen_event.beam b/bootstrap/lib/stdlib/ebin/gen_event.beam
index 4973a8eee9..bb1d4f69ba 100644
--- a/bootstrap/lib/stdlib/ebin/gen_event.beam
+++ b/bootstrap/lib/stdlib/ebin/gen_event.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/gen_fsm.beam b/bootstrap/lib/stdlib/ebin/gen_fsm.beam
index 5db1d6b014..769f82a528 100644
--- a/bootstrap/lib/stdlib/ebin/gen_fsm.beam
+++ b/bootstrap/lib/stdlib/ebin/gen_fsm.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/gen_server.beam b/bootstrap/lib/stdlib/ebin/gen_server.beam
index cce5da1bfb..7828b04453 100644
--- a/bootstrap/lib/stdlib/ebin/gen_server.beam
+++ b/bootstrap/lib/stdlib/ebin/gen_server.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/gen_statem.beam b/bootstrap/lib/stdlib/ebin/gen_statem.beam
index d37ad51119..88dd0efc8a 100644
--- a/bootstrap/lib/stdlib/ebin/gen_statem.beam
+++ b/bootstrap/lib/stdlib/ebin/gen_statem.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/proc_lib.beam b/bootstrap/lib/stdlib/ebin/proc_lib.beam
index 7b6c20a0b0..9025f68b68 100644
--- a/bootstrap/lib/stdlib/ebin/proc_lib.beam
+++ b/bootstrap/lib/stdlib/ebin/proc_lib.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/supervisor.beam b/bootstrap/lib/stdlib/ebin/supervisor.beam
index 37c6e31aab..1cb1178884 100644
--- a/bootstrap/lib/stdlib/ebin/supervisor.beam
+++ b/bootstrap/lib/stdlib/ebin/supervisor.beam
Binary files differ
diff --git a/bootstrap/lib/stdlib/ebin/supervisor_bridge.beam b/bootstrap/lib/stdlib/ebin/supervisor_bridge.beam
index dbd598ab13..cb61f6e6f9 100644
--- a/bootstrap/lib/stdlib/ebin/supervisor_bridge.beam
+++ b/bootstrap/lib/stdlib/ebin/supervisor_bridge.beam
Binary files differ
diff --git a/erts/configure.in b/erts/configure.in
index 2d0d6c6444..5d9dc9aa43 100644
--- a/erts/configure.in
+++ b/erts/configure.in
@@ -2661,18 +2661,6 @@ AC_CHECK_PROG(M4, m4, m4)
if test X${enable_hipe} != Xno; then
- if test X$ac_cv_sizeof_void_p != X4 && test X$ARCH = Xamd64; then
- dnl HiPE cannot run on x86_64 without MAP_FIXED and MAP_NORESERVE
- AC_CHECK_DECLS([MAP_FIXED, MAP_NORESERVE], [], [], [#include <sys/mman.h>])
- if test X$ac_cv_have_decl_MAP_FIXED != Xyes || test X$ac_cv_have_decl_MAP_NORESERVE != Xyes; then
- if test X${enable_hipe} = Xyes; then
- AC_MSG_ERROR([HiPE on x86_64 needs MAP_FIXED and MAP_NORESERVE flags for mmap()])
- else
- enable_hipe=no
- AC_MSG_WARN([Disable HiPE due to lack of MAP_FIXED and MAP_NORESERVE flags for mmap()])
- fi
- fi
- else
dnl HiPE cannot run without mprotect()
if test X$ac_cv_func_mprotect != Xyes; then
if test X${enable_hipe} = Xyes; then
@@ -2682,7 +2670,6 @@ if test X${enable_hipe} != Xno; then
AC_MSG_WARN([Disable HiPE due to lack of mprotect()])
fi
fi
- fi
fi
dnl check to auto-enable hipe here...
diff --git a/erts/doc/src/absform.xml b/erts/doc/src/absform.xml
index 2ada903edb..158f4dc4e8 100644
--- a/erts/doc/src/absform.xml
+++ b/erts/doc/src/absform.xml
@@ -407,9 +407,8 @@
</item>
<item>
<p>If E is a map creation <c>#{A_1, ..., A_k}</c>,
- where each <c>A_i</c> is an association <c>E_i_1 => E_i_2</c>
- or <c>E_i_1 := E_i_2</c>, then Rep(E) =
- <c>{map,LINE,[Rep(A_1), ..., Rep(A_k)]}</c>.
+ where each <c>A_i</c> is an association <c>E_i_1 => E_i_2</c>,
+ then Rep(E) = <c>{map,LINE,[Rep(A_1), ..., Rep(A_k)]}</c>.
For Rep(A), see below.</p>
</item>
<item>
@@ -731,9 +730,8 @@
</item>
<item>
<p>If Gt is a map creation <c>#{A_1, ..., A_k}</c>,
- where each <c>A_i</c> is an association <c>Gt_i_1 => Gt_i_2</c>
- or <c>Gt_i_1 := Gt_i_2</c>, then Rep(Gt) =
- <c>{map,LINE,[Rep(A_1), ..., Rep(A_k)]}</c>.
+ where each <c>A_i</c> is an association <c>Gt_i_1 => Gt_i_2</c>,
+ then Rep(Gt) = <c>{map,LINE,[Rep(A_1), ..., Rep(A_k)]}</c>.
For Rep(A), see above.</p>
</item>
<item>
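
For illustration only (not part of this change): a concrete instance of the map-creation representation described above, obtained from the standard erl_scan/erl_parse API in an Erlang shell.

    1> {ok, Tokens, _} = erl_scan:string("#{a => 1}.").
    2> {ok, [Expr]} = erl_parse:parse_exprs(Tokens).
    3> Expr.
    {map,1,[{map_field_assoc,1,{atom,1,a},{integer,1,1}}]}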
diff --git a/erts/emulator/beam/atom.names b/erts/emulator/beam/atom.names
index cceca66850..fba0611042 100644
--- a/erts/emulator/beam/atom.names
+++ b/erts/emulator/beam/atom.names
@@ -358,6 +358,7 @@ atom loaded
atom load_cancelled
atom load_failure
atom local
+atom logger
atom long_gc
atom long_schedule
atom low
diff --git a/erts/emulator/beam/beam_bif_load.c b/erts/emulator/beam/beam_bif_load.c
index 5c76aafae7..d9312f4df8 100644
--- a/erts/emulator/beam/beam_bif_load.c
+++ b/erts/emulator/beam/beam_bif_load.c
@@ -1757,11 +1757,11 @@ BIF_RETTYPE erts_internal_purge_module_2(BIF_ALIST_2)
release_literal_areas.last = ref;
}
erts_mtx_unlock(&release_literal_areas.mtx);
- erts_queue_message(erts_literal_area_collector,
+ erts_queue_proc_message(BIF_P,
+ erts_literal_area_collector,
0,
erts_alloc_message(0, NULL),
- am_copy_literals,
- BIF_P->common.id);
+ am_copy_literals);
}
return ret;
diff --git a/erts/emulator/beam/beam_debug.c b/erts/emulator/beam/beam_debug.c
index 5eb68b817e..b8a8d06315 100644
--- a/erts/emulator/beam/beam_debug.c
+++ b/erts/emulator/beam/beam_debug.c
@@ -1191,7 +1191,7 @@ dirty_send_message(Process *c_p, Eterm to, Eterm tag)
mp = erts_alloc_message_heap(rp, &rp_locks, 3, &hp, &ohp);
msg = TUPLE2(hp, tag, c_p->common.id);
- erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
+ erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
if (rp == real_c_p)
rp_locks &= ~c_p_locks;
diff --git a/erts/emulator/beam/dist.c b/erts/emulator/beam/dist.c
index f203d85ca9..026f0a62d4 100644
--- a/erts/emulator/beam/dist.c
+++ b/erts/emulator/beam/dist.c
@@ -1471,22 +1471,16 @@ int erts_net_message(Port *prt,
mdp = erts_monitor_create(ERTS_MON_TYPE_DIST_PROC,
ref, watcher, pid, name);
-#ifdef DEBUG
- code =
-#endif
- erts_monitor_dist_insert(&mdp->origin, dep->mld);
- ASSERT(code);
+ code = erts_monitor_dist_insert(&mdp->origin, dep->mld);
+ ASSERT(code); (void)code;
if (erts_proc_sig_send_monitor(&mdp->target, pid))
break; /* done */
/* Failed to send to local proc; cleanup reply noproc... */
-#ifdef DEBUG
- code =
-#endif
- erts_monitor_dist_delete(&mdp->origin);
- ASSERT(code);
+ code = erts_monitor_dist_delete(&mdp->origin);
+ ASSERT(code); (void)code;
erts_monitor_release_both(mdp);
}
@@ -3593,7 +3587,7 @@ int erts_auto_connect(DistEntry* dep, Process *proc, ErtsProcLocks proc_locks)
dhandle = erts_build_dhandle(&hp, ohp, dep);
msg = TUPLE4(hp, am_auto_connect, dep->sysname, make_small(conn_id),
dhandle);
- erts_queue_message(net_kernel, nk_locks, mp, msg, proc->common.id);
+ erts_queue_proc_message(proc, net_kernel, nk_locks, mp, msg);
erts_proc_unlock(net_kernel, nk_locks);
}
diff --git a/erts/emulator/beam/erl_db.c b/erts/emulator/beam/erl_db.c
index f7ee408991..ca2ebb7c27 100644
--- a/erts/emulator/beam/erl_db.c
+++ b/erts/emulator/beam/erl_db.c
@@ -3547,7 +3547,7 @@ send_ets_transfer_message(Process *c_p, Process *proc,
hd_copy = copy_struct(heir_data, hd_sz, &hp, ohp);
sender = c_p->common.id;
msg = TUPLE4(hp, am_ETS_TRANSFER, tid, sender, hd_copy);
- erts_queue_message(proc, *locks, mp, msg, sender);
+ erts_queue_proc_message(c_p, proc, *locks, mp, msg);
}
diff --git a/erts/emulator/beam/erl_gc.c b/erts/emulator/beam/erl_gc.c
index b498fd9cf9..0692cea0ee 100644
--- a/erts/emulator/beam/erl_gc.c
+++ b/erts/emulator/beam/erl_gc.c
@@ -61,7 +61,7 @@
# define ERTS_GC_ASSERT(B) ((void) 1)
#endif
-#if defined(DEBUG) && 1
+#if defined(DEBUG) && 0
# define HARDDEBUG 1
#endif
@@ -223,23 +223,6 @@ ERTS_SCHED_PREF_QUICK_ALLOC_IMPL(gcireq,
5,
ERTS_ALC_T_GC_INFO_REQ)
-static ERTS_INLINE void
-ensure_sigq_roots_available(Process *p)
-{
- ERTS_LC_ASSERT(ERTS_PROC_LOCK_MAIN == erts_proc_lc_my_proc_locks(p));
- switch (p->flags & (F_OFF_HEAP_MSGQ|F_OFF_HEAP_MSGQ_CHNG)) {
- case F_OFF_HEAP_MSGQ_CHNG:
- case 0:
- erts_proc_lock(p, ERTS_PROC_LOCK_MSGQ);
- erts_proc_sig_fetch(p);
- erts_proc_unlock(p, ERTS_PROC_LOCK_MSGQ);
- break;
- default:
- break;
- }
-}
-
-
/*
* Initialize GC global data.
*/
@@ -450,8 +433,6 @@ erts_gc_after_bif_call_lhf(Process* p, ErlHeapFragment *live_hf_end,
live_hf_end = ERTS_INVALID_HFRAG_PTR;
}
- ensure_sigq_roots_available(p);
-
if (is_non_value(result)) {
if (p->freason == TRAP) {
#ifdef HIPE
@@ -895,11 +876,8 @@ do_major_collection:
int
erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fcalls)
{
- int reds;
- int reds_left;
- ensure_sigq_roots_available(p);
- reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0);
- reds_left = ERTS_REDS_LEFT(p, fcalls);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, fcalls, 0);
+ int reds_left = ERTS_REDS_LEFT(p, fcalls);
if (reds > reds_left)
reds = reds_left;
ASSERT(CONTEXT_REDS - (reds_left - reds) >= erts_proc_sched_data(p)->virtual_reds);
@@ -909,9 +887,7 @@ erts_garbage_collect_nobump(Process* p, int need, Eterm* objv, int nobj, int fca
void
erts_garbage_collect(Process* p, int need, Eterm* objv, int nobj)
{
- int reds;
- ensure_sigq_roots_available(p);
- reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0);
+ int reds = garbage_collect(p, ERTS_INVALID_HFRAG_PTR, need, objv, nobj, p->fcalls, 0);
BUMP_REDS(p, reds);
ASSERT(CONTEXT_REDS - ERTS_BIF_REDS_LEFT(p)
>= erts_proc_sched_data(p)->virtual_reds);
@@ -1137,8 +1113,6 @@ erts_garbage_collect_literals(Process* p, Eterm* literals,
* First an ordinary major collection...
*/
- ensure_sigq_roots_available(p);
-
p->flags |= F_NEED_FULLSWEEP;
if (ERTS_SCHEDULER_IS_DIRTY(erts_proc_sched_data(p)))
diff --git a/erts/emulator/beam/erl_message.c b/erts/emulator/beam/erl_message.c
index 34bd11d87c..bea7a0fe86 100644
--- a/erts/emulator/beam/erl_message.c
+++ b/erts/emulator/beam/erl_message.c
@@ -303,8 +303,10 @@ erts_queue_dist_message(Process *rcvr,
erts_cleanup_messages(mp);
}
else {
+ LINK_MESSAGE(rcvr, mp);
- LINK_MESSAGE(rcvr, mp, &mp->next, 1);
+ if (rcvr_locks & ERTS_PROC_LOCK_MAIN)
+ erts_proc_sig_fetch(rcvr);
if (!(rcvr_locks & ERTS_PROC_LOCK_MSGQ))
erts_proc_unlock(rcvr, ERTS_PROC_LOCK_MSGQ);
@@ -314,9 +316,8 @@ erts_queue_dist_message(Process *rcvr,
}
/* Add messages last in message queue */
-static Sint
+static void
queue_messages(Process* receiver,
- erts_aint32_t *receiver_state,
ErtsProcLocks receiver_locks,
ErtsMessage* first,
ErtsMessage** last,
@@ -325,86 +326,121 @@ queue_messages(Process* receiver,
int locked_msgq = 0;
erts_aint32_t state;
- ASSERT(is_value(ERL_MESSAGE_TERM(first)));
- ASSERT(is_value(ERL_MESSAGE_FROM(first)));
- ASSERT(ERL_MESSAGE_TOKEN(first) == am_undefined ||
- ERL_MESSAGE_TOKEN(first) == NIL ||
- is_tuple(ERL_MESSAGE_TOKEN(first)));
-
-#ifdef ERTS_ENABLE_LOCK_CHECK
- ERTS_LC_ASSERT(erts_proc_lc_my_proc_locks(receiver) < ERTS_PROC_LOCK_MSGQ ||
- receiver_locks == erts_proc_lc_my_proc_locks(receiver));
+#ifdef DEBUG
+ {
+ ErtsMessage* fmsg = ERTS_SIG_IS_MSG(first) ? first : first->next;
+ ASSERT(fmsg);
+ ASSERT(is_value(ERL_MESSAGE_TERM(fmsg)));
+ ASSERT(is_value(ERL_MESSAGE_FROM(fmsg)));
+ ASSERT(ERL_MESSAGE_TOKEN(fmsg) == am_undefined ||
+ ERL_MESSAGE_TOKEN(fmsg) == NIL ||
+ is_tuple(ERL_MESSAGE_TOKEN(fmsg)));
+ }
#endif
- if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
- if (erts_proc_trylock(receiver, ERTS_PROC_LOCK_MSGQ) == EBUSY) {
- ErtsProcLocks need_locks;
-
- if (receiver_state)
- state = *receiver_state;
- else
- state = erts_atomic32_read_nob(&receiver->state);
- if (state & ERTS_PSFLG_EXITING)
- goto exiting;
+ ERTS_LC_ASSERT((erts_proc_lc_my_proc_locks(receiver) & ERTS_PROC_LOCK_MSGQ)
+ == (receiver_locks & ERTS_PROC_LOCK_MSGQ));
- need_locks = receiver_locks & ERTS_PROC_LOCKS_HIGHER_THAN(ERTS_PROC_LOCK_MSGQ);
- if (need_locks) {
- erts_proc_unlock(receiver, need_locks);
- }
- need_locks |= ERTS_PROC_LOCK_MSGQ;
- erts_proc_lock(receiver, need_locks);
- }
+ if (!(receiver_locks & ERTS_PROC_LOCK_MSGQ)) {
+ erts_proc_lock(receiver, ERTS_PROC_LOCK_MSGQ);
locked_msgq = 1;
}
-
state = erts_atomic32_read_nob(&receiver->state);
if (state & ERTS_PSFLG_EXITING) {
- exiting:
/* Drop message if receiver is exiting or has a pending exit... */
if (locked_msgq)
- erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+ erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
+ if (ERTS_SIG_IS_NON_MSG(first)) {
+ ErtsSchedulerData* esdp = erts_get_scheduler_data();
+ ASSERT(esdp);
+ ASSERT(!esdp->pending_signal.sig);
+ esdp->pending_signal.sig = (ErtsSignal*) first;
+ esdp->pending_signal.to = receiver->common.id;
+ first = first->next;
+ }
erts_cleanup_messages(first);
- return 0;
+ return;
}
- LINK_MESSAGE(receiver, first, last, len);
+ if (last == &first->next) {
+ ASSERT(len == 1);
+ LINK_MESSAGE(receiver, first);
+ }
+ else {
+ erts_enqueue_signals(receiver, first, last, NULL, len, state);
+ }
+
+ if (receiver_locks & ERTS_PROC_LOCK_MAIN)
+ erts_proc_sig_fetch(receiver);
if (locked_msgq) {
erts_proc_unlock(receiver, ERTS_PROC_LOCK_MSGQ);
}
erts_proc_notify_new_message(receiver, receiver_locks);
- return 0;
}
-static Sint
-queue_message(Process* receiver,
- erts_aint32_t *receiver_state,
- ErtsProcLocks receiver_locks,
- ErtsMessage* mp, Eterm msg, Eterm from)
+static ERTS_INLINE
+ErtsMessage* prepend_pending_sig_maybe(Process* sender, Process* receiver,
+ ErtsMessage* mp)
{
- ERL_MESSAGE_TERM(mp) = msg;
- ERL_MESSAGE_FROM(mp) = from;
- return queue_messages(receiver, receiver_state, receiver_locks,
- mp, &mp->next, 1);
+ ErtsSchedulerData* esdp = sender->scheduler_data;
+ ErtsSignal* pend_sig;
+
+ if (!esdp || esdp->pending_signal.to != receiver->common.id)
+ return mp;
+
+ pend_sig = esdp->pending_signal.sig;
+
+ ASSERT(esdp->pending_signal.dbg_from == sender);
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+ pend_sig->common.next = mp;
+ pend_sig->common.specific.next = NULL;
+ return (ErtsMessage*) pend_sig;
}
-Sint
+/**
+ *
+ * @brief Send one message from a sender that is not a local process.
+ *
+ */
+void
erts_queue_message(Process* receiver, ErtsProcLocks receiver_locks,
ErtsMessage* mp, Eterm msg, Eterm from)
{
- return queue_message(receiver, NULL, receiver_locks, mp, msg, from);
+ ASSERT(is_not_internal_pid(from));
+ ERL_MESSAGE_TERM(mp) = msg;
+ ERL_MESSAGE_FROM(mp) = from;
+ queue_messages(receiver, receiver_locks, mp, &mp->next, 1);
}
+/**
+ * @brief Send one message from a local process.
+ */
+void
+erts_queue_proc_message(Process* sender,
+ Process* receiver, ErtsProcLocks receiver_locks,
+ ErtsMessage* mp, Eterm msg)
+{
+ ERL_MESSAGE_TERM(mp) = msg;
+ ERL_MESSAGE_FROM(mp) = sender->common.id;
+ queue_messages(receiver, receiver_locks,
+ prepend_pending_sig_maybe(sender, receiver, mp),
+ &mp->next, 1);
+}
-Sint
-erts_queue_messages(Process* receiver, ErtsProcLocks receiver_locks,
- ErtsMessage* first, ErtsMessage** last, Uint len)
+
+void
+erts_queue_proc_messages(Process* sender,
+ Process* receiver, ErtsProcLocks receiver_locks,
+ ErtsMessage* first, ErtsMessage** last, Uint len)
{
- return queue_messages(receiver, NULL, receiver_locks,
- first, last, len);
+ queue_messages(receiver, receiver_locks,
+ prepend_pending_sig_maybe(sender, receiver, first),
+ last, len);
}
void
@@ -541,7 +577,7 @@ erts_try_alloc_message_on_heap(Process *pp,
* Send a local message when sender & receiver processes are known.
*/
-Sint
+void
erts_send_message(Process* sender,
Process* receiver,
ErtsProcLocks *receiver_locks,
@@ -552,7 +588,6 @@ erts_send_message(Process* sender,
ErtsMessage* mp;
ErlOffHeap *ohp;
Eterm token = NIL;
- Sint res = 0;
#ifdef USE_VM_PROBES
DTRACE_CHARBUF(sender_name, 64);
DTRACE_CHARBUF(receiver_name, 64);
@@ -695,13 +730,8 @@ erts_send_message(Process* sender,
#ifdef USE_VM_PROBES
ERL_MESSAGE_DT_UTAG(mp) = utag;
#endif
- res = queue_message(receiver,
- &receiver_state,
- *receiver_locks,
- mp, message,
- sender->common.id);
- return res;
+ erts_queue_proc_message(sender, receiver, *receiver_locks, mp, message);
}
diff --git a/erts/emulator/beam/erl_message.h b/erts/emulator/beam/erl_message.h
index ee87297ba4..d120111634 100644
--- a/erts/emulator/beam/erl_message.h
+++ b/erts/emulator/beam/erl_message.h
@@ -229,7 +229,7 @@ typedef union {
typedef struct {
/* pointers to next pointers pointing to... */
ErtsMessage **next; /* ... next (non-message) signal */
- ErtsMessage **last; /* ... next (non-message) signal */
+ ErtsMessage **last; /* ... last (non-message) signal */
} ErtsMsgQNMSigs;
/* Size of default message buffer (erl_message.c) */
@@ -296,8 +296,11 @@ typedef struct {
typedef struct {
ErtsMessage* first;
ErtsMessage** last; /* point to the last next pointer */
- Sint len; /* queue length */
+ Sint len; /* number of messages in queue */
ErtsMsgQNMSigs nmsigs;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+ int may_contain_heap_terms;
+#endif
} ErtsSignalInQueue;
typedef struct erl_trace_message_queue__ {
@@ -364,13 +367,14 @@ typedef struct erl_trace_message_queue__ {
#endif
-/* Add message last_msg in message queue */
-#define LINK_MESSAGE(p, first_msg, last_msg, num_msgs) \
+/* Add one message last in message queue */
+#define LINK_MESSAGE(p, msg) \
do { \
+ ASSERT(ERTS_SIG_IS_MSG(msg)); \
ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__((p), "before"); \
- *(p)->sig_inq.last = (first_msg); \
- (p)->sig_inq.last = (last_msg); \
- (p)->sig_inq.len += (num_msgs); \
+ *(p)->sig_inq.last = (msg); \
+ (p)->sig_inq.last = &(msg)->next; \
+ (p)->sig_inq.len++; \
ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE__((p), "before"); \
} while(0)
@@ -437,11 +441,12 @@ ErlHeapFragment* erts_resize_message_buffer(ErlHeapFragment *, Uint,
Eterm *, Uint);
void free_message_buffer(ErlHeapFragment *);
void erts_queue_dist_message(Process*, ErtsProcLocks, ErtsDistExternal *, Eterm, Eterm);
-Sint erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm);
-Sint erts_queue_messages(Process*, ErtsProcLocks,
- ErtsMessage*, ErtsMessage**, Uint);
+void erts_queue_message(Process*, ErtsProcLocks,ErtsMessage*, Eterm, Eterm);
+void erts_queue_proc_message(Process* from,Process* to, ErtsProcLocks,ErtsMessage*, Eterm);
+void erts_queue_proc_messages(Process* from, Process* to, ErtsProcLocks,
+ ErtsMessage*, ErtsMessage**, Uint);
void erts_deliver_exit_message(Eterm, Process*, ErtsProcLocks *, Eterm, Eterm);
-Sint erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
+void erts_send_message(Process*, Process*, ErtsProcLocks*, Eterm, unsigned);
void erts_link_mbuf_to_proc(Process *proc, ErlHeapFragment *bp);
Uint erts_msg_attached_data_size_aux(ErtsMessage *msg);
diff --git a/erts/emulator/beam/erl_nif.c b/erts/emulator/beam/erl_nif.c
index 260656e2d3..e208792868 100644
--- a/erts/emulator/beam/erl_nif.c
+++ b/erts/emulator/beam/erl_nif.c
@@ -665,7 +665,7 @@ int erts_flush_trace_messages(Process *c_p, ErtsProcLocks c_p_locks)
rp_locks = 0;
if (rp->common.id == c_p->common.id)
rp_locks = c_p_locks;
- erts_queue_messages(rp, rp_locks, first, last, len);
+ erts_queue_proc_messages(c_p, rp, rp_locks, first, last, len);
if (rp->common.id == c_p->common.id)
rp_locks &= ~c_p_locks;
if (rp_locks)
@@ -856,7 +856,10 @@ int enif_send(ErlNifEnv* env, const ErlNifPid* to_pid,
}
}
- erts_queue_message(rp, rp_locks, mp, msg, from);
+ if (c_p)
+ erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
+ else
+ erts_queue_message(rp, rp_locks, mp, msg, from);
done:
if (c_p == rp)
diff --git a/erts/emulator/beam/erl_port.h b/erts/emulator/beam/erl_port.h
index 2a98a6f00b..9b52b648e5 100644
--- a/erts/emulator/beam/erl_port.h
+++ b/erts/emulator/beam/erl_port.h
@@ -485,6 +485,8 @@ ERTS_GLB_INLINE Uint32 erts_portid2status(Eterm);
ERTS_GLB_INLINE int erts_is_port_alive(Eterm);
ERTS_GLB_INLINE int erts_is_valid_tracer_port(Eterm);
ERTS_GLB_INLINE int erts_port_driver_callback_epilogue(Port *, erts_aint32_t *);
+ERTS_GLB_INLINE Port *erts_get_current_port(void);
+ERTS_GLB_INLINE Eterm erts_get_current_port_id(void);
#define erts_drvport2port(Prt) erts_drvport2port_state((Prt), NULL)
@@ -812,6 +814,20 @@ erts_port_driver_callback_epilogue(Port *prt, erts_aint32_t *statep)
return reds;
}
+ERTS_GLB_INLINE
+Port *erts_get_current_port(void)
+{
+ ErtsSchedulerData *esdp = erts_get_scheduler_data();
+ return esdp ? esdp->current_port : NULL;
+}
+
+ERTS_GLB_INLINE
+Eterm erts_get_current_port_id(void)
+{
+ Port *port = erts_get_current_port();
+ return port ? port->common.id : THE_NON_VALUE;
+}
+
#endif /* #if ERTS_GLB_INLINE_INCL_FUNC_DEF */
void erts_port_resume_procs(Port *);
diff --git a/erts/emulator/beam/erl_proc_sig_queue.c b/erts/emulator/beam/erl_proc_sig_queue.c
index bcc4fc6d9b..a2e6f1d39d 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.c
+++ b/erts/emulator/beam/erl_proc_sig_queue.c
@@ -308,9 +308,8 @@ destroy_sig_group_leader(ErtsSigGroupLeader *sgl)
}
static ERTS_INLINE void
-sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op,
- Process *rp, ErtsMessage **first,
- ErtsMessage **last, ErtsMessage ***last_next)
+sig_enqueue_trace(Process *c_p, ErtsMessage **sigp, int op,
+ Process *rp, ErtsMessage ***last_next)
{
switch (op) {
case ERTS_SIG_Q_OP_LINK:
@@ -326,12 +325,11 @@ sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op,
* Prepend a trace-change-state signal before the
* link signal...
*/
-
tag = ERTS_PROC_SIG_MAKE_TAG(ERTS_SIG_Q_OP_TRACE_CHANGE_STATE,
ERTS_SIG_Q_TYPE_ADJUST_TRACE_INFO,
0);
ti = erts_alloc(ERTS_ALC_T_SIG_DATA, sizeof(ErtsSigTraceInfo));
- ti->common.next = *last;
+ ti->common.next = *sigp;
ti->common.specific.next = &ti->common.next;
ti->common.tag = tag;
ti->flags_on = ERTS_TRACE_FLAGS(c_p) & TRACEE_FLAGS;
@@ -344,8 +342,9 @@ sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op,
erts_proc_unlock(c_p, ERTS_PROC_LOCKS_ALL_MINOR);
}
erts_tracer_update(&ti->tracer, ERTS_TRACER(c_p));
- *first = (ErtsMessage *) ti;
- *last_next = &ti->common.next;
+ *sigp = (ErtsMessage *) ti;
+ if (!*last_next || *last_next == sigp)
+ *last_next = &ti->common.next;
}
break;
@@ -354,6 +353,7 @@ sig_enqueue_trace(Process *c_p, ErtsMessage *sig, int op,
case ERTS_SIG_Q_OP_EXIT_LINKED:
if (DTRACE_ENABLED(process_exit_signal)) {
+ ErtsMessage* sig = *sigp;
Uint16 type = ERTS_PROC_SIG_TYPE(((ErtsSignal *) sig)->common.tag);
Eterm reason, from;
@@ -430,9 +430,24 @@ sig_enqueue_trace_cleanup(ErtsMessage *first, ErtsSignal *sig, ErtsMessage *last
}
}
+#ifdef DEBUG
+static int dbg_count_nmsigs(ErtsMessage *first)
+{
+ ErtsMessage *sig;
+ int cnt = 0;
+
+ for (sig = first; sig; sig = sig->next) {
+ if (ERTS_SIG_IS_NON_MSG(sig))
+ ++cnt;
+ }
+ return cnt;
+}
+#endif
+
static ERTS_INLINE erts_aint32_t
enqueue_signals(Process *rp, ErtsMessage *first,
- ErtsMessage *last, ErtsMessage **last_next,
+ ErtsMessage **last, ErtsMessage **last_next,
+ Uint num_msgs,
erts_aint32_t in_state)
{
erts_aint32_t state = in_state;
@@ -442,13 +457,23 @@ enqueue_signals(Process *rp, ErtsMessage *first,
ASSERT(!*this);
*this = first;
- rp->sig_inq.last = &last->next;
+ rp->sig_inq.last = last;
if (!rp->sig_inq.nmsigs.next) {
ASSERT(!rp->sig_inq.nmsigs.last);
- rp->sig_inq.nmsigs.next = this;
+ if (ERTS_SIG_IS_NON_MSG(first)) {
+ rp->sig_inq.nmsigs.next = this;
+ }
+ else if (last_next) {
+ ASSERT(first->next && ERTS_SIG_IS_NON_MSG(first->next));
+ rp->sig_inq.nmsigs.next = &first->next;
+ }
+ else
+ goto no_nmsig;
+
state = erts_atomic32_read_bor_nob(&rp->state,
ERTS_PSFLG_SIG_IN_Q);
+ no_nmsig:
ASSERT(!(state & ERTS_PSFLG_SIG_IN_Q));
}
else {
@@ -459,23 +484,41 @@ enqueue_signals(Process *rp, ErtsMessage *first,
ASSERT(sig && !sig->common.specific.next);
ASSERT(state & ERTS_PSFLG_SIG_IN_Q);
- sig->common.specific.next = this;
+ if (ERTS_SIG_IS_NON_MSG(first)) {
+ sig->common.specific.next = this;
+ }
+ else if (last_next) {
+ ASSERT(first->next && ERTS_SIG_IS_NON_MSG(first->next));
+ sig->common.specific.next = &first->next;
+ }
}
if (last_next) {
- ASSERT(first != last);
+ ASSERT(dbg_count_nmsigs(first) >= 2);
rp->sig_inq.nmsigs.last = last_next;
}
- else {
- ASSERT(first == last);
+ else if (ERTS_SIG_IS_NON_MSG(first)) {
+ ASSERT(dbg_count_nmsigs(first) == 1);
rp->sig_inq.nmsigs.last = this;
}
+ else
+ ASSERT(dbg_count_nmsigs(first) == 0);
+
+ rp->sig_inq.len += num_msgs;
ERTS_HDBG_CHECK_SIGNAL_IN_QUEUE(rp);
return state;
}
+erts_aint32_t erts_enqueue_signals(Process *rp, ErtsMessage *first,
+ ErtsMessage **last, ErtsMessage **last_next,
+ Uint num_msgs,
+ erts_aint32_t in_state)
+{
+ return enqueue_signals(rp, first, last, last_next, num_msgs, in_state);
+}
+
static ERTS_INLINE void
ensure_dirty_proc_handled(Eterm pid,
erts_aint32_t state,
@@ -516,26 +559,92 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
{
int res;
Process *rp;
- ErtsMessage *first, *last, **last_next;
+ ErtsMessage *first, *last, **last_next, **sigp;
ErtsSchedulerData *esdp = erts_get_scheduler_data();
int is_normal_sched = !!esdp && esdp->type == ERTS_SCHED_NORMAL;
erts_aint32_t state;
+ ErtsSignal *pend_sig;
- if (is_normal_sched)
- rp = erts_proc_lookup_raw(pid);
- else
- rp = erts_proc_lookup_raw_inc_refc(pid);
+ if (is_normal_sched) {
+ pend_sig = esdp->pending_signal.sig;
+ if (op == ERTS_SIG_Q_OP_MONITOR
+ && ((ErtsMonitor*)sig)->type == ERTS_MON_TYPE_PROC) {
- if (!rp)
- return 0;
+ if (!pend_sig) {
+ esdp->pending_signal.sig = sig;
+ esdp->pending_signal.to = pid;
+#ifdef DEBUG
+ esdp->pending_signal.dbg_from = esdp->current_process;
+#endif
+ return 1;
+ }
+ ASSERT(esdp->pending_signal.dbg_from == esdp->current_process);
+ if (pend_sig != sig) {
+ /* Switch them and send previously pending signal instead */
+ Eterm pend_to = esdp->pending_signal.to;
+ esdp->pending_signal.sig = sig;
+ esdp->pending_signal.to = pid;
+ sig = pend_sig;
+ pid = pend_to;
+ }
+ else {
+ /* Caller wants to flush pending signal */
+ ASSERT(pid == esdp->pending_signal.to);
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+#ifdef DEBUG
+ esdp->pending_signal.dbg_from = NULL;
+#endif
+ pend_sig = NULL;
+ }
+ rp = erts_proc_lookup_raw(pid);
+ if (!rp) {
+ erts_proc_sig_send_monitor_down((ErtsMonitor*)sig, am_noproc);
+ return 1;
+ }
+ }
+ else if (pend_sig && pid == esdp->pending_signal.to) {
+ /* Flush pending signal to maintain signal order */
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+
+ rp = erts_proc_lookup_raw(pid);
+ if (!rp) {
+ erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc);
+ return 0;
+ }
+
+ /* Prepend pending signal */
+ pend_sig->common.next = (ErtsMessage*) sig;
+ pend_sig->common.specific.next = &pend_sig->common.next;
+ first = (ErtsMessage*) pend_sig;
+ last = (ErtsMessage*) sig;
+ sigp = last_next = &pend_sig->common.next;
+ goto first_last_done;
+ }
+ else {
+ pend_sig = NULL;
+ rp = erts_proc_lookup_raw(pid);
+ if (!rp)
+ return 0;
+ }
+ }
+ else {
+ rp = erts_proc_lookup_raw_inc_refc(pid);
+ if (!rp)
+ return 0;
+ pend_sig = NULL;
+ }
- sig->common.specific.next = NULL;
first = last = (ErtsMessage *) sig;
last_next = NULL;
+ sigp = &first;
+
+first_last_done:
+ sig->common.specific.next = NULL;
/* may add signals before and/or after sig */
- sig_enqueue_trace(c_p, first, op, rp,
- &first, &last, &last_next);
+ sig_enqueue_trace(c_p, sigp, op, rp, &last_next);
last->next = NULL;
@@ -546,7 +655,7 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
if (ERTS_PSFLG_FREE & state)
res = 0;
else {
- state = enqueue_signals(rp, first, last, last_next, state);
+ state = enqueue_signals(rp, first, &last->next, last_next, 0, state);
if (ERTS_UNLIKELY(op == ERTS_SIG_Q_OP_PROCESS_INFO))
check_push_msgq_len_offs_marker(rp, sig);
res = !0;
@@ -554,8 +663,21 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
erts_proc_unlock(rp, ERTS_PROC_LOCK_MSGQ);
- if (res == 0)
+ if (res == 0) {
+ if (pend_sig) {
+ if (sig == pend_sig) {
+ /* We did a switch, callers signal is now pending (still ok) */
+ ASSERT(esdp->pending_signal.sig);
+ res = 1;
+ }
+ else {
+ ASSERT(first == (ErtsMessage*)pend_sig);
+ first = first->next;
+ }
+ erts_proc_sig_send_monitor_down((ErtsMonitor*)pend_sig, am_noproc);
+ }
sig_enqueue_trace_cleanup(first, sig, last);
+ }
if (!(state & (ERTS_PSFLG_EXITING
| ERTS_PSFLG_ACTIVE_SYS
@@ -572,6 +694,24 @@ proc_queue_signal(Process *c_p, Eterm pid, ErtsSignal *sig, int op)
return res;
}
+void erts_proc_sig_send_pending(ErtsSchedulerData* esdp)
+{
+ ErtsSignal* sig = esdp->pending_signal.sig;
+ int op;
+
+ ASSERT(esdp && esdp->type == ERTS_SCHED_NORMAL);
+ ASSERT(sig);
+ ASSERT(is_internal_pid(esdp->pending_signal.to));
+
+ op = ERTS_SIG_Q_OP_MONITOR;
+ ASSERT(op == ERTS_PROC_SIG_OP(sig->common.tag));
+
+ if (!proc_queue_signal(NULL, esdp->pending_signal.to, sig, op)) {
+ ErtsMonitor* mon = (ErtsMonitor*)sig;
+ erts_proc_sig_send_monitor_down(mon, am_noproc);
+ }
+}
+
static int
maybe_elevate_sig_handling_prio(Process *c_p, Eterm other)
{
@@ -612,11 +752,6 @@ maybe_elevate_sig_handling_prio(Process *c_p, Eterm other)
void
erts_proc_sig_fetch__(Process *proc)
{
-#ifdef ERTS_PROC_SIG_HARD_DEBUG
- ErtsSignalPrivQueues sig_qs = proc->sig_qs;
- ErtsSignalInQueue sig_inq = proc->sig_inq;
-#endif
-
ASSERT(proc->sig_inq.first);
if (!proc->sig_inq.nmsigs.next) {
@@ -634,9 +769,7 @@ erts_proc_sig_fetch__(Process *proc)
}
}
else {
-#ifdef DEBUG
erts_aint32_t s;
-#endif
ASSERT(proc->sig_inq.nmsigs.last);
if (!proc->sig_qs.nmsigs.last) {
ASSERT(!proc->sig_qs.nmsigs.next);
@@ -645,16 +778,13 @@ erts_proc_sig_fetch__(Process *proc)
else
proc->sig_qs.nmsigs.next = proc->sig_inq.nmsigs.next;
-#ifdef DEBUG
- s =
-#endif
- erts_atomic32_read_bset_nob(&proc->state,
+ s = erts_atomic32_read_bset_nob(&proc->state,
(ERTS_PSFLG_SIG_Q
| ERTS_PSFLG_SIG_IN_Q),
ERTS_PSFLG_SIG_Q);
ASSERT((s & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q))
- == ERTS_PSFLG_SIG_IN_Q);
+ == ERTS_PSFLG_SIG_IN_Q); (void)s;
}
else {
ErtsSignal *sig;
@@ -667,14 +797,11 @@ erts_proc_sig_fetch__(Process *proc)
else
sig->common.specific.next = proc->sig_inq.nmsigs.next;
-#ifdef DEBUG
- s =
-#endif
- erts_atomic32_read_band_nob(&proc->state,
+ s = erts_atomic32_read_band_nob(&proc->state,
~ERTS_PSFLG_SIG_IN_Q);
ASSERT((s & (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q))
- == (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q));
+ == (ERTS_PSFLG_SIG_Q|ERTS_PSFLG_SIG_IN_Q)); (void)s;
}
if (proc->sig_inq.nmsigs.last == &proc->sig_inq.first)
proc->sig_qs.nmsigs.last = proc->sig_qs.cont_last;
@@ -1417,8 +1544,7 @@ erts_proc_sig_send_is_alive_request(Process *c_p, Eterm to, Eterm ref)
/* It wasn't alive; reply to ourselves... */
mp->next = NULL;
mp->data.attached = ERTS_MSG_COMBINED_HFRAG;
- erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN,
- mp, msg, am_system);
+ erts_queue_message(c_p, ERTS_PROC_LOCK_MAIN, mp, msg, am_system);
}
}
@@ -2391,8 +2517,9 @@ destroy_process_info_request(Process *c_p, ErtsProcessInfoSig *pisig)
}
static int
-handle_process_info(Process *c_p, ErtsMessage *sig,
- ErtsMessage ***next_nm_sig, int is_alive)
+handle_process_info(Process *c_p, ErtsSigRecvTracing *tracing,
+ ErtsMessage *sig, ErtsMessage ***next_nm_sig,
+ int is_alive)
{
ErtsProcessInfoSig *pisig = (ErtsProcessInfoSig *) sig;
Uint reds = 0;
@@ -2416,7 +2543,11 @@ handle_process_info(Process *c_p, ErtsMessage *sig,
* Move messages part of message queue into inner
* signal queue...
*/
+ ASSERT(tracing);
+
if (*next_nm_sig != &c_p->sig_qs.cont) {
+ if (*next_nm_sig == tracing->messages.next)
+ tracing->messages.next = &c_p->sig_qs.cont;
*c_p->sig_qs.last = c_p->sig_qs.cont;
c_p->sig_qs.last = *next_nm_sig;
@@ -2498,7 +2629,7 @@ handle_process_info(Process *c_p, ErtsMessage *sig,
if (is_alive)
erts_factory_trim_and_close(&hfact, &msg, 1);
- erts_queue_message(rp, locks, mp, msg, c_p->common.id);
+ erts_queue_proc_message(c_p, rp, locks, mp, msg);
if (!is_alive && locks)
erts_proc_unlock(rp, locks);
@@ -2877,7 +3008,7 @@ erts_proc_sig_handle_incoming(Process *c_p, erts_aint32_t *statep,
case ERTS_SIG_Q_OP_PROCESS_INFO:
ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
- handle_process_info(c_p, sig, next_nm_sig, !0);
+ handle_process_info(c_p, &tracing, sig, next_nm_sig, !0);
ERTS_PROC_SIG_HDBG_PRIV_CHKQ(c_p, &tracing, next_nm_sig);
break;
@@ -3198,7 +3329,7 @@ erts_proc_sig_handle_exit(Process *c_p, int *redsp)
break;
case ERTS_SIG_Q_OP_PROCESS_INFO:
- handle_process_info(c_p, sig, next_nm_sig, 0);
+ handle_process_info(c_p, NULL, sig, next_nm_sig, 0);
break;
case ERTS_SIG_Q_OP_TRACE_CHANGE_STATE:
@@ -4268,7 +4399,7 @@ erts_proc_sig_hdbg_check_in_queue(Process *p, char *what, char *file, int line)
NULL,
NULL,
ERTS_PSFLG_SIG_IN_Q);
- ASSERT(p->sig_inq.len == len);
+ ASSERT(p->sig_inq.len == len); (void)len;
}
-#endif
+#endif /* ERTS_PROC_SIG_HARD_DEBUG */
diff --git a/erts/emulator/beam/erl_proc_sig_queue.h b/erts/emulator/beam/erl_proc_sig_queue.h
index d250ad820f..8b7cd35f61 100644
--- a/erts/emulator/beam/erl_proc_sig_queue.h
+++ b/erts/emulator/beam/erl_proc_sig_queue.h
@@ -733,6 +733,18 @@ Sint
erts_proc_sig_privqs_len(Process *c_p);
+/* Append signals 'first' through '*last' to the in-queue of 'rp';
+   returns the (possibly updated) process state flags. */
+erts_aint32_t
+erts_enqueue_signals(Process *rp, ErtsMessage *first,
+ ErtsMessage **last, ErtsMessage **last_next,
+ Uint msg_cnt,
+ erts_aint32_t in_state);
+
+/* Send the scheduler's pending (process monitor) signal, if any,
+   to its target process. */
+void
+erts_proc_sig_send_pending(ErtsSchedulerData* esdp);
+
+
typedef struct {
Uint size;
ErtsMessage *msgp;
diff --git a/erts/emulator/beam/erl_process.c b/erts/emulator/beam/erl_process.c
index 650ec0958c..ad7ac27ac3 100644
--- a/erts/emulator/beam/erl_process.c
+++ b/erts/emulator/beam/erl_process.c
@@ -5715,6 +5715,12 @@ init_scheduler_data(ErtsSchedulerData* esdp, int num,
esdp->io.out = (Uint64) 0;
esdp->io.in = (Uint64) 0;
+ esdp->pending_signal.sig = NULL;
+ esdp->pending_signal.to = THE_NON_VALUE;
+#ifdef DEBUG
+ esdp->pending_signal.dbg_from = NULL;
+#endif
+
if (daww_ptr) {
init_aux_work_data(&esdp->aux_work_data, esdp, *daww_ptr);
*daww_ptr += daww_sz;
@@ -9674,8 +9680,13 @@ Process *erts_schedule(ErtsSchedulerData *esdp, Process *p, int calls)
} else {
is_normal_sched = !esdp;
if (is_normal_sched) {
- esdp = p->scheduler_data;
+ esdp = p->scheduler_data;
ASSERT(!ERTS_SCHEDULER_IS_DIRTY(esdp));
+
+ if (esdp->pending_signal.sig) {
+ ASSERT(esdp->pending_signal.dbg_from == p);
+ erts_proc_sig_send_pending(esdp);
+ }
}
else {
ASSERT(ERTS_SCHEDULER_IS_DIRTY(esdp));
@@ -10375,7 +10386,7 @@ notify_sys_task_executed(Process *c_p, ErtsProcSysTask *st,
ASSERT(hp_start + hsz == hp);
#endif
- erts_queue_message(rp, rp_locks, mp, msg, c_p->common.id);
+ erts_queue_proc_message(c_p, rp, rp_locks, mp, msg);
if (c_p == rp)
rp_locks &= ~ERTS_PROC_LOCK_MAIN;
@@ -10937,7 +10948,7 @@ dispatch_system_task(Process *c_p, erts_aint_t fail_state,
msg = copy_struct(operation, osz, &hp, ohp);
msg = TUPLE4(hp, st->requester, target, prio, msg);
- erts_queue_message(rp, rp_locks, mp, msg, st->requester);
+ erts_queue_message(rp, rp_locks, mp, msg, am_system);
if (rp_locks)
erts_proc_unlock(rp, rp_locks);
@@ -11912,6 +11923,9 @@ erl_create_process(Process* parent, /* Parent of process (default group leader).
p->sig_inq.len = 0;
p->sig_inq.nmsigs.next = NULL;
p->sig_inq.nmsigs.last = NULL;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+ p->sig_inq.may_contain_heap_terms = 0;
+#endif
p->bif_timers = NULL;
p->mbuf = NULL;
p->msg_frag = NULL;
@@ -12119,6 +12133,9 @@ void erts_init_empty_process(Process *p)
p->sig_inq.len = 0;
p->sig_inq.nmsigs.next = NULL;
p->sig_inq.nmsigs.last = NULL;
+#ifdef ERTS_PROC_SIG_HARD_DEBUG
+ p->sig_inq.may_contain_heap_terms = 0;
+#endif
p->bif_timers = NULL;
p->dictionary = NULL;
p->seq_trace_clock = 0;
diff --git a/erts/emulator/beam/erl_process.h b/erts/emulator/beam/erl_process.h
index e2aa1d9f84..b66272194c 100644
--- a/erts/emulator/beam/erl_process.h
+++ b/erts/emulator/beam/erl_process.h
@@ -660,6 +660,13 @@ struct ErtsSchedulerData_ {
Uint64 out;
Uint64 in;
} io;
+ struct {
+ ErtsSignal* sig;
+ Eterm to;
+#ifdef DEBUG
+ Process* dbg_from;
+#endif
+ } pending_signal;
Uint64 reductions;
ErtsSchedWallTime sched_wall_time;
diff --git a/erts/emulator/beam/erl_term.h b/erts/emulator/beam/erl_term.h
index 18483fca35..bddf403b0a 100644
--- a/erts/emulator/beam/erl_term.h
+++ b/erts/emulator/beam/erl_term.h
@@ -1184,6 +1184,54 @@ _ET_DECLARE_CHECKED(struct erl_node_*,external_ref_node,Eterm)
#define is_map(x) (is_boxed((x)) && is_map_header(*boxed_val(x)))
#define is_not_map(x) (!is_map(x))
+#define MAP_HEADER(hp, sz, keys) \
+ ((hp)[0] = MAP_HEADER_FLATMAP, \
+ (hp)[1] = sz, \
+ (hp)[2] = keys)
+
+#define MAP_SZ(sz) (MAP_HEADER_FLATMAP_SZ + 2*sz + 1)
+
+#define MAP0_SZ MAP_SZ(0)
+#define MAP1_SZ MAP_SZ(1)
+#define MAP2_SZ MAP_SZ(2)
+#define MAP3_SZ MAP_SZ(3)
+#define MAP4_SZ MAP_SZ(4)
+#define MAP5_SZ MAP_SZ(5)
+#define MAP0(hp) \
+ (MAP_HEADER(hp, 0, TUPLE0(hp+MAP_HEADER_FLATMAP_SZ)), \
+ make_flatmap(hp))
+#define MAP1(hp, k1, v1) \
+ (MAP_HEADER(hp, 1, TUPLE1(hp+1+MAP_HEADER_FLATMAP_SZ, k1)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ make_flatmap(hp))
+#define MAP2(hp, k1, v1, k2, v2) \
+ (MAP_HEADER(hp, 2, TUPLE2(hp+2+MAP_HEADER_FLATMAP_SZ, k1, k2)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ make_flatmap(hp))
+#define MAP3(hp, k1, v1, k2, v2, k3, v3) \
+ (MAP_HEADER(hp, 3, TUPLE3(hp+3+MAP_HEADER_FLATMAP_SZ, k1, k2, k3)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3, \
+ make_flatmap(hp))
+#define MAP4(hp, k1, v1, k2, v2, k3, v3, k4, v4) \
+ (MAP_HEADER(hp, 4, TUPLE4(hp+4+MAP_HEADER_FLATMAP_SZ, k1, k2, k3, k4)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+3] = v4, \
+ make_flatmap(hp))
+#define MAP5(hp, k1, v1, k2, v2, k3, v3, k4, v4, k5, v5) \
+ (MAP_HEADER(hp, 5, TUPLE5(hp+5+MAP_HEADER_FLATMAP_SZ, k1, k2, k3, k4, k5)), \
+ (hp)[MAP_HEADER_FLATMAP_SZ+0] = v1, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+1] = v2, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+2] = v3, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+3] = v4, \
+ (hp)[MAP_HEADER_FLATMAP_SZ+4] = v5, \
+ make_flatmap(hp))
+
+
/* number tests */
#define is_integer(x) (is_small(x) || is_big(x))
diff --git a/erts/emulator/beam/erl_trace.c b/erts/emulator/beam/erl_trace.c
index 1e833539b3..065a560b52 100644
--- a/erts/emulator/beam/erl_trace.c
+++ b/erts/emulator/beam/erl_trace.c
@@ -2044,7 +2044,7 @@ enqueue_sys_msg(enum ErtsSysMsgType type,
void
erts_queue_error_logger_message(Eterm from, Eterm msg, ErlHeapFragment *bp)
{
- enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_error_logger, msg, bp);
+ enqueue_sys_msg(SYS_MSG_TYPE_ERRLGR, from, am_logger, msg, bp);
}
void
@@ -2110,13 +2110,13 @@ sys_msg_disp_failure(ErtsSysMsgQ *smqp, Eterm receiver)
erts_thr_progress_unblock();
break;
case SYS_MSG_TYPE_ERRLGR: {
- char *no_elgger = "(no error logger present)";
+ char *no_elgger = "(no logger present)";
Eterm *tp;
Eterm tag;
if (is_not_tuple(smqp->msg)) {
unexpected_elmsg:
erts_fprintf(stderr,
- "%s unexpected error logger message: %T\n",
+ "%s unexpected logger message: %T\n",
no_elgger,
smqp->msg);
}
@@ -2284,7 +2284,7 @@ sys_msg_dispatcher_func(void *unused)
}
break;
case SYS_MSG_TYPE_ERRLGR:
- receiver = am_error_logger;
+ receiver = am_logger;
break;
default:
receiver = NIL;
@@ -2313,7 +2313,7 @@ sys_msg_dispatcher_func(void *unused)
erts_proc_unlock(proc, proc_locks);
}
}
- else if (receiver == am_error_logger) {
+ else if (receiver == am_logger) {
proc = erts_whereis_process(NULL,0,receiver,proc_locks,0);
if (!proc)
goto failure;
@@ -2379,7 +2379,7 @@ erts_foreach_sys_msg_in_q(void (*func)(Eterm,
to = erts_get_system_profile();
break;
case SYS_MSG_TYPE_ERRLGR:
- to = am_error_logger;
+ to = am_logger;
break;
default:
to = NIL;
diff --git a/erts/emulator/beam/utils.c b/erts/emulator/beam/utils.c
index 2e22130524..d74052d8b2 100644
--- a/erts/emulator/beam/utils.c
+++ b/erts/emulator/beam/utils.c
@@ -1924,145 +1924,155 @@ make_internal_hash(Eterm term, Uint32 salt)
#undef HCONST
#undef MIX
+/* error_logger !
+ {log, Level, format, [args], #{ gl, pid, time, error_logger => #{tag, emulator => true} }}
+*/
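+/* A sketch of one concrete message, assuming an info report from the
+   emulator with group leader GL, sender Pid and timestamp Time
+   (illustrative variable names, not taken from the code below):
+   {log, info, "~s~n", [Chars],
+    #{gl => GL, pid => Pid, time => Time,
+      error_logger => #{tag => info_msg, emulator => true}}}
+*/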
static Eterm
-do_allocate_logger_message(Eterm gleader, Eterm **hp, ErlOffHeap **ohp,
- ErlHeapFragment **bp, Process **p, Uint sz)
+do_allocate_logger_message(Eterm gleader, ErtsMonotonicTime *ts, Eterm *pid,
+ Eterm **hp, ErlOffHeap **ohp,
+ ErlHeapFragment **bp, Uint sz)
{
Uint gl_sz;
gl_sz = IS_CONST(gleader) ? 0 : size_object(gleader);
- sz = sz + gl_sz;
+ sz = sz + gl_sz + 6 /*outer 5-tuple*/
+ + MAP2_SZ /* error_logger map */;
+
+ *pid = erts_get_current_pid();
+
+ if (is_nil(gleader) && is_non_value(*pid)) {
+ sz += MAP2_SZ /* metadata map, no gl and no pid */;
+ } else if (is_nil(gleader) || is_non_value(*pid))
+ sz += MAP3_SZ /* metadata map, no gl or no pid */;
+ else
+ sz += MAP4_SZ /* metadata map, with gl and pid */;
+
+ *ts = ERTS_MONOTONIC_TO_USEC(erts_get_monotonic_time(NULL)) + ERTS_MONOTONIC_OFFSET_USEC;
+ erts_bld_sint64(NULL, &sz, *ts);
*bp = new_message_buffer(sz);
*ohp = &(*bp)->off_heap;
*hp = (*bp)->mem;
- return (is_nil(gleader)
- ? am_noproc
- : (IS_CONST(gleader)
- ? gleader
- : copy_struct(gleader,gl_sz,hp,*ohp)));
+ return copy_struct(gleader,gl_sz,hp,*ohp);
}
-static void do_send_logger_message(Eterm *hp, ErlOffHeap *ohp, ErlHeapFragment *bp,
- Process *p, Eterm message)
+static void do_send_logger_message(Eterm gl, Eterm tag, Eterm format, Eterm args,
+ ErtsMonotonicTime ts, Eterm pid,
+ Eterm *hp, ErlHeapFragment *bp)
{
-#ifdef HARDDEBUG
- erts_fprintf(stderr, "%T\n", message);
-#endif
- {
- Eterm from = erts_get_current_pid();
- if (is_not_internal_pid(from))
- from = NIL;
- erts_queue_error_logger_message(from, message, bp);
+ Eterm message, md, el_tag = tag;
+ Eterm time = erts_bld_sint64(&hp, NULL, ts);
+
+ /* This mapping is needed for the backwards compatible error_logger */
+ switch (tag) {
+ case am_info: el_tag = am_info_msg; break;
+ case am_warning: el_tag = am_warning_msg; break;
+ default:
+ ASSERT(tag == am_error);
+ break;
}
+
+ md = MAP2(hp, am_emulator, am_true,
+ am_atom_put("tag", 3), el_tag);
+ hp += MAP2_SZ;
+
+ if (is_nil(gl) && is_non_value(pid)) {
+ /* no gl and no pid, probably from a port */
+ md = MAP2(hp,
+ am_error_logger, md,
+ am_time, time);
+ hp += MAP2_SZ;
+ pid = NIL;
+ } else if (is_nil(gl)) {
+ /* no gl */
+ md = MAP3(hp,
+ am_error_logger, md,
+ am_pid, pid,
+ am_time, time);
+ hp += MAP3_SZ;
+ } else if (is_non_value(pid)) {
+ /* no pid */
+ md = MAP3(hp,
+ am_error_logger, md,
+ am_atom_put("gl", 2), gl,
+ am_time, time);
+ hp += MAP3_SZ;
+ pid = NIL;
+ } else {
+ md = MAP4(hp,
+ am_error_logger, md,
+ am_atom_put("gl", 2), gl,
+ am_pid, pid,
+ am_time, time);
+ hp += MAP4_SZ;
+ }
+
+ message = TUPLE5(hp, am_log, tag, format, args, md);
+ erts_queue_error_logger_message(pid, message, bp);
}
-/* error_logger !
- {notify,{info_msg,gleader,{emulator,format,[args]}}} |
- {notify,{error,gleader,{emulator,format,[args]}}} |
- {notify,{warning_msg,gleader,{emulator,format,[args}]}} */
-static int do_send_to_logger(Eterm tag, Eterm gleader, char *buf, size_t len)
+static int do_send_to_logger(Eterm tag, Eterm gl, char *buf, size_t len)
{
Uint sz;
- Eterm gl;
- Eterm list,args,format,tuple1,tuple2,tuple3;
+ Eterm list, args, format, pid;
+ ErtsMonotonicTime ts;
Eterm *hp = NULL;
ErlOffHeap *ohp = NULL;
ErlHeapFragment *bp = NULL;
- Process *p = NULL;
-
- ASSERT(is_atom(tag));
-
- if (len == 0) {
- return -1;
- }
sz = len * 2 /* message list */ + 2 /* cons surrounding message list */
- + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */
+ 8 /* "~s~n" */;
/* gleader size is accounted and allocated next */
- gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
-
- if(is_nil(gl)) {
- /* buf *always* points to a null terminated string */
- erts_fprintf(stderr, "(no error logger present) %T: \"%s\"\n",
- tag, buf);
- return 0;
- }
+ gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);
list = buf_to_intlist(&hp, buf, len, NIL);
args = CONS(hp,list,NIL);
hp += 2;
format = buf_to_intlist(&hp, "~s~n", 4, NIL);
- tuple1 = TUPLE3(hp, am_emulator, format, args);
- hp += 4;
- tuple2 = TUPLE3(hp, tag, gl, tuple1);
- hp += 4;
- tuple3 = TUPLE2(hp, am_notify, tuple2);
- do_send_logger_message(hp, ohp, bp, p, tuple3);
+ do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
return 0;
}
-static int do_send_term_to_logger(Eterm tag, Eterm gleader,
+static int do_send_term_to_logger(Eterm tag, Eterm gl,
char *buf, size_t len, Eterm args)
{
Uint sz;
- Eterm gl;
Uint args_sz;
- Eterm format,tuple1,tuple2,tuple3;
+ Eterm format, pid;
+ ErtsMonotonicTime ts;
Eterm *hp = NULL;
ErlOffHeap *ohp = NULL;
ErlHeapFragment *bp = NULL;
- Process *p = NULL;
- ASSERT(is_atom(tag));
+ ASSERT(len > 0);
args_sz = size_object(args);
- sz = len * 2 /* format */ + args_sz
- + 3 /*outer 2-tuple*/ + 4 /* middle 3-tuple */ + 4 /*inner 3-tuple */;
+ sz = len * 2 /* format */ + args_sz;
/* gleader size is accounted and allocated next */
- gl = do_allocate_logger_message(gleader, &hp, &ohp, &bp, &p, sz);
-
- if(is_nil(gl)) {
- /* buf *always* points to a null terminated string */
- erts_fprintf(stderr, "(no error logger present) %T: \"%s\" %T\n",
- tag, buf, args);
- return 0;
- }
+ gl = do_allocate_logger_message(gl, &ts, &pid, &hp, &ohp, &bp, sz);
format = buf_to_intlist(&hp, buf, len, NIL);
args = copy_struct(args, args_sz, &hp, ohp);
- tuple1 = TUPLE3(hp, am_emulator, format, args);
- hp += 4;
- tuple2 = TUPLE3(hp, tag, gl, tuple1);
- hp += 4;
- tuple3 = TUPLE2(hp, am_notify, tuple2);
- do_send_logger_message(hp, ohp, bp, p, tuple3);
+ do_send_logger_message(gl, tag, format, args, ts, pid, hp, bp);
return 0;
}
static ERTS_INLINE int
send_info_to_logger(Eterm gleader, char *buf, size_t len)
{
- return do_send_to_logger(am_info_msg, gleader, buf, len);
+ return do_send_to_logger(am_info, gleader, buf, len);
}
static ERTS_INLINE int
send_warning_to_logger(Eterm gleader, char *buf, size_t len)
{
- Eterm tag;
- switch (erts_error_logger_warnings) {
- case am_info: tag = am_info_msg; break;
- case am_warning: tag = am_warning_msg; break;
- default: tag = am_error; break;
- }
- return do_send_to_logger(tag, gleader, buf, len);
+ return do_send_to_logger(erts_error_logger_warnings, gleader, buf, len);
}
static ERTS_INLINE int
diff --git a/erts/emulator/test/dgawd_handler.erl b/erts/emulator/test/dgawd_handler.erl
index 52cdd26427..29b9d6ac7b 100644
--- a/erts/emulator/test/dgawd_handler.erl
+++ b/erts/emulator/test/dgawd_handler.erl
@@ -42,10 +42,10 @@
%%====================================================================
install() ->
- gen_event:add_handler(error_logger, ?MODULE, []).
+ error_logger:add_report_handler(?MODULE, []).
restore() ->
- gen_event:delete_handler(error_logger, ?MODULE, []).
+ error_logger:delete_report_handler(?MODULE).
got_dgawd_report() ->
gen_event:call(error_logger, ?MODULE, got_dgawd_report, 10*60*1000).
diff --git a/erts/emulator/test/statistics_SUITE.erl b/erts/emulator/test/statistics_SUITE.erl
index 029a6de897..3f2897242e 100644
--- a/erts/emulator/test/statistics_SUITE.erl
+++ b/erts/emulator/test/statistics_SUITE.erl
@@ -310,6 +310,13 @@ scheduler_wall_time_all(Config) when is_list(Config) ->
scheduler_wall_time_test(scheduler_wall_time_all).
scheduler_wall_time_test(Type) ->
+ case string:find(erlang:system_info(system_version),
+ "dirty-schedulers-TEST") == nomatch of
+ true -> run_scheduler_wall_time_test(Type);
+ false -> {skip, "Cannot be run with dirty-schedulers-TEST build"}
+ end.
+
+run_scheduler_wall_time_test(Type) ->
%% Should return undefined if system_flag is not turned on yet
undefined = statistics(Type),
%% Turn on statistics
diff --git a/erts/preloaded/ebin/erl_prim_loader.beam b/erts/preloaded/ebin/erl_prim_loader.beam
index e66343a5ad..10545086b5 100644
--- a/erts/preloaded/ebin/erl_prim_loader.beam
+++ b/erts/preloaded/ebin/erl_prim_loader.beam
Binary files differ
diff --git a/erts/preloaded/ebin/init.beam b/erts/preloaded/ebin/init.beam
index 6486ea750c..0473eda5fb 100644
--- a/erts/preloaded/ebin/init.beam
+++ b/erts/preloaded/ebin/init.beam
Binary files differ
diff --git a/erts/preloaded/ebin/prim_file.beam b/erts/preloaded/ebin/prim_file.beam
index 4f11df2c38..d1b3f5dca4 100644
--- a/erts/preloaded/ebin/prim_file.beam
+++ b/erts/preloaded/ebin/prim_file.beam
Binary files differ
diff --git a/erts/preloaded/src/erl_prim_loader.erl b/erts/preloaded/src/erl_prim_loader.erl
index b82b9f5b46..42c1f32a8e 100644
--- a/erts/preloaded/src/erl_prim_loader.erl
+++ b/erts/preloaded/src/erl_prim_loader.erl
@@ -299,8 +299,12 @@ check_file_result(Func, Target, {error,Reason}) ->
end,
%% this is equal to calling error_logger:error_report/1 which
%% we don't want to do from code_server during system boot
- error_logger ! {notify,{error_report,group_leader(),
- {self(),std_error,Report}}},
+ logger ! {log,error,#{label=>{?MODULE,file_error},report=>Report},
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:monotonic_time(microsecond),
+ error_logger=>#{tag=>error_report,
+ type=>std_error}}},
error
end;
check_file_result(_, _, Other) ->
diff --git a/erts/preloaded/src/init.erl b/erts/preloaded/src/init.erl
index 0c74169e97..9e3de87e08 100644
--- a/erts/preloaded/src/init.erl
+++ b/erts/preloaded/src/init.erl
@@ -485,7 +485,12 @@ do_handle_msg(Msg,State) ->
X ->
case whereis(user) of
undefined ->
- catch error_logger ! {info, self(), {self(), X, []}};
+ Time = erlang:monotonic_time(microsecond),
+ catch logger ! {log, info, "init got unexpected: ~p", [X],
+ #{pid=>self(),
+ gl=>self(),
+ time=>Time,
+ error_logger=>#{tag=>info_msg}}};
User ->
User ! X,
ok
@@ -559,8 +564,11 @@ do_stop({stop,Status},State) ->
clear_system(BootPid,State) ->
Heart = get_heart(State#state.kernel),
- shutdown_pids(Heart,BootPid,State),
- unload(Heart).
+ Logger = get_logger(State#state.kernel),
+ shutdown_pids(Heart,Logger,BootPid,State),
+ unload(Heart),
+ kill_em([Logger]),
+ do_unload([logger_server]).
flush() ->
receive
@@ -580,19 +588,26 @@ stop_heart(State) ->
shutdown_kernel_pid(Pid, BootPid, self(), State)
end.
-shutdown_pids(Heart,BootPid,State) ->
+shutdown_pids(Heart,Logger,BootPid,State) ->
Timer = shutdown_timer(State#state.flags),
catch shutdown(State#state.kernel,BootPid,Timer,State),
- kill_all_pids(Heart), % Even the shutdown timer.
- kill_all_ports(Heart),
+ kill_all_pids(Heart,Logger), % Even the shutdown timer.
+ kill_all_ports(Heart), % Logger has no ports
flush_timout(Timer).
-get_heart([{heart,Pid}|_Kernel]) -> Pid;
-get_heart([_|Kernel]) -> get_heart(Kernel);
-get_heart(_) -> false.
+get_heart(Kernel) ->
+ get_kernelpid(heart,Kernel).
+get_logger(Kernel) ->
+ get_kernelpid(logger,Kernel).
-shutdown([{heart,_Pid}|Kernel],BootPid,Timer,State) ->
+get_kernelpid(Name,[{Name,Pid}|_Kernel]) -> Pid;
+get_kernelpid(Name,[_|Kernel]) -> get_kernelpid(Name,Kernel);
+get_kernelpid(_,_) -> false.
+
+
+shutdown([{Except,_Pid}|Kernel],BootPid,Timer,State)
+ when Except==heart; Except==logger ->
shutdown(Kernel, BootPid, Timer, State);
shutdown([{_Name,Pid}|Kernel],BootPid,Timer,State) ->
shutdown_kernel_pid(Pid, BootPid, Timer, State),
@@ -644,24 +659,25 @@ resend(_) ->
%%
%% Kill all existing pids in the system (except init and heart).
-kill_all_pids(Heart) ->
- case get_pids(Heart) of
+kill_all_pids(Heart,Logger) ->
+ case get_pids(Heart,Logger) of
[] ->
ok;
Pids ->
kill_em(Pids),
- kill_all_pids(Heart) % Continue until all are really killed.
+ kill_all_pids(Heart,Logger) % Continue until all are really killed.
end.
%% All except system processes.
-get_pids(Heart) ->
+get_pids(Heart,Logger) ->
Pids = [P || P <- processes(), not erts_internal:is_system_process(P)],
- delete(Heart,self(),Pids).
+ delete(Heart,Logger,self(),Pids).
-delete(Heart,Init,[Heart|Pids]) -> delete(Heart,Init,Pids);
-delete(Heart,Init,[Init|Pids]) -> delete(Heart,Init,Pids);
-delete(Heart,Init,[Pid|Pids]) -> [Pid|delete(Heart,Init,Pids)];
-delete(_,_,[]) -> [].
+delete(Heart,Logger,Init,[Heart|Pids]) -> delete(Heart,Logger,Init,Pids);
+delete(Heart,Logger,Init,[Logger|Pids]) -> delete(Heart,Logger,Init,Pids);
+delete(Heart,Logger,Init,[Init|Pids]) -> delete(Heart,Logger,Init,Pids);
+delete(Heart,Logger,Init,[Pid|Pids]) -> [Pid|delete(Heart,Logger,Init,Pids)];
+delete(_,_,_,[]) -> [].
kill_em([Pid|Pids]) ->
exit(Pid,kill),
@@ -691,9 +707,9 @@ kill_all_ports(_,_) ->
ok.
unload(false) ->
- do_unload(sub(erlang:pre_loaded(),erlang:loaded()));
+ do_unload(sub([logger_server|erlang:pre_loaded()],erlang:loaded()));
unload(_) ->
- do_unload(sub([heart|erlang:pre_loaded()],erlang:loaded())).
+ do_unload(sub([heart,logger_server|erlang:pre_loaded()],erlang:loaded())).
do_unload([M|Mods]) ->
catch erlang:purge_module(M),
diff --git a/erts/preloaded/src/prim_file.erl b/erts/preloaded/src/prim_file.erl
index 432a8c15cd..558a0f883f 100644
--- a/erts/preloaded/src/prim_file.erl
+++ b/erts/preloaded/src/prim_file.erl
@@ -557,8 +557,13 @@ list_dir_convert([RawName | Rest], SkipInvalid, Result) ->
{error, ignore} ->
list_dir_convert(Rest, SkipInvalid, Result);
{error, warning} ->
- error_logger:warning_msg(
- "Non-unicode filename ~p ignored\n", [RawName]),
+ %% this is equal to calling error_logger:warning_msg/2 which
+ %% we don't want to do from code_server during system boot
+ logger ! {log,warning,"Non-unicode filename ~p ignored\n", [RawName],
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:monotonic_time(microsecond),
+ error_logger=>#{tag=>warning_msg}}},
list_dir_convert(Rest, SkipInvalid, Result);
{error, _} ->
{error, {no_translation, RawName}}
diff --git a/erts/test/upgrade_SUITE.erl b/erts/test/upgrade_SUITE.erl
index 31ceb06314..73d221cfab 100644
--- a/erts/test/upgrade_SUITE.erl
+++ b/erts/test/upgrade_SUITE.erl
@@ -132,7 +132,7 @@ upgrade_test1(FromVsn,ToVsn,Config) ->
{FromRel,FromApps} = target_system(FromRelName, FromVsn,
CreateDir, InstallDir,Config),
- {ToRel,ToApps} = upgrade_system(FromRel, ToRelName, ToVsn,
+ {ToRel,ToApps} = upgrade_system(FromVsn, FromRel, ToRelName, ToVsn,
CreateDir, InstallDir),
do_upgrade(FromVsn, FromApps, ToRel, ToApps, InstallDir).
@@ -216,7 +216,7 @@ target_system(RelName0,RelVsn,CreateDir,InstallDir,Config) ->
%%% Create a release containing the current (the test node) OTP
%%% release, including relup to allow upgrade from an earlier OTP
%%% release.
-upgrade_system(FromRel, ToRelName0, ToVsn,
+upgrade_system(FromVsn, FromRel, ToRelName0, ToVsn,
CreateDir, InstallDir) ->
{RelName,Apps,_} = create_relfile(node(),CreateDir,ToRelName0,ToVsn),
@@ -226,6 +226,11 @@ upgrade_system(FromRel, ToRelName0, ToVsn,
ok = systools:make_relup(RelName,[FromRel],[FromRel],
[{path,[FromPath]},
{outdir,CreateDir}]),
+ case {FromVsn,ToVsn} of
+ {"20"++_,"21"++_} -> fix_relup_inets_ftp(filename:dirname(RelName));
+ _ -> ok
+ end,
+
SysConfig = filename:join([CreateDir, "sys.config"]),
write_file(SysConfig, "[]."),
@@ -233,6 +238,41 @@ upgrade_system(FromRel, ToRelName0, ToVsn,
{RelName,Apps}.
+%% In OTP-21, ftp and tftp were split out from inets and formed two
+%% new separate applications. When creating the relup, systools
+%% automatically adds new applications first, before upgrading
+%% existing applications. Since ftp and tftp have processes with the
+%% same name as in the old version of inets, the upgrade failed when
+%% trying to start the new applications (they already exist).
+%%
+%% To go around this problem, this function adds an instruction to
+%% stop inets before the new applications are started. This is a very
+%% specific adjustment, and it will be needed for any upgrade which
+%% involves conversion from inets to ftp/tftp.
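+%%
+%% As an illustration (hypothetical instruction lists), an upgrade script
+%% of the form
+%%   [..., point_of_no_return, {load, ...}, ...]
+%% is rewritten to
+%%   [..., point_of_no_return, {apply,{application,stop,[inets]}},
+%%    {load, ...}, ...]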
+fix_relup_inets_ftp(Dir) ->
+ Filename = filename:join(Dir,"relup"),
+ {ok,[{ToVsn,Up,Down}]} = file:consult(Filename),
+ [{FromVsn,UpDescr,UpInstr}] = Up,
+ [{FromVsn,DownDescr,DownInstr}] = Down,
+
+ Fun = fun(point_of_no_return) -> false;
+ (_) -> true
+ end,
+ {UpBefore,[point_of_no_return|UpAfter]} = lists:splitwith(Fun,UpInstr),
+ {DownBefore,[point_of_no_return|DownAfter]} = lists:splitwith(Fun,DownInstr),
+ NewRelup =
+ {ToVsn,
+ [{FromVsn,UpDescr,UpBefore++[point_of_no_return,
+ {apply,{application,stop,[inets]}} |
+ UpAfter]}],
+ [{FromVsn,DownDescr,DownBefore++[point_of_no_return,
+ {apply,{application,stop,[inets]}} |
+ DownAfter]}]},
+ {ok, Fd} = file:open(Filename, [write,{encoding,utf8}]),
+ io:format(Fd, "%% ~s~n~tp.~n", [epp:encoding_to_string(utf8),NewRelup]),
+ ok = file:close(Fd).
+
+
%%%-----------------------------------------------------------------
%%% Start a new node running the release from target_system/5
%%% above. Then upgrade to the system from upgrade_system/5.
diff --git a/lib/common_test/src/Makefile b/lib/common_test/src/Makefile
index 9d751996ad..2a2a9cb5bc 100644
--- a/lib/common_test/src/Makefile
+++ b/lib/common_test/src/Makefile
@@ -165,3 +165,5 @@ release_tests_spec: opt
release_docs_spec: docs
+# Include dependencies -- list below added by Kostis Sagonas
+$(EBIN)/cth_log_redirect.beam: ../../kernel/include/logger.hrl
diff --git a/lib/common_test/src/cth_log_redirect.erl b/lib/common_test/src/cth_log_redirect.erl
index b05f0bd28b..417ea615a3 100644
--- a/lib/common_test/src/cth_log_redirect.erl
+++ b/lib/common_test/src/cth_log_redirect.erl
@@ -33,17 +33,19 @@
pre_init_per_testcase/4, post_init_per_testcase/5,
pre_end_per_testcase/4, post_end_per_testcase/5]).
-%% Event handler Callbacks
--export([init/1,
- handle_event/2, handle_call/2, handle_info/2,
- terminate/1, terminate/2, code_change/3]).
+%% Logger handler and gen_server callbacks
+-export([log/2,
+ init/1,
+ handle_cast/2, handle_call/3,
+ terminate/1, terminate/2]).
%% Other
-export([handle_remote_events/1]).
-include("ct.hrl").
+-include("../../kernel/src/logger_internal.hrl").
--behaviour(gen_event).
+-behaviour(gen_server).
-record(eh_state, {log_func,
curr_suite,
@@ -57,7 +59,7 @@ id(_Opts) ->
init(?MODULE, _Opts) ->
ct_util:mark_process(),
- error_logger:add_report_handler(?MODULE),
+ ok = start_log_handler(),
tc_log_async.
pre_init_per_suite(Suite, Config, State) ->
@@ -100,7 +102,7 @@ pre_end_per_testcase(_Suite, _TC, Config, State) ->
post_end_per_testcase(_Suite, _TC, _Config, Result, State) ->
%% Make sure that the event queue is flushed
%% before ending this test case.
- gen_event:call(error_logger, ?MODULE, flush, 300000),
+ gen_server:call(?MODULE, flush, 300000),
{Result, State}.
pre_end_per_group(_Suite, Group, Config, {tc_log, Group}) ->
@@ -114,127 +116,155 @@ post_end_per_group(_Suite, _Group, Config, Return, State) ->
set_curr_func({group,undefined}, Config),
{Return, State}.
-%% Copied and modified from sasl_report_tty_h.erl
-init(_Type) ->
+start_log_handler() ->
+ case whereis(?MODULE) of
+ undefined ->
+ ChildSpec =
+ #{id=>?MODULE,
+ start=>{gen_server,start_link,[{local,?MODULE},?MODULE,[],[]]},
+ restart=>transient,
+ shutdown=>2000,
+ type=>worker,
+ modules=>[?MODULE]},
+ {ok,_} = supervisor:start_child(logger_sup,ChildSpec);
+ _Pid ->
+ ok
+ end,
+ ok = logger:add_handler(?MODULE,?MODULE,
+ #{level=>info,
+ formatter=>{?DEFAULT_FORMATTER,
+ ?DEFAULT_FORMAT_CONFIG}}).
+
+init([]) ->
{ok, #eh_state{log_func = tc_log_async}}.
-handle_event({_Type,GL,_Msg}, #eh_state{handle_remote_events = false} = State)
- when node(GL) /= node() ->
- {ok, State};
-handle_event(Event, #eh_state{log_func = LogFunc} = State) ->
+log(#{msg:={report,Msg},meta:=#{domain:=[beam,erlang,otp,sasl]}}=Log,Config) ->
case whereis(sasl_sup) of
undefined ->
- sasl_not_started;
+ ok; % sasl application is not started
_Else ->
- {ok, ErrLogType} = application:get_env(sasl, errlog_type),
- SReport = sasl_report:format_report(group_leader(), ErrLogType,
- tag_event(Event, local)),
- if is_list(SReport) ->
- SaslHeader = format_header(State),
- case LogFunc of
- tc_log ->
- ct_logs:tc_log(sasl, ?STD_IMPORTANCE,
- SaslHeader, SReport, [], []);
- tc_log_async ->
- ct_logs:tc_log_async(sasl, ?STD_IMPORTANCE,
- SaslHeader, SReport, [])
- end;
- true -> %% Report is an atom if no logging is to be done
- ignore
- end
- end,
- %% note that error_logger (unlike sasl) expects UTC time
- EReport = error_logger_tty_h:write_event(
- tag_event(Event, utc), io_lib),
- if is_list(EReport) ->
- ErrHeader = format_header(State),
- case LogFunc of
- tc_log ->
- ct_logs:tc_log(error_logger, ?STD_IMPORTANCE,
- ErrHeader, EReport, [], []);
- tc_log_async ->
- ct_logs:tc_log_async(error_logger, ?STD_IMPORTANCE,
- ErrHeader, EReport, [])
- end;
- true -> %% Report is an atom if no logging is to be done
- ignore
- end,
- {ok, State}.
-
-handle_info({'EXIT',User,killed}, State) ->
- case whereis(user) of
- %% init:stop/1/2 has been called, let's finish!
- undefined ->
- remove_handler;
- User ->
- remove_handler;
- _ ->
- {ok,State}
+ Level =
+ case application:get_env(sasl, errlog_type) of
+ {ok,error} ->
+ error;
+ {ok,_} ->
+ info;
+ undefined ->
+ info
+ end,
+ case Level of
+ error ->
+ case Msg of
+ #{label:={_,progress}} ->
+ ok;
+ _ ->
+ do_log(add_log_category(Log,sasl),Config)
+ end;
+ _ ->
+ do_log(add_log_category(Log,sasl),Config)
+ end
end;
+log(#{meta:=#{domain:=[beam,erlang,otp]}}=Log,Config) ->
+ do_log(add_log_category(Log,error_logger),Config);
+log(#{meta:=#{domain:=_}},_) ->
+ ok;
+log(Log,Config) ->
+ do_log(add_log_category(Log,error_logger),Config).
+
+add_log_category(#{meta:=Meta}=Log,Category) ->
+ Log#{meta=>Meta#{?MODULE=>#{category=>Category}}}.
-handle_info(_, State) ->
- {ok,State}.
+do_log(Log,Config) ->
+ gen_server:call(?MODULE,{log,Log,Config}).
-handle_call(flush,State) ->
- {ok, ok, State};
+handle_cast(_, State) ->
+ {noreply,State}.
-handle_call({set_curr_func,{group,Group,Conf},Config},
- State) when is_list(Config) ->
+handle_call({log,#{meta:=#{gl:=GL}},_}, _From,
+ #eh_state{handle_remote_events=false}=State)
+ when node(GL) /= node() ->
+ {reply, ok, State};
+
+handle_call({log,
+ #{msg:=Msg0,
+ meta:=#{?MODULE:=#{category:=Category}}=Meta}=Log,
+ #{formatter:={Formatter,FConfig}}},
+ _From,
+ #eh_state{log_func=LogFunc}=State) ->
+ Header = format_header(State),
+ Msg =
+ case Msg0 of
+ {report,R} ->
+ Fun=maps:get(report_cb,Meta,fun logger:format_report/1),
+ Fun(R);
+ _ ->
+ Msg0
+ end,
+ String = Formatter:format(Log#{msg=>Msg},FConfig),
+ case LogFunc of
+ tc_log ->
+ ct_logs:tc_log(Category, ?STD_IMPORTANCE,
+ Header, String, [], []);
+ tc_log_async ->
+ ct_logs:tc_log_async(Category, ?STD_IMPORTANCE,
+ Header, String, [])
+ end,
+ {reply,ok,State};
+
+handle_call(flush,_From,State) ->
+ {reply, ok, State};
+
+handle_call({set_curr_func,{group,Group,Conf},Config},_From,State)
+ when is_list(Config) ->
Parallel = case proplists:get_value(tc_group_properties, Config) of
undefined -> false;
Props -> lists:member(parallel, Props)
end,
- {ok, ok, State#eh_state{curr_group = Group,
- curr_func = Conf,
- parallel_tcs = Parallel}};
-handle_call({set_curr_func,{group,Group,Conf},_SkipOrFail}, State) ->
- {ok, ok, State#eh_state{curr_group = Group,
- curr_func = Conf,
- parallel_tcs = false}};
-handle_call({set_curr_func,{group,undefined},_Config}, State) ->
- {ok, ok, State#eh_state{curr_group = undefined,
- curr_func = undefined,
- parallel_tcs = false}};
-handle_call({set_curr_func,{Suite,Conf},_Config}, State) ->
- {ok, ok, State#eh_state{curr_suite = Suite,
- curr_func = Conf,
- parallel_tcs = false}};
-handle_call({set_curr_func,undefined,_Config}, State) ->
- {ok, ok, State#eh_state{curr_suite = undefined,
- curr_func = undefined,
- parallel_tcs = false}};
-handle_call({set_curr_func,TC,_Config}, State) ->
- {ok, ok, State#eh_state{curr_func = TC}};
-
-handle_call({set_logfunc,NewLogFunc}, State) ->
- {ok, NewLogFunc, State#eh_state{log_func = NewLogFunc}};
-
-handle_call({handle_remote_events,Bool}, State) ->
- {ok, ok, State#eh_state{handle_remote_events = Bool}};
-
-handle_call(_Query, _State) ->
- {error, bad_query}.
+ {reply, ok, State#eh_state{curr_group = Group,
+ curr_func = Conf,
+ parallel_tcs = Parallel}};
+handle_call({set_curr_func,{group,Group,Conf},_SkipOrFail}, _From, State) ->
+ {reply, ok, State#eh_state{curr_group = Group,
+ curr_func = Conf,
+ parallel_tcs = false}};
+handle_call({set_curr_func,{group,undefined},_Config}, _From, State) ->
+ {reply, ok, State#eh_state{curr_group = undefined,
+ curr_func = undefined,
+ parallel_tcs = false}};
+handle_call({set_curr_func,{Suite,Conf},_Config}, _From, State) ->
+ {reply, ok, State#eh_state{curr_suite = Suite,
+ curr_func = Conf,
+ parallel_tcs = false}};
+handle_call({set_curr_func,undefined,_Config}, _From, State) ->
+ {reply, ok, State#eh_state{curr_suite = undefined,
+ curr_func = undefined,
+ parallel_tcs = false}};
+handle_call({set_curr_func,TC,_Config}, _From, State) ->
+ {reply, ok, State#eh_state{curr_func = TC}};
+
+handle_call({set_logfunc,NewLogFunc}, _From, State) ->
+ {reply, NewLogFunc, State#eh_state{log_func = NewLogFunc}};
+
+handle_call({handle_remote_events,Bool}, _From, State) ->
+ {reply, ok, State#eh_state{handle_remote_events = Bool}}.
terminate(_) ->
- error_logger:delete_report_handler(?MODULE),
- [].
+ _ = logger:remove_handler(?MODULE),
+ _ = supervisor:terminate_child(logger_sup,?MODULE),
+ _ = supervisor:delete_child(logger_sup,?MODULE),
+ ok.
terminate(_Arg, _State) ->
ok.
-tag_event(Event, utc) ->
- {calendar:universal_time(), Event};
-tag_event(Event, _) ->
- {calendar:local_time(), Event}.
-
set_curr_func(CurrFunc, Config) ->
- gen_event:call(error_logger, ?MODULE, {set_curr_func, CurrFunc, Config}).
+ gen_server:call(?MODULE, {set_curr_func, CurrFunc, Config}).
set_log_func(Func) ->
- gen_event:call(error_logger, ?MODULE, {set_logfunc, Func}).
+ gen_server:call(?MODULE, {set_logfunc, Func}).
handle_remote_events(Bool) ->
- gen_event:call(error_logger, ?MODULE, {handle_remote_events, Bool}).
+ gen_server:call(?MODULE, {handle_remote_events, Bool}).
%%%-----------------------------------------------------------------
@@ -272,6 +302,3 @@ format_header(#eh_state{curr_suite = Suite,
curr_func = TC}) ->
io_lib:format("System report during ~w:~tw/1 in ~tw",
[Suite,TC,Group]).
-
-code_change(_OldVsn, State, _Extra) ->
- {ok, State}.
diff --git a/lib/common_test/test/ct_hooks_SUITE.erl b/lib/common_test/test/ct_hooks_SUITE.erl
index 3c1e887f65..6228524a88 100644
--- a/lib/common_test/test/ct_hooks_SUITE.erl
+++ b/lib/common_test/test/ct_hooks_SUITE.erl
@@ -258,15 +258,20 @@ cth_log(Config) when is_list(Config) ->
fun(UnexpIoLog) ->
{ok,Bin} = file:read_file(UnexpIoLog),
Ts = string:lexemes(binary_to_list(Bin),[$\n]),
- Matches = lists:foldl(fun([$=,$E,$R,$R,$O,$R|_], N) ->
- N+1;
- ([$L,$o,$g,$g,$e,$r|_], N) ->
- N+1;
+ Matches = lists:foldl(fun([$=,$E,$R,$R,$O,$R|_], {E,I,L}) ->
+ {E+1,I,L};
+ ([$=,$I,$N,$F,$O|_], {E,I,L}) ->
+ {E,I+1,L};
+ ([$L,$o,$g,$g,$e,$r|_], {E,I,L}) ->
+ {E,I,L+1};
(_, N) -> N
- end, 0, Ts),
- ct:pal("~p matches in ~tp", [Matches,UnexpIoLog]),
- if Matches > 10 -> ok;
- true -> exit({no_unexpected_io_found,UnexpIoLog})
+ end, {0,0,0}, Ts),
+ ct:pal("~p ({Error,Info,Log}) matches in ~tp",
+ [Matches,UnexpIoLog]),
+ MatchList = tuple_to_list(Matches),
+ case [N || N <- MatchList, N<3] of
+ [] -> ok;
+ _ -> exit({missing_unexpected_io,UnexpIoLog})
end
end, UnexpIoLogs),
ok.
diff --git a/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/cth_log_SUITE.erl b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/cth_log_SUITE.erl
index bd1ac54781..a0cd77b88b 100644
--- a/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/cth_log_SUITE.erl
+++ b/lib/common_test/test/ct_hooks_SUITE_data/cth/tests/cth_log_SUITE.erl
@@ -124,6 +124,6 @@ gen() ->
gen_loop(N) ->
ct:log("Logger iteration: ~p", [N]),
error_logger:error_report(N),
- error_logger:info_report(progress, N),
+ error_logger:info_report(N),
ct:sleep(150),
gen_loop(N+1).
diff --git a/lib/common_test/test/ct_pre_post_test_io_SUITE.erl b/lib/common_test/test/ct_pre_post_test_io_SUITE.erl
index 0b85392009..538fd822c1 100644
--- a/lib/common_test/test/ct_pre_post_test_io_SUITE.erl
+++ b/lib/common_test/test/ct_pre_post_test_io_SUITE.erl
@@ -241,7 +241,7 @@ try_loop(_Fun, 0) ->
gave_up;
try_loop(Fun, N) ->
try Fun() of
- {error,_} ->
+ {Error,_} when Error==error; Error==badrpc ->
timer:sleep(10),
try_loop(Fun, N-1);
Result ->
@@ -257,7 +257,7 @@ try_loop(M, F, _A, 0) ->
gave_up;
try_loop(M, F, A, N) ->
try apply(M, F, A) of
- {error,_} ->
+ {Error,_Reason} when Error==error; Error==badrpc ->
timer:sleep(10),
try_loop(M, F, A, N-1);
Result ->
diff --git a/lib/compiler/src/beam_validator.erl b/lib/compiler/src/beam_validator.erl
index d5aef51dfa..962f17d83c 100644
--- a/lib/compiler/src/beam_validator.erl
+++ b/lib/compiler/src/beam_validator.erl
@@ -576,7 +576,7 @@ valfun_4({wait,_}, Vst) ->
valfun_4({wait_timeout,_,Src}, Vst) ->
assert_term(Src, Vst),
verify_y_init(Vst),
- Vst;
+ prune_x_regs(0, Vst);
valfun_4({loop_rec_end,_}, Vst) ->
verify_y_init(Vst),
kill_state(Vst);
diff --git a/lib/compiler/src/sys_core_fold.erl b/lib/compiler/src/sys_core_fold.erl
index bb3a9c7628..a13bdedaf9 100644
--- a/lib/compiler/src/sys_core_fold.erl
+++ b/lib/compiler/src/sys_core_fold.erl
@@ -214,6 +214,8 @@ opt_guard_try(#c_case{clauses=Cs}=Term) ->
Term#c_case{clauses=opt_guard_try_list(Cs)};
opt_guard_try(#c_clause{body=B0}=Term) ->
Term#c_clause{body=opt_guard_try(B0)};
+opt_guard_try(#c_let{vars=[],arg=#c_values{es=[]},body=B}) ->
+ B;
opt_guard_try(#c_let{arg=Arg,body=B0}=Term) ->
case opt_guard_try(B0) of
#c_literal{}=B ->
@@ -401,14 +403,15 @@ expr(#c_receive{clauses=Cs0,timeout=T0,action=A0}=Recv, Ctxt, Sub) ->
T1 = expr(T0, value, Sub),
A1 = body(A0, Ctxt, Sub),
Recv#c_receive{clauses=Cs1,timeout=T1,action=A1};
-expr(#c_apply{anno=Anno,op=Op0,args=As0}=App, _, Sub) ->
+expr(#c_apply{anno=Anno,op=Op0,args=As0}=Apply0, _, Sub) ->
Op1 = expr(Op0, value, Sub),
As1 = expr_list(As0, value, Sub),
case cerl:is_data(Op1) andalso not is_literal_fun(Op1) of
false ->
- App#c_apply{op=Op1,args=As1};
+ Apply = Apply0#c_apply{op=Op1,args=As1},
+ fold_apply(Apply, Op1, As1);
true ->
- add_warning(App, invalid_call),
+ add_warning(Apply0, invalid_call),
Err = #c_call{anno=Anno,
module=#c_literal{val=erlang},
name=#c_literal{val=error},
@@ -766,6 +769,25 @@ make_effect_seq([H|T], Sub) ->
end;
make_effect_seq([], _) -> void().
+%% fold_apply(Apply, LiteralFun, Args) -> Apply.
+%% Replace an apply of a literal external fun with a call.
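+%% For example (assuming a literal external fun of matching arity),
+%% (fun erlang:abs/1)(X) is rewritten to erlang:abs(X).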
+
+fold_apply(Apply, #c_literal{val=Fun}, Args) when is_function(Fun) ->
+ {module,Mod} = erlang:fun_info(Fun, module),
+ {name,Name} = erlang:fun_info(Fun, name),
+ {arity,Arity} = erlang:fun_info(Fun, arity),
+ if
+ Arity =:= length(Args) ->
+ #c_call{anno=Apply#c_apply.anno,
+ module=#c_literal{val=Mod},
+ name=#c_literal{val=Name},
+ args=Args};
+ true ->
+ Apply
+ end;
+fold_apply(Apply, _, _) -> Apply.
+
+
%% Handling remote calls. The module/name fields have been processed.
call(#c_call{args=As}=Call, #c_literal{val=M}=M0, #c_literal{val=N}=N0, Sub) ->
@@ -803,6 +825,8 @@ fold_call(Call, #c_literal{val=M}, #c_literal{val=F}, Args, Sub) ->
fold_call_1(Call, M, F, Args, Sub);
fold_call(Call, _M, _N, _Args, _Sub) -> Call.
+fold_call_1(Call, erlang, apply, [Fun,Args], _) ->
+ simplify_fun_apply(Call, Fun, Args);
fold_call_1(Call, erlang, apply, [Mod,Func,Args], _) ->
simplify_apply(Call, Mod, Func, Args);
fold_call_1(Call, Mod, Name, Args, Sub) ->
@@ -1111,24 +1135,38 @@ eval_failure(Call, Reason) ->
%% Simplify an apply/3 to a call if the number of arguments
%% are known at compile time.
-simplify_apply(Call, Mod, Func, Args) ->
+simplify_apply(Call, Mod, Func, Args0) ->
case is_atom_or_var(Mod) andalso is_atom_or_var(Func) of
- true -> simplify_apply_1(Args, Call, Mod, Func, []);
- false -> Call
+ true ->
+ case get_fixed_args(Args0, []) of
+ error ->
+ Call;
+ {ok,Args} ->
+ Call#c_call{module=Mod,name=Func,args=Args}
+ end;
+ false ->
+ Call
end.
-
-simplify_apply_1(#c_literal{val=MoreArgs0}, Call, Mod, Func, Args)
- when length(MoreArgs0) >= 0 ->
- MoreArgs = [#c_literal{val=Arg} || Arg <- MoreArgs0],
- Call#c_call{module=Mod,name=Func,args=reverse(Args, MoreArgs)};
-simplify_apply_1(#c_cons{hd=Arg,tl=T}, Call, Mod, Func, Args) ->
- simplify_apply_1(T, Call, Mod, Func, [Arg|Args]);
-simplify_apply_1(_, Call, _, _, _) -> Call.
-
is_atom_or_var(#c_literal{val=Atom}) when is_atom(Atom) -> true;
is_atom_or_var(#c_var{}) -> true;
is_atom_or_var(_) -> false.
+simplify_fun_apply(#c_call{anno=Anno}=Call, Fun, Args0) ->
+ case get_fixed_args(Args0, []) of
+ error ->
+ Call;
+ {ok,Args} ->
+ #c_apply{anno=Anno,op=Fun,args=Args}
+ end.
+
+get_fixed_args(#c_literal{val=MoreArgs0}, Args)
+ when length(MoreArgs0) >= 0 ->
+ MoreArgs = [#c_literal{val=Arg} || Arg <- MoreArgs0],
+ {ok,reverse(Args, MoreArgs)};
+get_fixed_args(#c_cons{hd=Arg,tl=T}, Args) ->
+ get_fixed_args(T, [Arg|Args]);
+get_fixed_args(_, _) -> error.
+
%% clause(Clause, Cepxr, Context, Sub) -> Clause.
clause(#c_clause{pats=Ps0}=Cl, Cexpr, Ctxt, Sub0) ->
diff --git a/lib/compiler/test/core_fold_SUITE.erl b/lib/compiler/test/core_fold_SUITE.erl
index 4fd1f84569..ab7f36abf7 100644
--- a/lib/compiler/test/core_fold_SUITE.erl
+++ b/lib/compiler/test/core_fold_SUITE.erl
@@ -27,7 +27,8 @@
multiple_aliases/1,redundant_boolean_clauses/1,
mixed_matching_clauses/1,unnecessary_building/1,
no_no_file/1,configuration/1,supplies/1,
- redundant_stack_frame/1,export_from_case/1]).
+ redundant_stack_frame/1,export_from_case/1,
+ empty_values/1]).
-export([foo/0,foo/1,foo/2,foo/3]).
@@ -47,7 +48,8 @@ groups() ->
multiple_aliases,redundant_boolean_clauses,
mixed_matching_clauses,unnecessary_building,
no_no_file,configuration,supplies,
- redundant_stack_frame,export_from_case]}].
+ redundant_stack_frame,export_from_case,
+ empty_values]}].
init_per_suite(Config) ->
@@ -584,5 +586,17 @@ export_from_case_2(Bool, Rec) ->
end,
{ok,Result}.
+empty_values(_Config) ->
+ case ?MODULE of
+ core_fold_inline_SUITE ->
+ {'EXIT',_} = (catch do_empty_values());
+ _ ->
+ {'EXIT',{function_clause,_}} = (catch do_empty_values())
+ end,
+ ok.
+
+do_empty_values() when (#{})#{} ->
+ c.
+
id(I) -> I.
diff --git a/lib/compiler/test/fun_SUITE.erl b/lib/compiler/test/fun_SUITE.erl
index 16474adf5b..3c272a35a6 100644
--- a/lib/compiler/test/fun_SUITE.erl
+++ b/lib/compiler/test/fun_SUITE.erl
@@ -194,6 +194,17 @@ external(Config) when is_list(Config) ->
?APPLY2(ListsMod, ListsMap, 2),
?APPLY2(ListsMod, ListsMap, ListsArity),
+ 42 = (fun erlang:abs/1)(-42),
+ 42 = (id(fun erlang:abs/1))(-42),
+ 42 = apply(fun erlang:abs/1, [-42]),
+ 42 = apply(id(fun erlang:abs/1), [-42]),
+ 6 = (fun lists:sum/1)([1,2,3]),
+ 6 = (id(fun lists:sum/1))([1,2,3]),
+
+ {'EXIT',{{badarity,_},_}} = (catch (fun lists:sum/1)(1, 2, 3)),
+ {'EXIT',{{badarity,_},_}} = (catch (id(fun lists:sum/1))(1, 2, 3)),
+ {'EXIT',{{badarity,_},_}} = (catch apply(fun lists:sum/1, [1,2,3])),
+
ok.
call_me(I) ->
diff --git a/lib/crypto/c_src/crypto.c b/lib/crypto/c_src/crypto.c
index 149387bcee..dbb6bf8135 100644
--- a/lib/crypto/c_src/crypto.c
+++ b/lib/crypto/c_src/crypto.c
@@ -3004,16 +3004,21 @@ static ERL_NIF_TERM rsa_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF
static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (PrivKey|undefined, DHParams=[P,G], Mpint, Len|0) */
- DH* dh_params;
+ DH* dh_params = NULL;
int pub_len, prv_len;
unsigned char *pub_ptr, *prv_ptr;
ERL_NIF_TERM ret, ret_pub, ret_prv, head, tail;
int mpint; /* 0 or 4 */
- BIGNUM *priv_key = NULL;
+ BIGNUM *priv_key_in = NULL;
BIGNUM *dh_p = NULL, *dh_g = NULL;
unsigned long len = 0;
+#ifdef HAS_EVP_PKEY_CTX
+ EVP_PKEY_CTX *ctx = NULL;
+ EVP_PKEY *dhkey = NULL,
+ *params = NULL;
+#endif
- if (!(get_bn_from_bin(env, argv[0], &priv_key)
+ if (!(get_bn_from_bin(env, argv[0], &priv_key_in)
|| argv[0] == atom_undefined)
|| !enif_get_list_cell(env, argv[1], &head, &tail)
|| !get_bn_from_bin(env, head, &dh_p)
@@ -3021,40 +3026,63 @@ static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_
|| !get_bn_from_bin(env, head, &dh_g)
|| !enif_is_empty_list(env, tail)
|| !enif_get_int(env, argv[2], &mpint) || (mpint & ~4)
- || !enif_get_ulong(env, argv[3], &len) ) {
-
- if (priv_key) BN_free(priv_key);
+ || !enif_get_ulong(env, argv[3], &len)
+
+ /* Load dh_params with the values to be used by the generator.
+ Memory management is transferred from dh_p etc. to dh_params */
+ || !(dh_params = DH_new())
+ || (priv_key_in && !DH_set0_key(dh_params, NULL, priv_key_in))
+ || !DH_set0_pqg(dh_params, dh_p, NULL, dh_g)
+ ) {
+ if (priv_key_in) BN_free(priv_key_in);
if (dh_p) BN_free(dh_p);
if (dh_g) BN_free(dh_g);
+ if (dh_params) DH_free(dh_params);
return enif_make_badarg(env);
}
- dh_params = DH_new();
- DH_set0_key(dh_params, NULL, priv_key);
- DH_set0_pqg(dh_params, dh_p, NULL, dh_g);
-
if (len) {
if (len < BN_num_bits(dh_p))
DH_set_length(dh_params, len);
else {
- DH_free(dh_params);
+ if (priv_key_in) BN_free(priv_key_in);
+ if (dh_p) BN_free(dh_p);
+ if (dh_g) BN_free(dh_g);
+ if (dh_params) DH_free(dh_params);
return enif_make_badarg(env);
}
}
+#ifdef HAS_EVP_PKEY_CTX
+ if ((dhkey = EVP_PKEY_new())
+ && (params = EVP_PKEY_new())
+ && EVP_PKEY_set1_DH(params, dh_params) /* set the key referenced by params to dh_params.
+ dh_params (and params) must be freed by us*/
+ && (ctx = EVP_PKEY_CTX_new(params, NULL))
+ && EVP_PKEY_keygen_init(ctx)
+ && EVP_PKEY_keygen(ctx, &dhkey)
+ && (dh_params = EVP_PKEY_get1_DH(dhkey)) /* return the referenced key. dh_params and dhkey must be freed */
+ ) {
+#else
if (DH_generate_key(dh_params)) {
- const BIGNUM *pub_key, *priv_key;
- DH_get0_key(dh_params, &pub_key, &priv_key);
- pub_len = BN_num_bytes(pub_key);
- prv_len = BN_num_bytes(priv_key);
+#endif
+ const BIGNUM *pub_key_gen, *priv_key_gen;
+
+ DH_get0_key(dh_params,
+ &pub_key_gen, &priv_key_gen); /* Get pub_key_gen and priv_key_gen.
+ "The values point to the internal representation of
+ the public key and private key values. This memory
+ should not be freed directly." says man */
+ pub_len = BN_num_bytes(pub_key_gen);
+ prv_len = BN_num_bytes(priv_key_gen);
pub_ptr = enif_make_new_binary(env, pub_len+mpint, &ret_pub);
prv_ptr = enif_make_new_binary(env, prv_len+mpint, &ret_prv);
if (mpint) {
put_int32(pub_ptr, pub_len); pub_ptr += 4;
put_int32(prv_ptr, prv_len); prv_ptr += 4;
}
- BN_bn2bin(pub_key, pub_ptr);
- BN_bn2bin(priv_key, prv_ptr);
+ BN_bn2bin(pub_key_gen, pub_ptr);
+ BN_bn2bin(priv_key_gen, prv_ptr);
ERL_VALGRIND_MAKE_MEM_DEFINED(pub_ptr, pub_len);
ERL_VALGRIND_MAKE_MEM_DEFINED(prv_ptr, prv_len);
ret = enif_make_tuple2(env, ret_pub, ret_prv);
@@ -3062,21 +3090,33 @@ static ERL_NIF_TERM dh_generate_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_
else {
ret = atom_error;
}
+
DH_free(dh_params);
+#ifdef HAS_EVP_PKEY_CTX
+ if (ctx) EVP_PKEY_CTX_free(ctx);
+ /* if (dhkey) EVP_PKEY_free(dhkey); */
+ /* if (params) EVP_PKEY_free(params); */
+#endif
return ret;
}
static ERL_NIF_TERM dh_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{/* (OthersPublicKey, MyPrivateKey, DHParams=[P,G]) */
- DH* dh_params;
- BIGNUM *dummy_pub_key = NULL, *priv_key = NULL;
- BIGNUM *other_pub_key;
- BIGNUM *dh_p = NULL, *dh_g = NULL;
- int i;
+ BIGNUM *dummy_pub_key = NULL,
+ *priv_key = NULL,
+ *other_pub_key = NULL,
+ *dh_p = NULL,
+ *dh_g = NULL;
ErlNifBinary ret_bin;
ERL_NIF_TERM ret, head, tail;
-
- dh_params = DH_new();
+ DH *dh_priv = DH_new(), *dh_pub = DH_new();
+#ifdef HAS_EVP_PKEY_CTX
+ EVP_PKEY_CTX *ctx = NULL;
+ EVP_PKEY *my_priv_key = NULL, *peer_pub_key = NULL;
+ size_t skeylen;
+#else
+ int i;
+#endif
if (!get_bn_from_bin(env, argv[0], &other_pub_key)
|| !get_bn_from_bin(env, argv[1], &priv_key)
@@ -3084,35 +3124,78 @@ static ERL_NIF_TERM dh_compute_key_nif(ErlNifEnv* env, int argc, const ERL_NIF_T
|| !get_bn_from_bin(env, head, &dh_p)
|| !enif_get_list_cell(env, tail, &head, &tail)
|| !get_bn_from_bin(env, head, &dh_g)
- || !enif_is_empty_list(env, tail)) {
+ || !enif_is_empty_list(env, tail)
+
+ /* Note: DH_set0_key() does not allow setting only the
+ * private key, although DH_compute_key() does not use the
+ * public key. Work around this limitation by setting
+ * the public key to a copy of the private key.
+ */
+ || !(dummy_pub_key = BN_dup(priv_key))
+ || !DH_set0_key(dh_priv, dummy_pub_key, priv_key)
+ || !DH_set0_pqg(dh_priv, dh_p, NULL, dh_g)
+ ) {
if (dh_p) BN_free(dh_p);
if (dh_g) BN_free(dh_g);
- ret = enif_make_badarg(env);
+ if (other_pub_key) BN_free(other_pub_key);
+ if (dummy_pub_key) BN_free(dummy_pub_key);
+ if (priv_key) BN_free(priv_key);
+ return enif_make_badarg(env);
+ }
+
+#ifdef HAS_EVP_PKEY_CTX
+ if (!(my_priv_key = EVP_PKEY_new())
+ || !EVP_PKEY_set1_DH(my_priv_key, dh_priv) /* set the key referenced by my_priv_key to dh_priv.
+ dh_priv (and my_priv_key) must be freed by us*/
+
+ || !(peer_pub_key = EVP_PKEY_new())
+ || !DH_set0_key(dh_pub, other_pub_key, NULL)
+ || !DH_set0_pqg(dh_pub, dh_p, NULL, dh_g)
+ || !EVP_PKEY_set1_DH(peer_pub_key, dh_pub)
+
+ || !(ctx = EVP_PKEY_CTX_new(my_priv_key, NULL))
+ || (EVP_PKEY_derive_init(ctx) <= 0)
+ || (EVP_PKEY_derive_set_peer(ctx, peer_pub_key) <= 0)
+ || (EVP_PKEY_derive(ctx, NULL, &skeylen) <= 0)) {
+
+ ret = atom_error;
}
else {
- /* Note: DH_set0_key() does not allow setting only the
- * private key, although DH_compute_key() does not use the
- * public key. Work around this limitation by setting
- * the public key to a copy of the private key.
- */
- dummy_pub_key = BN_dup(priv_key);
- DH_set0_key(dh_params, dummy_pub_key, priv_key);
- DH_set0_pqg(dh_params, dh_p, NULL, dh_g);
- enif_alloc_binary(DH_size(dh_params), &ret_bin);
- i = DH_compute_key(ret_bin.data, other_pub_key, dh_params);
- if (i > 0) {
- if (i != ret_bin.size) {
- enif_realloc_binary(&ret_bin, i);
- }
- ret = enif_make_binary(env, &ret_bin);
- }
- else {
+ enif_alloc_binary(skeylen, &ret_bin);
+
+ if ((EVP_PKEY_derive(ctx, ret_bin.data, &skeylen) > 0)
+ && (ret_bin.size >= skeylen)) {
+ /* Derivation succeeded */
+ if (ret_bin.size > skeylen) enif_realloc_binary(&ret_bin, skeylen);
+ ret = enif_make_binary(env, &ret_bin);
+ }
+ else {
enif_release_binary(&ret_bin);
- ret = atom_error;
- }
+ ret = atom_error;
+ }
}
+
+#else
+ enif_alloc_binary(DH_size(dh_priv), &ret_bin);
+ i = DH_compute_key(ret_bin.data, other_pub_key, dh_priv);
+ if (i > 0) {
+ if (i != ret_bin.size) enif_realloc_binary(&ret_bin, i);
+ ret = enif_make_binary(env, &ret_bin);
+ }
+ else {
+ enif_release_binary(&ret_bin);
+ ret = atom_error;
+ }
+#endif
+
if (other_pub_key) BN_free(other_pub_key);
- DH_free(dh_params);
+ if (dh_priv) DH_free(dh_priv);
+ if (dh_pub) DH_free(dh_pub);
+#ifdef HAS_EVP_PKEY_CTX
+ if (ctx) EVP_PKEY_CTX_free(ctx);
+ /* if (my_priv_key) EVP_PKEY_free(my_priv_key); */
+ /* if (peer_pub_key) EVP_PKEY_free(peer_pub_key); */
+#endif
return ret;
}
diff --git a/lib/crypto/doc/src/crypto.xml b/lib/crypto/doc/src/crypto.xml
index 3a5efd0bea..af676d9021 100644
--- a/lib/crypto/doc/src/crypto.xml
+++ b/lib/crypto/doc/src/crypto.xml
@@ -136,6 +136,7 @@
See also <seealso marker="#supports-0">crypto:supports/0</seealso>
</p>
+ <marker id="type-engine_key_ref"/>
<marker id="engine_key_ref_type"/>
<code>engine_key_ref() = #{engine := engine_ref(),
key_id := key_id(),
diff --git a/lib/crypto/test/crypto_SUITE.erl b/lib/crypto/test/crypto_SUITE.erl
index 6dab459df6..d148fa3856 100644
--- a/lib/crypto/test/crypto_SUITE.erl
+++ b/lib/crypto/test/crypto_SUITE.erl
@@ -131,7 +131,8 @@ groups() ->
{ecdsa, [], [sign_verify
%% Does not work yet: ,public_encrypt, private_encrypt
]},
- {dh, [], [generate_compute]},
+ {dh, [], [generate_compute,
+ compute_bug]},
{ecdh, [], [compute, generate]},
{srp, [], [generate_compute]},
{des_cbc, [], [block]},
@@ -463,6 +464,24 @@ generate_compute(Config) when is_list(Config) ->
GenCom = proplists:get_value(generate_compute, Config),
lists:foreach(fun do_generate_compute/1, GenCom).
%%--------------------------------------------------------------------
+compute_bug() ->
+ [{doc, "Test that it works even if the Secret is smaller than expected"}].
+compute_bug(Config) ->
+ ExpectedSecret = <<118,89,171,16,156,18,156,103,189,134,130,49,28,144,111,241,247,82,79,32,228,11,209,141,119,176,251,80,105,143,235,251,203,121,223,211,129,3,233,133,45,2,31,157,24,111,5,75,153,66,135,185,128,115,229,178,216,39,73,52,80,151,8,241,34,52,226,71,137,167,53,48,59,224,175,154,89,110,76,83,24,117,149,21,72,6,186,78,149,74,188,56,98,244,30,77,108,248,88,194,195,237,23,51,20,242,254,123,21,12,209,74,217,168,230,65,7,60,211,139,128,239,234,153,22,229,180,59,159,121,41,156,121,200,177,130,163,162,54,224,93,1,94,11,177,254,118,28,156,26,116,10,207,145,219,166,214,189,214,230,221,170,228,15,69,88,31,68,94,255,113,58,49,82,86,192,248,176,131,133,39,186,194,172,206,84,184,16,66,68,153,128,178,227,27,118,52,130,122,92,24,222,102,195,221,207,255,13,152,175,65,32,167,84,54,244,243,109,244,18,234,16,159,224,188,2,106,123,27,17,131,171,226,34,111,251,62,119,155,124,221,124,254,62,97,167,1,105,116,98,98,19,197,30,72,180,79,221,100,134,120,117,124,85,73,132,224,223,222,41,155,137,218,130,238,237,157,161,134,150,69,206,91,141,17,89,120,218,235,229,37,150,76,197,7,157,56,144,42,203,137,100,200,72,141,194,239,1,67,236,238,183,48,214,75,76,108,235,3,237,67,40,137,45,182,236,246,37,116,103,144,237,142,211,88,233,11,24,21,218,41,245,250,51,130,250,104,74,189,17,69,145,70,50,50,215,253,155,10,128,41,114,185,211,82,164,72,92,17,145,104,66,6,140,226,80,43,62,1,166,216,153,118,96,15,147,126,137,118,191,192,75,149,241,206,18,92,17,154,215,219,18,6,139,190,103,210,156,184,29,224,213,157,60,112,189,104,220,125,40,186,50,119,17,143,136,149,38,74,107,21,192,59,61,59,42,231,144,59,175,3,176,87,23,16,122,54,31,82,34,230,211,44,81,41,47,86,37,228,175,130,148,88,136,131,254,241,202,99,199,175,1,141,215,124,155,120,43,141,89,11,140,120,141,29,35,82,219,155,204,75,12,66,241,253,33,250,84,24,85,68,13,80,85,142,227,34,139,26,146,24>>,
+ OthersPublicKey = 635619632099733175381667940709387641100492974601603060984753028943194386334921787463327680809776598322996634648015962954045728174069768874873236397421720142610982770302060309928552098274817978606093380781524199673890631795310930242601197479471368910519338301177304682162189801040921618559902948819107531088646753320486728060005223263561551402855338732899079439899705951063999951507319258050864346087428042978411873495523439615429804957374639092580169417598963105885529553632847023899713490485619763926900318508906706745060947269748612049634207985438016935262521715769812475329234748426647554362991758104620357149045960316987533503707855364806010494793980069245562784050236811004893018183726397041999426883788660276453352521120006817370050691205529335316794439089316232980047277245051173281601960196573681285904611182521967067911862467395705665888521948321299521549941618586026714676885890192323289343756440666276226084448279082483536164085883288884231665240707495770544705648564889889198060417915693315346959170105413290799314390963124178046425737828369059171472978294050322371452255088799865552038756937873388385970088906560408959959429398326288750834357514847891423941047433478384621074116184703014798814515161475596555032391555842,
+ MyPrivateKey = 387759582879975726965038486537011291913744975764132199838375902680222019267527675651273586836110220500657652661706223760165097275862806031329642160439090779625708664007910974206651834216043397115514725827856461492311499129200688538220719685637154290305617686974719521885238198226075381217068175824097878445476010193039590876624464274744156624589136789060427283492343902761765833713520850870233407503430180028104167029073459918756981323130062648615262139444306321256382009848217866984408901761817655567071716275177768316006340055589170095799943481591033461616307776069027985761229636731465482676467627154100912586936231051371168178564599296638350391246393336702334311781595616786107810962134407697848002331639021101685320844880636050048769216986088652236979636019052557155807310341483407890060105599892252118584570558049301477535792498672552850760356632076013402382600669875697284264329434950712239302528367835155163504374877787288116104285944993818319105835423479332617802010952731990182088670508346704423006877514817882782443833997288652405892920173712497948376815825396272381214976859009518623799156300136570204539240675245115597412280078940442452936425561984312708387584800789375684525365060589104566195610526570099527133097201479,
+ P = 818034524162384276004384029858643530286875094391273833506734966261806257117433972760379103507630310628953496150318170372254219924175532996281953750642804369831900894594960807970232131410638888573275563720690293481410915588408505771183615664441221559618326229227448328879290185035795866796496147000467456347856187771645103733376761936369144682074588463621004219054111244232031965820058057143484947957179035662640791007685559024477920075136419228662974090561346329074935312181886940693299380892129818458511403741106419480550004799657220331973244248753744516935739033770420884365608406478656143540532371463443324228730693491647103274058971797182813283112583029849186056551355376851686616057869624968077484471229044125401535456699914745876082047459812392122562460031611344154642406382436701361983114768023990405077450124649862159757605118611426368650203370143674925598905779061402007525955196464201496773278952462368223659263492419274489020447849336502432222101793313731259141617677580646998184158969477474527427664187763741360356528830301163614618231141541403007931347398186427059736520580903587497382362610721261644208653717495736748724114113311672504064943864203789205551568648546606356374830209356446449765364678719909024329058480379,
+ G = 2,
+ DHParameters = [P, G],
+ case crypto:compute_key(dh, OthersPublicKey, MyPrivateKey, DHParameters) of
+ ExpectedSecret ->
+ ok;
+ Others ->
+ ct:log("Got ~p",[Others]),
+ {fail, "crypto:compute_key(dh,...) failed for the bug test"}
+ end.
+
+%%--------------------------------------------------------------------
no_generate_compute() ->
[{doc, "Test crypto:genarate_key and crypto:compute_key "
"for disabled algorithms"}].
diff --git a/lib/diameter/doc/src/diameter.xml b/lib/diameter/doc/src/diameter.xml
index 6bc7d147c0..dfa4c803ed 100644
--- a/lib/diameter/doc/src/diameter.xml
+++ b/lib/diameter/doc/src/diameter.xml
@@ -135,7 +135,7 @@ along with any
extra arguments to be appended to those documented.
Note that extra arguments specific to an outgoing request can be
specified to &call;, in which case
-those are are appended to any module-specific extra arguments.</p>
+those are appended to any module-specific extra arguments.</p>
<p>
Specifying a <c>#diameter_callback{}</c> record allows individual
@@ -150,7 +150,7 @@ See <c>diameter_callback.erl</c> for details.</p>
<p>
Options defining a Diameter application.
-Has one the following types.</p>
+Has one of the following types.</p>
<taglist>
@@ -2221,7 +2221,7 @@ Stop a diameter service.</p>
<p>
Stopping a service causes all associated transport connections to be
broken.
-A DPR message with be sent as in the case of &remove_transport;.</p>
+A DPR message will be sent as in the case of &remove_transport;.</p>
<note>
<p>
diff --git a/lib/diameter/doc/src/diameter_codec.xml b/lib/diameter/doc/src/diameter_codec.xml
index 5124b49484..0a34dd7ec7 100644
--- a/lib/diameter/doc/src/diameter_codec.xml
+++ b/lib/diameter/doc/src/diameter_codec.xml
@@ -255,7 +255,7 @@ Another list-valued representation allows a message to be specified
as a list whose head is a &header; and whose tail is an &avp; list.
This representation is used by diameter itself when relaying requests
as directed by the return value of a &app_handle_request; callback.
-It differs from the other other two in that it bypasses the checks for
+It differs from the other two in that it bypasses the checks for
messages that do not agree with their definitions in the dictionary in
question: messages are sent exactly as specified.</p>
diff --git a/lib/diameter/doc/src/diameter_dict.xml b/lib/diameter/doc/src/diameter_dict.xml
index 94016d9466..37d30b709b 100644
--- a/lib/diameter/doc/src/diameter_dict.xml
+++ b/lib/diameter/doc/src/diameter_dict.xml
@@ -486,9 +486,9 @@ will result in the following record definition given an empty
prefix.</p>
<pre>
--record('SIP-Deregistration-Reason' {'SIP-Reason-Code',
- 'SIP-Reason-Info',
- 'AVP'}).
+-record('SIP-Deregistration-Reason', {'SIP-Reason-Code',
+ 'SIP-Reason-Info',
+ 'AVP'}).
</pre>
<p>
diff --git a/lib/diameter/doc/src/diameter_soc.xml b/lib/diameter/doc/src/diameter_soc.xml
index 28e01ff1be..2d2d66a243 100644
--- a/lib/diameter/doc/src/diameter_soc.xml
+++ b/lib/diameter/doc/src/diameter_soc.xml
@@ -137,7 +137,7 @@ Capitalized <em>Diameter</em> refers to the protocol, lowercase
<cell>Creating New AVPs</cell>
<cell>&NA;</cell>
<cell>New AVPs can be defined using the dictionary interface.
- Both both RFC data formats and extensions are supported.</cell>
+ Both RFC data formats and extensions are supported.</cell>
</row>
<row>
<cell>1.3.3</cell>
diff --git a/lib/diameter/doc/src/notes.xml b/lib/diameter/doc/src/notes.xml
index fa1be39b5b..f74a4ca2a2 100644
--- a/lib/diameter/doc/src/notes.xml
+++ b/lib/diameter/doc/src/notes.xml
@@ -75,7 +75,7 @@ first.</p>
<list>
<item>
<p>
- Fix documentation typo: peer_down/3 was written where
+ Fix documentation typo: peer_up/3 was written where
peer_down/3 was intended.</p>
<p>
Own Id: OTP-14805</p>
diff --git a/lib/diameter/src/diameter.appup.src b/lib/diameter/src/diameter.appup.src
index 05a8c9378e..3389f11937 100644
--- a/lib/diameter/src/diameter.appup.src
+++ b/lib/diameter/src/diameter.appup.src
@@ -57,7 +57,8 @@
{"2.1", [{restart_application, diameter}]}, %% 20.1
{"2.1.1", [{restart_application, diameter}]}, %% 20.1.2
{"2.1.2", [{restart_application, diameter}]}, %% 20.1.3
- {"2.1.3", [{restart_application, diameter}]} %% 20.2
+ {"2.1.3", [{restart_application, diameter}]}, %% 20.2
+ {"2.1.4", [{restart_application, diameter}]} %% 20.3
],
[
{"0.9", [{restart_application, diameter}]},
@@ -96,6 +97,7 @@
{"2.1", [{restart_application, diameter}]},
{"2.1.1", [{restart_application, diameter}]},
{"2.1.2", [{restart_application, diameter}]},
- {"2.1.3", [{restart_application, diameter}]}
+ {"2.1.3", [{restart_application, diameter}]},
+ {"2.1.4", [{restart_application, diameter}]}
]
}.
diff --git a/lib/diameter/vsn.mk b/lib/diameter/vsn.mk
index b0fb4ada28..3081034df9 100644
--- a/lib/diameter/vsn.mk
+++ b/lib/diameter/vsn.mk
@@ -17,5 +17,5 @@
# %CopyrightEnd%
APPLICATION = diameter
-DIAMETER_VSN = 2.1.4
+DIAMETER_VSN = 2.1.5
APP_VSN = $(APPLICATION)-$(DIAMETER_VSN)$(PRE_VSN)
diff --git a/lib/edoc/src/edoc_doclet.erl b/lib/edoc/src/edoc_doclet.erl
index 0e084e619e..f55cffe158 100644
--- a/lib/edoc/src/edoc_doclet.erl
+++ b/lib/edoc/src/edoc_doclet.erl
@@ -255,7 +255,7 @@ modules_frame(Dir, Ms, Title, CSS) ->
?NL,
{table, [{width, "100%"}, {border, 0},
{summary, "list of modules"}],
- lists:concat(
+ lists:append(
[[?NL,
{tr, [{td, [],
[{a, [{href, module_ref(M)},
@@ -448,7 +448,7 @@ application_frame(Dir, Apps, Title, CSS) ->
{h2, ["Applications"]},
?NL,
{table, [{width, "100%"}, {border, 0}],
- lists:concat(
+ lists:append(
[[{tr, [{td, [], [{a, [{href,app_ref(Path,App)},
{target,"_top"}],
[App]}]}]}]
diff --git a/lib/kernel/doc/src/Makefile b/lib/kernel/doc/src/Makefile
index 2413541082..82869d7b15 100644
--- a/lib/kernel/doc/src/Makefile
+++ b/lib/kernel/doc/src/Makefile
@@ -56,6 +56,11 @@ XML_REF3_FILES = application.xml \
inet.xml \
inet_res.xml \
init_stub.xml \
+ logger.xml \
+ logger_std_h.xml \
+ logger_disk_log_h.xml \
+ logger_filters.xml \
+ logger_formatter.xml \
net_adm.xml \
net_kernel.xml \
os.xml \
@@ -70,11 +75,17 @@ XML_REF4_FILES = app.xml config.xml
XML_REF6_FILES = kernel_app.xml
-XML_PART_FILES =
-XML_CHAPTER_FILES = notes.xml
+XML_PART_FILES = part.xml
+XML_CHAPTER_FILES = \
+ notes.xml \
+ introduction_chapter.xml \
+ logger_chapter.xml
BOOK_FILES = book.xml
+IMAGE_FILES = \
+ logger_arch.png
+
XML_FILES = \
$(BOOK_FILES) $(XML_CHAPTER_FILES) \
$(XML_PART_FILES) $(XML_REF3_FILES) $(XML_REF4_FILES)\
@@ -111,7 +122,7 @@ SPECS_FLAGS = -I../../include
# ----------------------------------------------------
# Targets
# ----------------------------------------------------
-$(HTMLDIR)/%.gif: %.gif
+$(HTMLDIR)/%: %
$(INSTALL_DATA) $< $@
docs: man pdf html
@@ -120,11 +131,12 @@ $(TOP_PDF_FILE): $(XML_FILES)
pdf: $(TOP_PDF_FILE)
-html: gifs $(HTML_REF_MAN_FILE)
+html: images $(HTML_REF_MAN_FILE)
man: $(MAN3_FILES) $(MAN4_FILES) $(MAN6_FILES)
-gifs: $(GIF_FILES:%=$(HTMLDIR)/%)
+images: $(IMAGE_FILES:%=$(HTMLDIR)/%)
+
debug opt:
clean clean_docs:
diff --git a/lib/kernel/doc/src/book.xml b/lib/kernel/doc/src/book.xml
index 81a87d126d..0b69b547e7 100644
--- a/lib/kernel/doc/src/book.xml
+++ b/lib/kernel/doc/src/book.xml
@@ -34,6 +34,9 @@
<preamble>
<contents level="2"></contents>
</preamble>
+ <parts lift="yes">
+ <xi:include href="part.xml"/>
+ </parts>
<applications>
<xi:include href="ref_man.xml"/>
</applications>
diff --git a/lib/kernel/doc/src/error_logger.xml b/lib/kernel/doc/src/error_logger.xml
index 91bf57cb91..cb6165c73e 100644
--- a/lib/kernel/doc/src/error_logger.xml
+++ b/lib/kernel/doc/src/error_logger.xml
@@ -31,6 +31,16 @@
<module>error_logger</module>
<modulesummary>Erlang error logger.</modulesummary>
<description>
+
+ <note>
+ <p>In OTP-21, a new API for logging was added to Erlang/OTP. The
+ old <c>error_logger</c> module can still be used by legacy
+ code, but new code should use the new API instead.</p>
+ <p>See <seealso marker="logger"><c>logger(3)</c></seealso> and
+ the <seealso marker="logger_chapter">Logging</seealso> chapter
+ in the user's guide for more information.</p>
+ </note>
+
<p>The Erlang <em>error logger</em> is an event manager (see
<seealso marker="doc/design_principles:des_princ">OTP Design Principles</seealso> and
<seealso marker="stdlib:gen_event"><c>gen_event(3)</c></seealso>),
@@ -171,14 +181,17 @@ ok</pre>
<func>
<name name="get_format_depth" arity="0"/>
<fsummary>Get the value of the Kernel application variable
- <c>error_logger_format_depth</c>.</fsummary>
+ <c>logger_format_depth</c>.</fsummary>
<desc>
<p>Returns <c>max(10, Depth)</c>, where <c>Depth</c> is the
value of
- <seealso marker="kernel:kernel_app#error_logger_format_depth">
- error_logger_format_depth</seealso>
+ <seealso marker="kernel_app#logger_format_depth">
+ logger_format_depth</seealso>
in the Kernel application, if Depth is an integer. Otherwise,
<c>unlimited</c> is returned.</p>
+ <p>For backwards compatibility, the value
+ of <c>error_logger_format_depth</c> is used
+ if <c>logger_format_depth</c> is not set.</p>
</desc>
</func>
<func>
diff --git a/lib/kernel/doc/src/introduction_chapter.xml b/lib/kernel/doc/src/introduction_chapter.xml
new file mode 100644
index 0000000000..6e6990ddda
--- /dev/null
+++ b/lib/kernel/doc/src/introduction_chapter.xml
@@ -0,0 +1,64 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Introduction</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev></rev>
+ <file>introduction.xml</file>
+ </header>
+
+ <section>
+ <title>Scope</title>
+ <p>The Kernel application has all the code necessary to run
+ the Erlang runtime system: file servers, code servers,
+ and so on.</p>
+ <p>The Kernel application is the first application started. It is
+ mandatory in the sense that the minimal system based on
+ Erlang/OTP consists of Kernel and STDLIB. Kernel
+ contains the following functional areas:</p>
+ <list type="bulleted">
+ <item>Start, stop, supervision, configuration, and distribution of applications</item>
+ <item>Code loading</item>
+ <item>Logging</item>
+ <item>Error logging</item>
+ <item>Global name service</item>
+ <item>Supervision of Erlang/OTP</item>
+ <item>Communication with sockets</item>
+ <item>Operating system interface</item>
+ </list>
+ </section>
+
+ <section>
+ <title>Prerequisites</title>
+ <p>It is assumed that the reader is familiar with the Erlang programming
+ language.</p>
+ </section>
+</chapter>
+
+
diff --git a/lib/kernel/doc/src/kernel_app.xml b/lib/kernel/doc/src/kernel_app.xml
index 0762cebc94..554d675383 100644
--- a/lib/kernel/doc/src/kernel_app.xml
+++ b/lib/kernel/doc/src/kernel_app.xml
@@ -51,10 +51,13 @@
</description>
<section>
- <title>Error Logger Event Handlers</title>
- <p>Two standard error logger event handlers are defined in
- the Kernel application. These are described in
- <seealso marker="error_logger"><c>error_logger(3)</c></seealso>.</p>
+ <title>Logger Handlers</title>
+ <p>Two standard logger handlers are defined in
+ the Kernel application. These are described in the
+ <seealso marker="logger_chapter">Kernel User's Guide</seealso>,
+ and in <seealso marker="logger_std_h"><c>logger_std_h(3)</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h(3)</c>
+ </seealso>.</p>
</section>
<section>
@@ -113,6 +116,7 @@
</section>
<section>
+ <marker id="configuration"/>
<title>Configuration</title>
<p>The following configuration parameters are defined for the Kernel
application. For more information about configuration parameters,
@@ -176,34 +180,105 @@
<p>Permissions are described in
<seealso marker="application#permit/2"><c>application:permit/2</c></seealso>.</p>
</item>
- <tag><c>error_logger = Value</c></tag>
+ <tag><c>logger_dest = Value</c></tag>
<item>
<p><c>Value</c> is one of:</p>
<taglist>
<tag><c>tty</c></tag>
- <item><p>Installs the standard event handler, which prints error
- reports to <c>stdio</c>. This is the default option.</p></item>
+ <item><p>Installs the standard handler, <seealso marker="logger_std_h">
+ <c>logger_std_h(3)</c></seealso>, with <c>type</c> set
+ to <c>standard_io</c>. This is the default
+ option.</p></item>
<tag><c>{file, FileName}</c></tag>
- <item><p>Installs the standard event handler, which prints error
- reports to file <c>FileName</c>, where <c>FileName</c>
+ <item><p>Installs the standard handler, <seealso marker="logger_std_h">
+ <c>logger_std_h(3)</c></seealso>, with <c>type</c> set
+ to <c>{file, FileName}</c>, where <c>FileName</c>
is a string. The file is opened with encoding UTF-8.</p></item>
+ <tag><c>{disk_log, FileName}</c></tag>
+ <item><p>Installs the disk_log handler, <seealso marker="logger_disk_log_h">
+ <c>logger_disk_log_h(3)</c></seealso>, with <c>file</c> set
+ to <c>FileName</c> (a string), and possibly other disk_log
+ parameters set by the environment variables
+ <c>logger_disk_log_type</c>, <c>logger_disk_log_maxfiles</c> and
+ <c>logger_disk_log_maxbytes</c>,
+ see <seealso marker="#disk_log_vars">below</seealso>. The
+ file is opened with encoding UTF-8.</p></item>
<tag><c>false</c></tag>
<item>
- <p>No standard event handler is installed, but
- the initial, primitive event handler is kept, printing
+ <p>No standard handler is installed, but
+ the initial, primitive handler is kept, printing
raw event messages to <c>tty</c>.</p>
</item>
<tag><c>silent</c></tag>
<item>
- <p>Error logging is turned off.</p>
+ <p>No standard handler is started, and the initial,
+ primitive handler is removed.</p>
</item>
</taglist>
</item>
- <tag><c>error_logger_format_depth = Depth</c></tag>
+ <tag><c>logger_level = Level</c></tag>
+ <item>
+ <p><c>Value = emergency | alert | critical | error | warning |
+ notice | info | debug</c></p>
+ <p>This parameter specifies which log levels to log. The
+ specified level, and all levels that are more severe, will
+ be logged.</p>
+ <p>This configuration parameter is used both for the global
+ logger level, and for the standard handler started by
+ the Kernel application (see <c>logger_dest</c> variable above).</p>
+      <p>The default value is <c>info</c>.</p>
+ </item>
+ <tag><marker id="disk_log_vars"/>
+ <c>logger_disk_log_type = halt | wrap</c></tag>
+ <item/>
+ <tag><c>logger_disk_log_maxfiles = integer()</c></tag>
+ <item/>
+ <tag><c>logger_disk_log_maxbytes = integer()</c></tag>
+ <item>
+ <p>If <c>logger_dest</c> is set to {disk_log,File}, then these
+ parameters specify the configuration to use when opening the
+ disk log file. They specify the type of disk log, the
+ maximum number of files (if the type is wrap) and the
+ maximum size of each file, respectively.</p>
+ <p>The default values are:</p>
+ <code>
+logger_disk_log_type = wrap
+logger_disk_log_maxfiles = 10
+logger_disk_log_maxbytes = 1048576</code>
+ </item>
+ <tag><marker id="logger_sasl_compatible"/>
+ <c>logger_sasl_compatible = boolean()</c></tag>
+ <item>
+      <p>If this parameter is set to <c>true</c>, then the logger handler
+	started by Kernel will not log any progress-, crash-, or
+	supervisor reports. If the SASL application is started,
+	these log events will be sent to a second handler instance
+	named <c>sasl_h</c>, according to the values of the SASL environment
+	variables <c>sasl_error_logger</c>
+	and <c>sasl_errlog_type</c>, see
+	<seealso marker="sasl:sasl_app#configuration">SASL(6)
+	</seealso>.</p>
+      <p>The default value is <c>false</c>.</p>
+ <p>See chapter <seealso marker="logger_chapter#compatibility">Backwards
+ compatibility with error_logger</seealso> for more
+	information about handling of the so-called SASL reports.</p>
+ </item>
+ <tag><marker id="logger_log_progress"/>
+ <c>logger_log_progress = boolean()</c></tag>
+ <item>
+ <p>If <c>logger_sasl_compatible = false</c>,
+ then <c>logger_log_progress</c> specifies if progress
+ reports from <c>supervisor</c>
+ and <c>application_controller</c> shall be logged or
+ not.</p>
+	<p>If <c>logger_sasl_compatible = true</c>,
+	then <c>logger_log_progress</c> is ignored.</p>
+ </item>
+ <tag><marker id="logger_format_depth"/>
+ <c>logger_format_depth = Depth</c></tag>
<item>
- <marker id="error_logger_format_depth"></marker>
<p>Can be used to limit the size of the
- formatted output from the error logger event handlers.</p>
+ formatted output from the logger handlers.</p>
<note><p>This configuration parameter was introduced in OTP 18.1
and is experimental. Based on user feedback, it
@@ -214,16 +289,16 @@
useless.</p></note>
<p><c>Depth</c> is a positive integer representing the maximum
- depth to which terms are printed by the error logger event
+ depth to which terms are printed by the logger
handlers included in OTP. This
- configuration parameter is used by the two event handlers
- defined by the Kernel application and the two event
- handlers in the SASL application.
- (If you have implemented your own error handlers, this configuration
- parameter has no effect on them.)</p>
+ configuration parameter is used by the default formatter,
+ <seealso marker="logger_formatter"><c>logger_formatter(3)</c></seealso>,
+ unless the formatter's <c>depth</c> parameter is explicitly set.
+ (If you have implemented your own formatter, this configuration
+ parameter has no effect on that.)</p>
<p><c>Depth</c> is used as follows: Format strings
- passed to the event handlers are rewritten.
+ received by the formatter are rewritten.
The format controls <c>~p</c> and <c>~w</c> are replaced with
<c>~P</c> and <c>~W</c>, respectively, and <c>Depth</c> is
used as the depth parameter. For details, see
@@ -234,7 +309,20 @@
<c>30</c>. We recommend to test crashing various processes in your
application, examine the logs from the crashes, and then
increase or decrease the value.</p></note>
- </item>
+ </item>
+ <tag><c>logger_max_size = integer() | unlimited</c></tag>
+ <item>
+ <p>This parameter specifies the maximum size (bytes) each
+ log event can have when printed by the standard logger
+ handler. If the resulting string after formatting an event
+	is bigger than this, it will be truncated before being printed
+ to the handler's destination.</p>
+ </item>
+ <tag><c>logger_utc = boolean()</c></tag>
+ <item>
+ <p>If set to <c>true</c>, the default formatter will display
+	all dates in Coordinated Universal Time (UTC).</p>
+ </item>
<tag><c>global_groups = [GroupTuple]</c></tag>
<item>
<marker id="global_groups"></marker>
@@ -497,6 +585,26 @@ MaxT = TickTime + TickTime / 4</code>
</section>
<section>
+ <title>Deprecated Configuration Parameters</title>
+ <p>In OTP-21, a new API for logging was added to Erlang/OTP. The
+ old <c>error_logger</c> event manager, and event handlers
+ running on this manager, will still work, but they are not used
+ by default.</p>
+ <p>The following application environment variables can still be
+ set, but they will only be used if the corresponding new logger
+ variables are not set.</p>
+ <taglist>
+ <tag><c>error_logger</c></tag>
+ <item>Replaced by <c>logger_dest</c></item>
+ <tag><c>error_logger_format_depth</c></tag>
+ <item>Replaced by <c>logger_format_depth</c></item>
+ </taglist>
+ <p>See <seealso marker="logger_chapter#compatibility">Backwards
+ compatibility with error_logger</seealso> for more
+ information.</p>
+ </section>
+
+ <section>
<title>See Also</title>
<p><seealso marker="app"><c>app(4)</c></seealso>,
<seealso marker="application"><c>application(3)</c></seealso>,
diff --git a/lib/kernel/doc/src/logger.xml b/lib/kernel/doc/src/logger.xml
new file mode 100644
index 0000000000..66e6e5c689
--- /dev/null
+++ b/lib/kernel/doc/src/logger.xml
@@ -0,0 +1,478 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger.xml</file>
+ </header>
+ <module>logger</module>
+ <modulesummary>API module for the logger application.</modulesummary>
+
+ <description>
+
+ </description>
+
+ <datatypes>
+ <datatype>
+ <name name="level"/>
+ <desc>
+ <p>The severity level for the message to be logged.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="log"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="report"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="msg_fun"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="metadata"/>
+ <desc>
+ <p>Metadata associated with the message to be logged.</p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="config"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="handler_id"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_id"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ <datatype>
+ <name name="filter_return"/>
+ <desc>
+ <p></p>
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <section>
+ <title>Macros</title>
+ <p>The following macros are defined:</p>
+
+ <list>
+ <item><c>?LOG_EMERGENCY(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_EMERGENCY(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_ALERT(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_ALERT(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_CRITICAL(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_CRITICAL(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_ERROR(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_ERROR(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_WARNING(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_WARNING(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_NOTICE(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_NOTICE(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_INFO(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_INFO(FunOrFormat,Args[,Metadata])</c></item>
+ <item><c>?LOG_DEBUG(StringOrReport[,Metadata])</c></item>
+ <item><c>?LOG_DEBUG(FunOrFormat,Args[,Metadata])</c></item>
+ </list>
+
+ <p>All macros expand to a call to logger, where <c>Level</c> is
+ taken from the macro name, and the following metadata is added,
+ or merged with the given <c>Metadata</c>:</p>
+
+ <code>
+#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ file=>?FILE,
+ line=>?LINE}
+ </code>
+
+ <p>The call is wrapped in a case statement and will be evaluated
+ only if <c>Level</c> is equal to or below the configured log
+ level.</p>
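+
+  <p>As an illustration, a call corresponding to the macro list above
+    could look as follows. The module, function and metadata key below
+    are only examples; the macro and the include file are given by
+    this API:</p>
+
+  <code>
+-include_lib("kernel/include/logger.hrl").
+
+handle_timeout(Reason) ->
+    ?LOG_ERROR("request timed out: ~p",[Reason],#{tag=>timeout}).
+  </code>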
+ </section>
+
+ <funcs>
+ <func>
+ <name>emergency(StringOrReport[,Metadata])</name>
+ <name>emergency(Format,Args[,Metadata])</name>
+ <name>emergency(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>emergency</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(emergency,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>alert(StringOrReport[,Metadata])</name>
+ <name>alert(Format,Args[,Metadata])</name>
+ <name>alert(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>alert</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(alert,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>critical(StringOrReport[,Metadata])</name>
+ <name>critical(Format,Args[,Metadata])</name>
+ <name>critical(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>critical</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(critical,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>error(StringOrReport[,Metadata])</name>
+ <name>error(Format,Args[,Metadata])</name>
+ <name>error(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>error</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(error,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>warning(StringOrReport[,Metadata])</name>
+ <name>warning(Format,Args[,Metadata])</name>
+ <name>warning(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>warning</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(warning,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>notice(StringOrReport[,Metadata])</name>
+ <name>notice(Format,Args[,Metadata])</name>
+ <name>notice(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>notice</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(notice,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>info(StringOrReport[,Metadata])</name>
+ <name>info(Format,Args[,Metadata])</name>
+ <name>info(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>info</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(info,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name>debug(StringOrReport[,Metadata])</name>
+ <name>debug(Format,Args[,Metadata])</name>
+ <name>debug(Fun,FunArgs[,Metadata])</name>
+ <fsummary>Logs the given message as level <c>debug</c>.</fsummary>
+ <desc>
+ <p>Equivalent to
+ <seealso marker="#log-2"><c>log(debug,...)</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="log" arity="2"/>
+ <name name="log" arity="3" clause_i="1"/>
+ <name name="log" arity="3" clause_i="2"/>
+ <name name="log" arity="3" clause_i="3"/>
+ <name name="log" arity="4" clause_i="1"/>
+ <name name="log" arity="4" clause_i="2"/>
+ <fsummary>Logs the given message.</fsummary>
+ <type variable="Level"/>
+ <type variable="StringOrReport" name_i="1"/>
+ <type variable="Format" name_i="3"/>
+ <type variable="Args" name_i="3"/>
+ <type variable="Fun" name_i="4"/>
+ <type variable="FunArgs" name_i="4"/>
+ <type variable="Metadata"/>
+ <desc>
+ <p>Log the given message.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_logger_config" arity="0"/>
+ <fsummary>Lookup the current configuration for logger.</fsummary>
+ <desc>
+ <p>Lookup the current configuration for logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_handler_config" arity="1"/>
+ <fsummary>Lookup the current configuration for the given handler.</fsummary>
+ <desc>
+ <p>Lookup the current configuration for the given handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="i" arity="0"/>
+ <fsummary>Get information about all logger configurations</fsummary>
+ <desc>
+ <p>Same as <seealso marker="#i/1"><c>logger:i(term)</c></seealso></p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="i" arity="1" clause_i="1"/>
+ <name name="i" arity="1" clause_i="2"/>
+ <name name="i" arity="1" clause_i="3"/>
+ <fsummary>Get information about all logger configurations</fsummary>
+ <desc>
+      <p>The <c>logger:i/1</c> function can be used to get the
+	complete current logger configuration. How the information
+	is returned depends on the <c><anno>Action</anno></c>:</p>
+ <taglist>
+ <tag>string</tag>
+ <item>Return the pretty printed current logger configuration
+ as iodata.</item>
+ <tag>term</tag>
+ <item>Return the current logger configuration as a term. The
+	  format of this term may change between releases. For a
+ stable format use <seealso marker="#get_handler_config/1">
+ <c>logger:get_handler_config/1</c></seealso>
+ and <seealso marker="#get_logger_config/0">
+ <c>logger:get_logger_config/0</c></seealso>.
+ The same as calling <c>logger:i()</c>.</item>
+ <tag>print</tag>
+ <item>Pretty print all the current logger configuration to
+ standard out. Example:
+ <code><![CDATA[1> logger:i().
+Current logger configuration:
+ Level: info
+ FilterDefault: log
+ Filters:
+ Handlers:
+ Id: logger_std_h
+ Module: logger_std_h
+ Level: info
+ Formatter:
+ Module: logger_formatter
+ Config: #{template => [{logger_formatter,header},"\n",msg,"\n"],
+ legacy_header => true}
+ Filter Default: stop
+ Filters:
+ Id: stop_progress
+ Fun: fun logger_filters:progress/2
+ Config: stop
+ Id: remote_gl
+ Fun: fun logger_filters:remote_gl/2
+ Config: stop
+ Id: domain
+ Fun: fun logger_filters:domain/2
+ Config: {log,prefix_of,[beam,erlang,otp,sasl]}
+ Id: no_domain
+ Fun: fun logger_filters:domain/2
+ Config: {log,no_domain,[]}
+ Handler Config:
+ logger_std_h: #{type => standard_io}
+ Level set per module:
+ Module: my_module
+ Level: debug]]></code>
+ </item>
+ </taglist>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_logger_filter" arity="2"/>
+ <fsummary>Add a filter to the logger.</fsummary>
+ <desc>
+ <p>Add a filter to the logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handler_filter" arity="3"/>
+ <fsummary>Add a filter to the specified handler.</fsummary>
+ <desc>
+ <p>Add a filter to the specified handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_logger_filter" arity="1"/>
+ <fsummary>Remove a filter from the logger.</fsummary>
+ <desc>
+ <p>Remove the filter with the specified identity from the logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_handler_filter" arity="2"/>
+ <fsummary>Remove a filter from the specified handler.</fsummary>
+ <desc>
+ <p>Remove the filter with the specified identity from the given handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="add_handler" arity="3"/>
+ <fsummary>Add a handler with the given configuration.</fsummary>
+ <desc>
+ <p>Add a handler with the given configuration.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="remove_handler" arity="1"/>
+ <fsummary>Remove the handler with the specified identity.</fsummary>
+ <desc>
+ <p>Remove the handler with the specified identity.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_module_level" arity="2"/>
+ <fsummary>Set the log level for the specified module.</fsummary>
+ <desc>
+ <p>Set the log level for the specified module.</p>
+ <p>To change the logging level globally, use
+ <seealso marker="#set_logger_config/2"><c>logger:set_logger_config(level, Level)</c></seealso>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="reset_module_level" arity="1"/>
+ <fsummary>Remove a module specific log setting.</fsummary>
+ <desc>
+ <p>Remove a module specific log setting. After this, the
+ global log level is used for the specified module.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_logger_config" arity="1"/>
+ <name name="set_logger_config" arity="2"/>
+ <fsummary>Add or update configuration data for the logger.</fsummary>
+ <desc>
+ <p>Add or update configuration data for the logger.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_handler_config" arity="2"/>
+ <name name="set_handler_config" arity="3"/>
+ <fsummary>Add or update configuration data for the specified
+ handler.</fsummary>
+ <desc>
+ <p>Add or update configuration data for the specified
+ handler.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="compare_levels" arity="2"/>
+ <fsummary>Compare the severity of two log levels.</fsummary>
+ <desc>
+ <p>Compare the severity of two log levels. Returns <c>gt</c>
+ if <c>Level1</c> is more severe than
+ <c>Level2</c>, <c>lt</c> if <c>Level1</c> is less severe,
+ and <c>eq</c> if the levels are equal.</p>
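+        <p>For example, since <c>error</c> is more severe
+          than <c>debug</c>:</p>
+        <pre>
+1> <input>logger:compare_levels(error,debug).</input>
+gt</pre>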
+ </desc>
+ </func>
+
+ <func>
+ <name name="set_process_metadata" arity="1"/>
+ <fsummary>Set metadata to use when logging from current process.</fsummary>
+ <desc>
+      <p>Set metadata that <c>logger</c> automatically inserts
+	in all log events produced on the current
+ process. Subsequent calls will overwrite previous data set
+ by this function.</p>
+ <p>When logging, location data produced by the log macros,
+ and/or metadata given as argument to the log call (API
+ function or macro), will be merged with the process
+ metadata. If the same keys occur, values from the metadata
+ argument to the log call will overwrite values in the
+ process metadata, which in turn will overwrite values from
+ the location data.</p>
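+      <p>For example, to attach a request identifier to all log
+	events from the current process (the key name and value are
+	only an illustration):</p>
+      <code>
+logger:set_process_metadata(#{request_id=>"r-123"}).
+      </code>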
+ </desc>
+ </func>
+
+ <func>
+ <name name="get_process_metadata" arity="0"/>
+ <fsummary>Retrieve data set with set_process_metadata/1.</fsummary>
+ <desc>
+ <p>Retrieve data set
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="unset_process_metadata" arity="0"/>
+ <fsummary>Delete data set with set_process_metadata/1.</fsummary>
+ <desc>
+ <p>Delete data set
+ with <seealso marker="#set_process_metadata-1">
+ <c>set_process_metadata/1</c></seealso>.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_arch.png b/lib/kernel/doc/src/logger_arch.png
new file mode 100644
index 0000000000..727609a6ef
--- /dev/null
+++ b/lib/kernel/doc/src/logger_arch.png
Binary files differ
diff --git a/lib/kernel/doc/src/logger_chapter.xml b/lib/kernel/doc/src/logger_chapter.xml
new file mode 100644
index 0000000000..0bc3b37476
--- /dev/null
+++ b/lib/kernel/doc/src/logger_chapter.xml
@@ -0,0 +1,815 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE chapter SYSTEM "chapter.dtd">
+
+<chapter>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Logging</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ <file>logger_chapter.xml</file>
+ </header>
+
+ <section>
+ <title>Overview</title>
+ <p>Erlang/OTP provides a standard API for logging. The backend of
+    this API can be used as is, or it can be customized to suit
+ specific needs.</p>
+ <p>It consists of two parts - the <em>logger</em> part and the
+ <em>handler</em> part. The logger will forward log events to one
+    or more handlers.</p>
+
+ <image file="logger_arch.png">
+ <icaption>Conceptual overview</icaption>
+ </image>
+
+ <p><em>Filters</em> can be added to the logger and to each
+ handler. The filters decide if an event is to be forwarded or
+ not, and they can also modify all parts of the log event.</p>
+
+ <p>A <em>formatter</em> can be set for each handler. The formatter
+ does the final formatting of the log event, including the log
+ message itself, and possibly a timestamp, header and other
+ metadata.</p>
+
+ <p>In accordance with the Syslog protocol, RFC-5424, eight
+ severity levels can be specified:</p>
+
+ <table align="left">
+ <row>
+ <cell><strong>Level</strong></cell>
+ <cell align="center"><strong>Integer</strong></cell>
+ <cell><strong>Description</strong></cell>
+ </row>
+ <row>
+ <cell>emergency</cell>
+ <cell align="center">0</cell>
+ <cell>system is unusable</cell>
+ </row>
+ <row>
+ <cell>alert</cell>
+ <cell align="center">1</cell>
+ <cell>action must be taken immediately</cell>
+ </row>
+ <row>
+ <cell>critical</cell>
+ <cell align="center">2</cell>
+        <cell>critical conditions</cell>
+ </row>
+ <row>
+ <cell>error</cell>
+ <cell align="center">3</cell>
+ <cell>error conditions</cell>
+ </row>
+ <row>
+ <cell>warning</cell>
+ <cell align="center">4</cell>
+ <cell>warning conditions</cell>
+ </row>
+ <row>
+ <cell>notice</cell>
+ <cell align="center">5</cell>
+ <cell>normal but significant conditions</cell>
+ </row>
+ <row>
+ <cell>info</cell>
+ <cell align="center">6</cell>
+ <cell>informational messages</cell>
+ </row>
+ <row>
+ <cell>debug</cell>
+ <cell align="center">7</cell>
+ <cell>debug-level messages</cell>
+ </row>
+ <tcaption>Severity levels</tcaption>
+ </table>
+
+ <p>A log event is allowed by Logger if the integer value of
+ its <c>Level</c> is less than or equal to the currently
+ configured log level. The log level can be configured globally,
+ or to allow more verbose logging from a specific part of the
+ system, per module.</p>
+
+ <section>
+ <title>Customizable parts</title>
+
+ <taglist>
+ <tag><marker id="Handler"/>Handler</tag>
+ <item>
+ <p>A handler is defined as a module exporting the following
+ function:</p>
+
+ <code>log(Log, Config) -> ok</code>
+
+ <p>A handler is called by the logger backend after filtering on
+ logger level and on handler level for the handler which is
+ about to be called. The function call is done on the client
+ process, and it is up to the handler implementation if other
+ processes are to be involved or not.</p>
+
+ <p>Multiple instances of the same handler can be
+ added. Configuration is per instance.</p>
+
+ </item>
+
+ <tag><marker id="Filter"/>Filter</tag>
+ <item>
+ <p>Filters can be set on the logger or on a handler. Logger
+ filters are applied first, and if passed, the handler filters
+ for each handler are applied. The handler plugin is only
+ called if all handler filters for the handler in question also
+ pass.</p>
+
+ <p>A filter is specified as:</p>
+
+ <code>{fun((Log,Extra) -> Log | stop | ignore), Extra}</code>
+
+ <p>The configuration parameter <c>filter_default</c>
+ specifies the behavior if all filters return <c>ignore</c>.
+ <c>filter_default</c> is by default set to <c>log</c>.</p>
+
+ <p>The <c>Extra</c> parameter may contain any data that the
+ filter needs.</p>
+ </item>
+
+ <tag><marker id="Formatter"/>Formatter</tag>
+ <item>
+ <p>A formatter is defined as a module exporting the following
+ function:</p>
+
+ <code>format(Log,Extra) -> string()</code>
+
+ <p>The formatter plugin is called by each handler, and the
+ returned string can be printed to the handler's destination
+ (stdout, file, ...).</p>
+ </item>
+
+ </taglist>
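+
+      <p>As a sketch of how the filter specification above can be
+	used, the following adds a logger filter that stops all
+	events less severe than <c>error</c> and passes the rest on
+	unchanged (the filter id <c>error_only</c> is only an
+	example):</p>
+
+      <code>
+ErrorOnly = fun(#{level:=Level}=Log,_Extra) ->
+                    case logger:compare_levels(Level,error) of
+                        lt -> stop;
+                        _ -> Log
+                    end
+            end,
+logger:add_logger_filter(error_only,{ErrorOnly,[]}).
+      </code>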
+ </section>
+
+ <section>
+ <title>Built-in handlers</title>
+
+ <taglist>
+ <tag><c>logger_std_h</c></tag>
+ <item>
+ <p>This is the default handler used by OTP. Multiple instances
+ can be started, and each instance will write log events to a
+ given destination, console or file. Filters can be used for
+ selecting which event to send to which handler instance.</p>
+ </item>
+
+ <tag><c>logger_disk_log_h</c></tag>
+ <item>
+ <p>This handler behaves much like logger_std_h, except it uses
+ <seealso marker="disk_log"><c>disk_log</c></seealso> as its
+ destination.</p>
+ </item>
+
+ <tag><marker id="ErrorLoggerManager"/><c>error_logger</c></tag>
+ <item>
+ <p>This handler is to be used for backwards compatibility
+ only. It is not started by default, but will be automatically
+ started the first time an event handler is added
+ with <seealso marker="error_logger#add_report_handler-1">
+ <c>error_logger:add_report_handler/1,2</c></seealso>.</p>
+
+ <p>No built-in event handlers exist.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <title>Built-in filters</title>
+
+ <taglist>
+ <tag><c>logger_filters:domain/2</c></tag>
+ <item>
+ <p>This filter provides a way of filtering log events based on a
+	  <c>domain</c> field in <c>Metadata</c>. See
+ <seealso marker="logger_filters#domain-2">
+ <c>logger_filters:domain/2</c></seealso></p>
+ </item>
+
+ <tag><c>logger_filters:level/2</c></tag>
+ <item>
+ <p>This filter provides a way of filtering log events based
+	  on the log level. See <seealso marker="logger_filters#level-2">
+	  <c>logger_filters:level/2</c></seealso></p>
+ </item>
+
+ <tag><c>logger_filters:progress/2</c></tag>
+ <item>
+ <p>This filter matches all progress reports
+ from <c>supervisor</c> and <c>application_controller</c>.
+ See <seealso marker="logger_filters#progress/2">
+ <c>logger_filters:progress/2</c></seealso></p>
+ </item>
+
+ <tag><c>logger_filters:remote_gl/2</c></tag>
+ <item>
+ <p>This filter matches all events originating from a process
+ that has its group leader on a remote node.
+ See <seealso marker="logger_filters#remote_gl/2">
+ <c>logger_filters:remote_gl/2</c></seealso></p>
+ </item>
+ </taglist>
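+
+    <p>For example, a domain filter can be added to a handler as
+      follows, using an extra argument on the same form as in the
+      default configuration shown by <c>logger:i()</c>. The handler
+      and filter ids are only examples:</p>
+
+    <code>
+logger:add_handler_filter(my_handler,otp_domain,
+                          {fun logger_filters:domain/2,
+                           {log,prefix_of,[beam,erlang,otp,sasl]}}).
+    </code>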
+ </section>
+
+ <section>
+ <title>Default formatter</title>
+
+ <p>The default formatter is <c>logger_formatter</c>.
+ See <seealso marker="logger_formatter#format-2">
+ <c>logger_formatter:format/2</c></seealso>.</p>
+ </section>
+ </section>
+
+ <section>
+ <title>Configuration</title>
+
+ <section>
+ <title>Application environment variables</title>
+ <p>See <seealso marker="kernel_app#configuration">Kernel(6)</seealso> for
+ information about the application environment variables that can
+ be used for configuring logger.</p>
+ </section>
+
+ <section>
+ <title>Logger configuration</title>
+
+ <taglist>
+ <tag><c>level</c></tag>
+ <item>
+ <p>Specifies the severity level to log.</p>
+ </item>
+ <tag><c>filters</c></tag>
+ <item>
+ <p>Logger filters are added or removed with
+ <seealso marker="logger#add_logger_filter-2">
+ <c>logger:add_logger_filter/2</c></seealso> and
+ <seealso marker="logger#remove_logger_filter-1">
+ <c>logger:remove_logger_filter/1</c></seealso>,
+ respectively.</p>
+ <p>See <seealso marker="#Filter">Filter</seealso> for more
+ information.</p>
+ <p>By default, no filters exist.</p>
+ </item>
+ <tag><c>filter_default = log | stop</c></tag>
+ <item>
+ <p>Specifies what to do with an event if all filters
+ return <c>ignore</c>.</p>
+ <p>Default is <c>log</c>.</p>
+ </item>
+ <tag><c>handlers</c></tag>
+ <item>
+ <p>Handlers are added or removed with
+ <seealso marker="logger#add_handler-3">
+ <c>logger:add_handler/3</c></seealso> and
+ <seealso marker="logger#remove_handler-1">
+ <c>logger:remove_handler/1</c></seealso>,
+ respectively.</p>
+ <p>See <seealso marker="#Handler">Handler</seealso> for more
+ information.</p>
+ </item>
+ </taglist>
+ </section>
+
+ <section>
+ <marker id="handler_configuration"/>
+ <title>Handler configuration</title>
+ <taglist>
+ <tag><c>level</c></tag>
+ <item>
+ <p>Specifies the severity level to log.</p>
+ </item>
+ <tag><c>filters</c></tag>
+ <item>
+ <p>Handler filters can be specified when adding the handler,
+ or added or removed later with
+ <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso> and
+ <seealso marker="logger#remove_handler_filter-2">
+ <c>logger:remove_handler_filter/2</c></seealso>,
+ respectively.</p>
+ <p>See <seealso marker="#Filter">Filter</seealso> for more
+ information.</p>
+ <p>By default, no filters exist.</p>
+ </item>
+ <tag><c>filter_default = log | stop</c></tag>
+ <item>
+ <p>Specifies what to do with an event if all filters
+ return <c>ignore</c>.</p>
+ <p>Default is <c>log</c>.</p>
+ </item>
+ <tag><c>depth = pos_integer() | unlimited</c></tag>
+ <item>
+ <p>Specifies if the depth of terms in the log events shall
+ be limited by using control characters <c>~P</c>
+ and <c>~W</c> instead of <c>~p</c> and <c>~w</c>,
+ respectively. See
+ <seealso marker="stdlib:io#format-1"><c>io:format</c></seealso>.</p>
+ </item>
+ <tag><c>max_size = pos_integer() | unlimited</c></tag>
+ <item>
+ <p>Specifies if the size of a log event shall be limited by
+ truncating the formatted string.</p>
+ </item>
+ <tag><c>formatter = {Module::module(),Extra::term()}</c></tag>
+ <item>
+ <p>See <seealso marker="#Formatter">Formatter</seealso> for more
+ information.</p>
+ <p>The default module is <seealso marker="logger_formatter">
+ <c>logger_formatter</c></seealso>, and <c>Extra</c> is
+	  its configuration map.</p>
+ </item>
+ </taglist>
+
+ <p>Note that <c>level</c> and <c>filters</c> are obeyed by
+ Logger itself before forwarding the log events to each
+ handler, while <c>depth</c>, <c>max_size</c>
+ and <c>formatter</c> are left to the handler
+ implementation. All Logger's built-in handlers do apply these
+ configuration parameters before printing.</p>
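+
+      <p>As an illustration only, a handler configuration map using
+	the parameters above could look like this:</p>
+
+      <code>
+#{level => info,
+  filter_default => log,
+  filters => [],
+  depth => unlimited,
+  max_size => unlimited,
+  formatter => {logger_formatter,#{}}}
+      </code>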
+ </section>
+
+ </section>
+
+ <section>
+ <marker id="compatibility"/>
+ <title>Backwards compatibility with error_logger</title>
+ <p>Logger provides backwards compatibility with the old
+ <c>error_logger</c> in the following ways:</p>
+
+ <taglist>
+ <tag>Legacy event handlers</tag>
+ <item>
+ <p>To use event handlers written for <c>error_logger</c>, just
+ add your event handler with</p>
+ <code>
+error_logger:add_report_handler/1,2.
+ </code>
+ <p>This will automatically start the <c>error_logger</c>
+ event manager, and add <c>error_logger</c> as a
+ handler to <c>logger</c>, with configuration</p>
+<code>
+#{level=>info,
+ filter_default=>log,
+ filters=>[]}.
+</code>
+ <p>Note that this handler will ignore events that do not
+ originate from the old <c>error_logger</c> API, or from
+ within OTP. This means that if your code uses the logger API
+ for logging, then your log events will be discarded by this
+ handler.</p>
+ <p>Also note that <c>error_logger</c> is not overload
+ protected.</p>
+ </item>
+ <tag>Logger API</tag>
+ <item>
+ <p>The old <c>error_logger</c> API still exists, but should
+ only be used by legacy code. It will be removed in a later
+ release.</p>
+ </item>
+ <tag>Output format</tag>
+ <item>
+ <p>To get log events on the same format as produced
+ by <c>error_logger_tty_h</c> and <c>error_logger_file_h</c>,
+ use the default formatter, <c>logger_formatter</c>, with
+ configuration parameter <c>legacy_header=>true</c>. This is
+ also the default.</p>
+ </item>
+ <tag>Default format of log events from OTP</tag>
+ <item>
+ <p>By default, all log events originating from within OTP,
+	  except the former so-called "SASL reports", look the same as
+ before.</p>
+ </item>
+ <tag>SASL reports</tag>
+ <item>
+ <p>By SASL reports we mean supervisor reports, crash reports
+ and progress reports.</p>
+ <p>In earlier releases, these reports were only logged when
+ the SASL application was running, and they were printed
+	  through specific event handlers
+ named <c>sasl_report_tty_h</c>
+ and <c>sasl_report_file_h</c>.</p>
+	<p>The destination of these log events was configured by
+ environment variables for the SASL application.</p>
+ <p>Due to the specific event handlers, the output format
+ slightly differed from other log events.</p>
+ <p>As of OTP-21, the concept of SASL reports is removed,
+ meaning that the default behavior is as follows:</p>
+ <list>
+ <item>Supervisor reports, crash reports and progress reports
+ are no longer connected to the SASL application.</item>
+ <item>Supervisor reports and crash reports are logged by
+ default.</item>
+ <item>Progress reports are not logged by default, but can be
+ enabled with the kernel environment
+ variable <c>logger_log_progress</c>.</item>
+ <item>The output format is the same for all log
+ events.</item>
+ </list>
+ <p>If the old behavior is preferred, the kernel environment
+ variable <c>logger_sasl_compatible</c> can be set
+ to <c>true</c>. The old SASL environment variables can then
+ be used as before, and the SASL reports will only be printed
+ if the SASL application is running - through a second log
+ handler named <c>sasl_h</c>.</p>
+ <p>All SASL reports have a metadata
+ field <c>domain=>[beam,erlang,otp,sasl]</c>, which can be
+	  used, for example, by filters to stop or allow the
+ events.</p>
+ </item>
+ </taglist>
+ </section>
+
+
+ <section>
+ <title>Error handling</title>
+ <p>Log data is expected to be either a format string and
+ arguments, a string (unicode:chardata), or a report (map or
+ key-value list) which can be converted to a format string and
+ arguments by the handler. A default report callback should be
+ included in the log event's metadata, which can be used for
+ converting the report to a format string and arguments. The
+ handler might also do a custom conversion if the default format
+ is not desired.</p>
+ <p><c>logger</c> does, to a certain extent, check its input data
+ before forwarding a log event to the handlers, but it does not
+ evaluate conversion funs or check the validity of format strings
+ and arguments. This means that any filter or handler must be
+ careful when formatting the data of a log event, making sure
+ that it does not crash due to bad input data or faulty
+ callbacks.</p>
+ <p>If a filter or handler still crashes, logger will remove the
+ filter or handler in question from the configuration, and then
+ print a short error message on the console. A debug event
+ containing the crash reason and other details is also issued,
+ and can be seen if a handler is installed which logs on debug
+ level.</p>
+ </section>
+
+ <section>
+ <title>Example: add a handler to log debug events to file</title>
+    <p>When starting an Erlang node, the default behavior is that all
+ log events with level info and above are logged to the
+ console. In order to also log debug events, you can either
+ change the global log level to <c>debug</c> or add a separate
+ handler to take care of this. In this example we will add a new
+ handler which prints the debug events to a separate file.</p>
+ <p>First, we add an instance of logger_std_h with
+ type <c>{file,File}</c>, and we set the handler's level
+ to <c>debug</c>:</p>
+ <pre>
+1> <input>Config = #{level=>debug,logger_std_h=>#{type=>{file,"./debug.log"}}}.</input>
+#{logger_std_h => #{type => {file,"./debug.log"}},
+ level => debug}
+2> <input>logger:add_handler(debug_handler,logger_std_h,Config).</input>
+ok</pre>
+ <p>By default, the handler receives all events, so we need to add a filter
+ to stop all non-debug events:</p>
+ <pre>
+3> <input>Fun = fun(#{level:=debug}=Log,_) -> Log; (_,_) -> stop end.</input>
+#Fun&lt;erl_eval.12.98642416>
+4> <input>logger:add_handler_filter(debug_handler,allow_debug,{Fun,[]}).</input>
+ok</pre>
+ <p>And finally, we need to make sure that the logger itself allows
+ debug events. This can either be done by setting the global
+ logger level:</p>
+ <pre>
+5> <input>logger:set_logger_config(level,debug).</input>
+ok</pre>
+ <p>Or by allowing debug events from one or a few modules only:</p>
+ <pre>
+6> <input>logger:set_module_level(mymodule,debug).</input>
+ok</pre>
+
+ </section>
+
+ <section>
+ <title>Example: implement a handler</title>
+ <p>The only requirement that a handler MUST fulfill is to export
+ the following function:</p>
+  <code>log(logger:log(),logger:config()) -> ok</code>
+ <p>It may also implement the following callbacks:</p>
+ <code>
+adding_handler(logger:handler_id(),logger:config()) -> {ok,logger:config()} | {error,term()}
+removing_handler(logger:handler_id()) -> ok
+changing_config(logger:handler_id(),logger:config(),logger:config()) -> {ok,logger:config()} | {error,term()}
+ </code>
+ <p>When logger:add_handler(Id,Module,Config) is called, logger
+ will first call Module:adding_handler(Id,Config), and if it
+ returns {ok,NewConfig} the NewConfig is written to the
+ configuration database. After this, the handler may receive log
+ events as calls to Module:log/2.</p>
+ <p>A handler can be removed by calling
+ logger:remove_handler(Id). logger will call
+ Module:removing_handler(Id), and then remove the handler's
+ configuration from the configuration database.</p>
+ <p>When logger:set_handler_config is called, logger calls
+ Module:changing_config(Id,OldConfig,NewConfig). If this function
+ returns ok, the NewConfig is written to the configuration
+ database.</p>
+
+ <p>A simple handler which prints to the console could be
+ implemented as follows:</p>
+ <code>
+-module(myhandler).
+-export([log/2]).
+
+log(#{msg:={report,R}},_) ->
+ io:format("~p~n",[R]);
+log(#{msg:={string,S}},_) ->
+ io:put_chars(S);
+log(#{msg:={F,A}},_) ->
+ io:format(F,A).
+ </code>
+
+ <p>A simple handler which prints to file could be implemented like
+ this:</p>
+ <code>
+-module(myhandler).
+-export([adding_handler/2, removing_handler/2, log/2]).
+-export([init/1, handle_call/3, handle_cast/2, terminate/2]).
+
+adding_handler(Id,#{myhandler_file:=File}=Config) ->
+ {ok,Fd} = file:open(File,[append,{encoding,utf8}]),
+ {ok,Config#{myhandler_fd=>Fd}}.
+
+removing_handler(Id,#{myhandler_fd:=Fd}) ->
+ _ = file:close(Fd),
+ ok.
+
+log(#{msg:={report,R}},#{myhandler_fd:=Fd}) ->
+ io:format(Fd,"~p~n",[R]);
+log(#{msg:={string,S}},#{myhandler_fd:=Fd}) ->
+ io:put_chars(Fd,S);
+log(#{msg:={F,A}},#{myhandler_fd:=Fd}) ->
+ io:format(Fd,F,A).
+ </code>
+
+ <p>Note that none of the above handlers have any overload
+ protection, and all log events are printed directly from the
+ client process. Neither do the handlers use the formatter or
+ in any way add time or other metadata to the printed events.</p>
+
+ <p>For examples of overload protection, please refer to the
+ implementation
+ of <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
+ </seealso>.</p>
+
+ <p>Below is a simpler example of a handler which logs through one
+ single process, and uses the default formatter to gain a common
+ look of the log events.</p>
+ <p>It also uses the metadata field <c>report_cb</c>, if it exists,
+ to print reports in the way the event issuer suggests. The
+ formatter will normally do this, but if the handler either has
+    its own default (as in this example) or if the
+ given <c>report_cb</c> should not be used at all, then the
+ handler must take care of this itself.</p>
+ <code>
+-module(myhandler).
+-export([adding_handler/2, removing_handler/2, log/2]).
+-export([init/1, handle_call/3, handle_cast/2, terminate/2]).
+
+adding_handler(Id,Config) ->
+ {ok,Pid} = gen_server:start(?MODULE,Config),
+ {ok,Config#{myhandler_pid=>Pid}}.
+
+removing_handler(Id,#{myhandler_pid:=Pid}) ->
+ gen_server:stop(Pid).
+
+log(Log,#{myhandler_pid:=Pid} = Config) ->
+ gen_server:cast(Pid,{log,Log,Config}).
+
+init(#{myhandler_file:=File}) ->
+ {ok,Fd} = file:open(File,[append,{encoding,utf8}]),
+ {ok,#{file=>File,fd=>Fd}}.
+
+handle_call(_,_,State) ->
+ {reply,{error,bad_request},State}.
+
+handle_cast({log,Log,Config},#{fd:=Fd} = State) ->
+ do_log(Fd,Log,Config),
+ {noreply,State}.
+
+terminate(Reason,#{fd:=Fd}) ->
+ _ = file:close(Fd),
+ ok.
+
+do_log(Fd,#{msg:={report,R}} = Log, Config) ->
+    Fun = maps:get(report_cb,Config,fun my_report_cb/1),
+    {F,A} = Fun(R),
+    do_log(Fd,Log#{msg=>{F,A}},Config);
+do_log(Fd,Log,#{formatter:={FModule,FConfig}}) ->
+ String = FModule:format(Log,FConfig),
+ io:put_chars(Fd,String).
+
+my_report_cb(R) ->
+ {"~p",[R]}.
+ </code>
+ </section>
+
+ <section>
+ <marker id="overload_protection"/>
+ <title>Protecting the handler from overload</title>
+ <p>In order for the built-in handlers to survive, and stay responsive,
+ during periods of high load (i.e. when huge numbers of incoming
+ log requests must be handled), a mechanism for overload protection
+ has been implemented in the
+ <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>
+ and <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
+    </seealso> handlers. The mechanism, which is the same for both, works
+ as follows:</p>
+
+ <section>
+ <title>Message queue length</title>
+ <p>The handler process keeps track of the length of its message
+ queue and reacts in different ways depending on the current status.
+      The purpose is to keep the handler in, or return it as quickly
+      as possible to, a state where it can keep up with the pace
+ of incoming log requests. The memory usage of the handler must never
+ keep growing larger and larger, since that would eventually cause the
+ handler to crash. Three thresholds with associated actions have been
+ defined:</p>
+
+ <taglist>
+ <tag><c>toggle_sync_qlen</c></tag>
+ <item>
+ <p>The default value of this level is <c>10</c> messages,
+ and as long as the length of the message queue is lower, all log
+ requests are handled asynchronously. This simply means that the
+ process sending the log request (by calling a log function in the
+ logger API) does not wait for a response from the handler but
+ continues executing immediately after the request (i.e. it will not
+ be affected by the time it takes the handler to print to the log
+ device). If the message queue grows larger than this value, however,
+ the handler starts handling the log requests synchronously instead,
+ meaning the process sending the request will have to wait for a
+ response. When the handler manages to reduce the message queue to a
+ level below the <c>toggle_sync_qlen</c> threshold, asynchronous
+ operation is resumed. The switch from asynchronous to synchronous
+          mode will force the logging tempo of a few busy senders to slow down,
+          but cannot protect the handler sufficiently in situations of many
+ concurrent senders.</p>
+ </item>
+ <tag><c>drop_new_reqs_qlen</c></tag>
+ <item>
+ <p>When the message queue has grown larger than this threshold, which
+ defaults to <c>200</c> messages, the handler switches to a mode in
+ which it drops any new requests being made. Dropping a message in
+ this state means that the log function never actually sends a message
+ to the handler. The log call simply returns without an action. When
+ the length of the message queue has been reduced to a level below this
+ threshold, synchronous or asynchronous request handling mode is
+ resumed.</p>
+ </item>
+ <tag><c>flush_reqs_qlen</c></tag>
+ <item>
+ <p>Above this threshold, which defaults to <c>1000</c> messages, a
+ flush operation takes place, in which all messages buffered in the
+ process mailbox get deleted without any logging actually taking
+ place. (Processes waiting for a response from a synchronous log request
+ will receive a reply indicating that the request has been dropped).</p>
+ </item>
+ </taglist>
+
+ <p>For the overload protection algorithm to work properly, it is a
+ requirement that:</p>
+
+ <p><c>toggle_sync_qlen &lt; drop_new_reqs_qlen &lt; flush_reqs_qlen</c></p>
+
+ <p>During high load scenarios, the length of the handler message queue
+ rarely grows in a linear and predictable way. Instead, whenever the
+ handler process gets scheduled in, it can have an almost arbitrary number
+ of messages waiting in the mailbox. It's for this reason that the overload
+ protection mechanism is focused on acting quickly and quite drastically
+ (such as immediately dropping or flushing messages) as soon as a large
+ queue length is detected. </p>
+
+    <p>The thresholds listed above may be modified by the user if, e.g., a handler
+      shouldn't drop or flush messages unless the message queue length grows
+      extremely large. (The handler must be allowed to use large amounts of memory
+      under such circumstances, however.) Another example of when the user might want
+ to change the settings is if, for performance reasons, the logging processes must
+ never get blocked by synchronous log requests, while dropping or flushing requests
+ is perfectly acceptable (since it doesn't affect the performance of the
+ loggers).</p>
+
+ <p>A configuration example:</p>
+ <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+ #{logger_std_h =>
+ #{type => {file,"./system_info.log"},
+ toggle_sync_qlen => 100,
+ drop_new_reqs_qlen => 1000,
+ flush_reqs_qlen => 2000}}).
+ </code>
+ </section>
+
+ <section>
+ <title>Controlling bursts of log requests</title>
+    <p>A potential problem with large bursts of log requests is that log files
+ may get full or wrapped too quickly (in the latter case overwriting
+ previously logged data that could be of great importance). For this reason,
+ both built-in handlers offer the possibility to set a maximum level of how
+      many requests to process within a certain time frame. With this burst control
+ feature enabled, the handler will take care of bursts of log requests
+ without choking log files, or the console, with massive amounts of
+ printouts. These are the configuration parameters:</p>
+
+ <taglist>
+ <tag><c>enable_burst_limit</c></tag>
+ <item>
+ <p>This is set to <c>true</c> by default. The value <c>false</c>
+ disables the burst control feature.</p>
+ </item>
+ <tag><c>burst_limit_size</c></tag>
+ <item>
+ <p>This is how many requests should be processed within the
+ <c>burst_window_time</c> time frame. After this maximum has been
+ reached, successive requests will be dropped until the end of the
+ time frame. The default value is <c>500</c> messages.</p>
+ </item>
+ <tag><c>burst_window_time</c></tag>
+ <item>
+ <p>The default window is <c>1000</c> milliseconds long.</p>
+ </item>
+ </taglist>
+
+ <p>A configuration example:</p>
+ <code type="none">
+logger:add_handler(my_disk_log_h, logger_disk_log_h,
+ #{disk_log_opts =>
+ #{file => "./my_disk_log"},
+ logger_disk_log_h =>
+ #{burst_limit_size => 10,
+ burst_window_time => 500}}).
+ </code>
+ </section>
+
+ <section>
+ <title>Terminating a large handler</title>
+ <p>A handler process may grow large even if it can manage peaks of high load
+ without crashing. The overload protection mechanism includes user configurable
+ levels for a maximum allowed message queue length and maximum allowed memory
+ usage. This feature is disabled by default, but can be switched on by means
+ of the following configuration parameters:</p>
+
+ <taglist>
+ <tag><c>enable_kill_overloaded</c></tag>
+ <item>
+ <p>This is set to <c>false</c> by default. The value <c>true</c>
+ enables the feature.</p>
+ </item>
+ <tag><c>handler_overloaded_qlen</c></tag>
+ <item>
+ <p>This is the maximum allowed queue length. If the mailbox grows larger
+ than this, the handler process gets terminated.</p>
+ </item>
+ <tag><c>handler_overloaded_mem</c></tag>
+ <item>
+ <p>This is the maximum allowed memory usage of the handler process. If
+ the handler grows any larger, the process gets terminated.</p>
+ </item>
+ <tag><c>handler_restart_after</c></tag>
+ <item>
+ <p>If the handler gets terminated because of its queue length or
+ memory usage, it can get automatically restarted again after a
+ configurable delay time. The time is specified in milliseconds
+ and <c>5000</c> is the default value. The value <c>never</c> can
+ also be set, which prevents a restart.</p>
+ </item>
+ </taglist>
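+
+  <p>A configuration example, with values chosen only for
+    illustration and the parameters placed in the handler specific
+    map in the same way as in the earlier examples:</p>
+  <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+                   #{logger_std_h =>
+                         #{type => {file,"./system_info.log"},
+                           enable_kill_overloaded => true,
+                           handler_overloaded_qlen => 100000,
+                           handler_overloaded_mem => 10000000,
+                           handler_restart_after => 5000}}).
+  </code>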
+ </section>
+ </section>
+
+ <section>
+ <title>See Also</title>
+ <p><seealso marker="error_logger"><c>error_logger(3)</c></seealso>,
+ <seealso marker="sasl:sasl_app"><c>SASL(6)</c></seealso></p>
+ </section>
+</chapter>
diff --git a/lib/kernel/doc/src/logger_disk_log_h.xml b/lib/kernel/doc/src/logger_disk_log_h.xml
new file mode 100644
index 0000000000..90cc4fec30
--- /dev/null
+++ b/lib/kernel/doc/src/logger_disk_log_h.xml
@@ -0,0 +1,146 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_disk_log_h</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_disk_log_h.xml</file>
+ </header>
+ <module>logger_disk_log_h</module>
+ <modulesummary>A disk_log based handler for the Logger
+ application.</modulesummary>
+
+ <description>
+ <p>This is a handler for the Logger application that offers circular
+ (wrapped) logs by using the disk_log facility. Multiple instances
+ of this handler can be added to logger, and each instance will print to
+ its own disk_log file, created with the name and settings specified in
+ the handler configuration.</p>
+ <p>The default standard handler,
+ <seealso marker="logger_std_h"><c>logger_std_h</c></seealso>, can be
+ replaced by a disk_log handler at startup of the kernel application.
+ See an example of this below.</p>
+ <p>The handler has an overload protection mechanism that will keep the handler
+ process and the kernel application alive during a high load of log
+ requests. How this feature works, and how to modify the configuration,
+ is described in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>To add a new instance of the disk_log handler, use
+ <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c>
+ </seealso>. The handler configuration argument is a map which may contain
+ general configuration parameters, as documented in the
+ <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c>
+ </seealso>, as well as handler specific parameters.</p>
+ <p>The settings for the disk_log log file should be specified with the
+ key <c>disk_log_opts</c>. These settings are a subset of the disk_log
+ datatype
+ <seealso marker="disk_log#open-1"><c>dlog_option()</c></seealso>.</p>
+ <p>Parameters in the <c>disk_log_opts</c> map:</p>
+ <taglist>
+ <tag><c>file</c></tag>
+ <item>This is the full name of the disk_log log file.</item>
+ <tag><c>type</c></tag>
+ <item>This is the disk_log type, <c>wrap</c> or <c>halt</c>. The
+ default value is <c>wrap</c>.</item>
+ <tag><c>max_no_files</c></tag>
+ <item>This is the maximum number of files that disk_log will use
+ for its circular logging. The default value is <c>10</c>. (The setting
+ has no effect on a halt log).</item>
+ <tag><c>max_no_bytes</c></tag>
+ <item>This is the maximum number of bytes that will be written to
+ a log file before disk_log proceeds with the next file in order (or
+ generates an error in case of a full halt log). The default value for
+ a wrap log is <c>1048576</c> bytes, and <c>infinity</c> for a halt
+ log.</item>
+ </taglist>
+ <p>Specific configuration for the handler (represented as a sub map)
+ is specified with the key <c>logger_disk_log_h</c>. It may contain the
+ following parameter:</p>
+ <taglist>
+ <tag><c>filesync_repeat_interval</c></tag>
+ <item>
+ <p>This value (in milliseconds) specifies how often the handler will
+ do a disk_log sync operation in order to make sure that buffered data
+ gets written to disk. The handler will repeatedly attempt this
+ operation, but only perform it if something has actually been logged
+ since the last sync. The default value is <c>5000</c> milliseconds.
+ If <c>no_repeat</c> is set as value, the repeated sync operation is
+ disabled. The user can also call the
+ <seealso marker="logger_disk_log_h#disk_log_sync-1"><c>disk_log_sync/1</c>
+ </seealso> function to perform a disk_log sync.</p></item>
+ </taglist>
+ <p>A number of other configuration parameters are available for
+ customizing the overload protection behaviour. The same
+ parameters are used both in the standard handler and the disk_log handler,
+ and are documented in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>Note that when changing the configuration of the handler at runtime, by
+ calling
+ <seealso marker="logger#set_handler_config-2"><c>logger:set_handler_config/2
+ or logger:set_handler_config/3</c></seealso>, the <c>disk_log_opts</c>
+ settings may not be modified.</p>
+ <p>Example of adding a disk_log handler:</p>
+ <code type="none">
+logger:add_handler(my_disk_log_h, logger_disk_log_h,
+ #{level => error,
+ filter_default => log,
+ disk_log_opts =>
+ #{file => "./my_disk_log",
+ type => wrap,
+ max_no_files => 4,
+ max_no_bytes => 10000},
+ logger_disk_log_h =>
+ #{filesync_repeat_interval => 1000}}).
+ </code>
+ <p>In order to use the disk_log handler instead of the default standard
+ handler when starting an Erlang node, use the kernel configuration parameter
+ <seealso marker="kernel_app#configuration"><c>logger_dest</c></seealso> with
+ value <c>{disk_log,FileName}</c>. Example:</p>
+ <code type="none">
+erl -kernel logger_dest '{disk_log,"./system_disk_log"}'
+ </code>
+ </description>
+
+ <funcs>
+
+ <func>
+ <name name="disk_log_sync" arity="1" clause_i="1"/>
+ <fsummary>Writes buffered data to disk.</fsummary>
+ <desc>
+ <p>Write buffered data to disk.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_filters.xml b/lib/kernel/doc/src/logger_filters.xml
new file mode 100644
index 0000000000..d742391e35
--- /dev/null
+++ b/lib/kernel/doc/src/logger_filters.xml
@@ -0,0 +1,191 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2018</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_filters</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_filters.xml</file>
+ </header>
+ <module>logger_filters</module>
+ <modulesummary>Filters to use with logger.</modulesummary>
+
+ <description>
+ <p>Filters to use with logger. All functions exported from this
+ module can be used as logger or handler
+ filters. See <seealso marker="logger#add_logger_filter-2">
+ <c>logger:add_logger_filter/2</c></seealso>
+ and <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>
+ for more information about how filters are added.</p>
+ </description>
+
+ <funcs>
+ <func>
+ <name name="domain" arity="2"/>
+ <fsummary>Filter log events based on the domain field in metadata.</fsummary>
+ <desc>
+ <p>This filter provides a way of filtering log events based on a
+ <c>domain</c> field in <c>Metadata</c>.</p>
+
+ <p>The <c><anno>Extra</anno></c> parameter is specified when
+ adding the filter
+ via <seealso marker="logger#add_logger_filter-2">
+ <c>logger:add_logger_filter/2</c></seealso>
+ or <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>.</p>
+
+ <p>The filter compares the value of the <c>domain</c> field
+ in the log event's metadata (<c>Domain</c>)
+ to <c><anno>MatchDomain</anno></c> as follows:</p>
+
+ <taglist>
+ <tag><c><anno>Compare</anno> = starts_with</c></tag>
+ <item><p>The filter matches if <c>MatchDomain</c> is a prefix
+ of <c>Domain</c>.</p></item>
+ <tag><c><anno>Compare</anno> = prefix_of</c></tag>
+ <item><p>The filter matches if <c>Domain</c> is a prefix
+ of <c>MatchDomain</c>.</p></item>
+ <tag><c><anno>Compare</anno> = equals</c></tag>
+ <item><p>The filter matches if <c>Domain</c> is equal
+ to <c>MatchDomain</c>.</p></item>
+ <tag><c><anno>Compare</anno> = no_domain</c></tag>
+ <item><p>The filter matches if there is no domain field in
+ metadata. In this case <c><anno>MatchDomain</anno></c> shall
+ be <c>[]</c>.</p></item>
+ </taglist>
+
+ <p>If the filter matches and <c><anno>Action</anno> =
+ log</c>, the log event is allowed. If the filter matches
+ and <c><anno>Action</anno> = stop</c>, the log event is
+ stopped.</p>
+
+ <p>If the filter does not match, it returns <c>ignore</c>,
+ meaning that other filters, or the value of the
+ configuration parameter <c>filter_default</c>, will decide
+ if the event is allowed or not.</p>
+
+ <p>Log events that do not contain any domain field will
+ only match when <c><anno>Compare</anno> = no_domain</c>.</p>
+
+ <p>Example: stop all events with
+ domain <c>[beam,erlang,otp,sasl|_]</c></p>
+
+ <code>
+logger:set_handler_config(h1,filter_default,log). % this is the default
+Filter = {fun logger_filters:domain/2,{stop,starts_with,[beam,erlang,otp,sasl]}}.
+logger:add_handler_filter(h1,no_sasl,Filter).
+ok</code>
+ </desc>
+ </func>
+
+ <func>
+ <name name="level" arity="2"/>
+ <fsummary>Filter log events based on the log level.</fsummary>
+ <desc>
+ <p>This filter provides a way of filtering log events based
+ on the log level. It matches log events by comparing the
+ log level with a predefined <c>MatchLevel</c>.</p>
+
+ <p>The <c><anno>Extra</anno></c> parameter is specified when
+ adding the filter
+ via <seealso marker="logger#add_logger_filter-2">
+ <c>logger:add_logger_filter/2</c></seealso>
+ or <seealso marker="logger#add_handler_filter-3">
+ <c>logger:add_handler_filter/3</c></seealso>.</p>
+
+ <p>The filter compares the value of the event's log level
+ (<c>Level</c>) to <c><anno>MatchLevel</anno></c> by
+ calling <seealso marker="logger#compare_levels-2">
+ <c>logger:compare_levels(Level,MatchLevel) -> CmpRet</c></seealso>. It
+ matches the event if:</p>
+
+ <list>
+ <item><c>CmpRet = eq</c> and <c><anno>Operator</anno> =
+ eq | lteq | gteq</c></item>
+ <item><c>CmpRet = lt</c> and <c><anno>Operator</anno> =
+ lt | lteq | neq</c></item>
+ <item><c>CmpRet = gt</c> and <c><anno>Operator</anno> =
+ gt | gteq | neq</c></item>
+ </list>
+
+ <p>If the filter matches and <c><anno>Action</anno> =
+ log</c>, the log event is allowed. If the filter matches
+ and <c><anno>Action</anno> = stop</c>, the log event is
+ stopped.</p>
+
+ <p>If the filter does not match, it returns <c>ignore</c>,
+ meaning that other filters, or the value of the
+ configuration parameter <c>filter_default</c>, will decide
+ if the event is allowed or not.</p>
+
+ <p>Example: only allow debug level log events</p>
+
+ <code>
+logger:set_handler_config(h1,filter_default,stop).
+Filter = {fun logger_filters:level/2,{log,eq,debug}}.
+logger:add_handler_filter(h1,debug_only,Filter).
+ok</code>
+ </desc>
+ </func>
+
+ <func>
+ <name name="progress" arity="2"/>
+ <fsummary>Filter progress reports from supervisor and application_controller.</fsummary>
+ <desc>
+ <p>This filter matches all progress reports
+ from <c>supervisor</c> and <c>application_controller</c>.</p>
+
+ <p>If <c><anno>Extra</anno> = log</c>, the progress reports
+ are allowed. If <c><anno>Extra</anno> = stop</c>, the
+ progress reports are stopped.</p>
+
+ <p>The filter returns <c>ignore</c> for all other log events.</p>
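+
+ <p>Example (a sketch; the filter id <c>stop_progress</c> is an arbitrary
+ name): stop all progress reports</p>
+
+ <code>
+logger:add_logger_filter(stop_progress,{fun logger_filters:progress/2,stop}).
+ok</code>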
+ </desc>
+ </func>
+
+ <func>
+ <name name="remote_gl" arity="2"/>
+ <fsummary>Filter events with group leader on remote node.</fsummary>
+ <desc>
+ <p>This filter matches all events originating from a process
+ that has its group leader on a remote node.</p>
+
+ <p>If <c><anno>Extra</anno> = log</c>, the matching events
+ are allowed. If <c><anno>Extra</anno> = stop</c>, the
+ matching events are stopped.</p>
+
+ <p>The filter returns <c>ignore</c> for all other log events.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_formatter.xml b/lib/kernel/doc/src/logger_formatter.xml
new file mode 100644
index 0000000000..6a17e3641f
--- /dev/null
+++ b/lib/kernel/doc/src/logger_formatter.xml
@@ -0,0 +1,157 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_formatter</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_formatter.xml</file>
+ </header>
+ <module>logger_formatter</module>
+ <modulesummary>Default formatter for the Logger application.</modulesummary>
+
+ <description>
+ <p>Default formatter for the Logger application.</p>
+ </description>
+
+ <datatypes>
+ <datatype>
+ <name name="template"/>
+ <desc>
+ </desc>
+ </datatype>
+ </datatypes>
+
+ <funcs>
+ <func>
+ <name name="format" arity="2"/>
+ <fsummary>Formats the given message.</fsummary>
+ <desc>
+ <p>Formats the given message.</p>
+ <p>The template is a list of atoms, tuples and strings. Atoms
+ can be <c>level</c> or <c>msg</c>, which are placeholders
+ for the severity level and the log message,
+ respectively. Tuples are interpreted as placeholders for
+ metadata. Each element in the tuple must be an atom which
+ matches a key in the nested metadata map, e.g. the
+ tuple <c>{key1,key2}</c> will be replaced by the value of
+ the <c>key2</c> field in this nested map (the value will be
+ converted to a string):</p>
+
+<code>
+#{key1=>#{key2=>my_value,
+ ...},
+ ...}</code>
+
+
+ <p> Strings are printed literally.</p>
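+
+ <p>For example, the following template (shown for illustration only)
+ prints the level, the <c>pid</c> field from metadata, and the message,
+ separated by literal strings:</p>
+
+<code>
+[level,": ",{pid}," | ",msg,"\n"]</code>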
+
+ <p><c>depth</c> is a positive integer representing the maximum
+ depth to which terms shall be printed by this
+ formatter. Format strings passed to this formatter are
+ rewritten. The format controls ~p and ~w are replaced with
+ ~P and ~W, respectively, and the value is used as the depth
+ parameter. For details, see
+ <seealso marker="stdlib:io#format-2">io:format/2,3</seealso>
+ in STDLIB.</p>
+
+ <p><c>chars_limit</c> is a positive integer representing the
+ value of the option with the same name to be used when calling
+ <seealso marker="stdlib:io#format-3">io:format/3</seealso>. This
+ value limits the total number of characters printed by the
+ formatter. Note that this is a soft limit. For a hard
+ truncation limit, see option <c>max_size</c>.</p>
+
+ <p><c>max_size</c> is a positive integer representing the
+ maximum size a string returned from this formatter can
+ have. If the formatted string is longer, after possibly
+ being limited by <c>depth</c> and/or <c>chars_limit</c>, it
+ will be truncated.</p>
+
+ <p><c>utc</c> is a boolean. If set to true, all dates are
+ displayed in Coordinated Universal Time (UTC). Default
+ is <c>false</c>.</p>
+
+ <p><c>report_cb</c> must be a function with arity 1,
+ returning <c>{Format,Args}</c>. This function will replace
+ any <c>report_cb</c> found in metadata.</p>
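+
+ <p>For example (illustrative), <c>fun(Report) -> {"~tp",[Report]} end</c>
+ is a valid report callback that prints the whole report term.</p>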
+
+ <p>If <c>single_line=true</c>, all newlines in the message are
+ replaced with <c>", "</c>, and whitespace directly following
+ newlines is removed. Note that newlines added by the
+ formatter template are not replaced.</p>
+
+ <p>If <c>legacy_header=true</c> a header field is added to
+ logger_formatter's part of <c>Metadata</c>. The value of
+ this field is a string similar to the header created by the
+ old <c>error_logger</c> event handlers. It can be included
+ in the log event by adding the
+ tuple <c>{logger_formatter,header}</c> to the template.</p>
+
+ <p>The default template when <c>legacy_header=true</c> is</p>
+
+ <code>[{logger_formatter,header},"\n",msg,"\n"]</code>
+
+ <p>which will cause log entries like this:</p>
+
+ <code>=ERROR REPORT==== 29-Dec-2017::13:30:51.245123 ===
+ process: &lt;0.74.0&gt;
+ exit_reason: "Something went wrong"</code>
+
+ <p>Note that all eight levels can occur in the header, not
+ only <c>ERROR</c>, <c>WARNING</c> or <c>INFO</c>, and that
+ microseconds are added at the end of the
+ timestamp.</p>
+
+ <p>The default template when <c>single_line=true</c> is</p>
+
+ <code>[time," ",level,": ",msg,"\n"]</code>
+
+ <p>which will cause log entries like this:</p>
+
+ <code>2017-12-29 13:31:49.640317 error: process: &lt;0.74.0&gt;, exit_reason: "Something went wrong"</code>
+
+ <p>The default template when both <c>legacy_header</c> and
+ <c>single_line</c> are set to false is:</p>
+
+ <code>[time," ",level,":\n",msg,"\n"]</code>
+
+ <p>which will cause log entries like this:</p>
+
+ <code>2017-12-29 13:32:25.191925 error:
+ process: &lt;0.74.0&gt;
+ exit_reason: "Something went wrong"</code>
+
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/logger_std_h.xml b/lib/kernel/doc/src/logger_std_h.xml
new file mode 100644
index 0000000000..fe9b9ca5a9
--- /dev/null
+++ b/lib/kernel/doc/src/logger_std_h.xml
@@ -0,0 +1,133 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>logger_std_h</title>
+ <prepared></prepared>
+ <responsible></responsible>
+ <docno></docno>
+ <approved></approved>
+ <checked></checked>
+ <date></date>
+ <rev>A</rev>
+ <file>logger_std_h.xml</file>
+ </header>
+ <module>logger_std_h</module>
+ <modulesummary>Default handler for the Logger application.</modulesummary>
+
+ <description>
+ <p>This is the default handler for the Logger
+ application. Multiple instances of this handler can be added to
+ logger, and each instance will print logs to <c>standard_io</c>,
+ <c>standard_error</c>, or to a file. The default instance that starts
+ with the kernel application is named <c>logger_std_h</c>, which is the
+ name to be used for reconfiguration.</p>
+ <p>The handler has an overload protection mechanism that will keep the handler
+ process and the kernel application alive during a high load of log
+ requests. How this feature works, and how to modify the configuration,
+ is described in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>To add a new instance of the standard handler, use
+ <seealso marker="logger#add_handler-3"><c>logger:add_handler/3</c>
+ </seealso>. The handler configuration argument is a map which may contain
+ general configuration parameters, as documented in the
+ <seealso marker="logger_chapter#handler_configuration"><c>User's Guide</c>
+ </seealso>, as well as handler specific parameters. The specific parameters
+ are stored in a sub map with the key <c>logger_std_h</c>. The following
+ keys and values may be specified:</p>
+ <taglist>
+ <tag><c>type</c></tag>
+ <item>
+ <p>This will have the value <c>standard_io</c>, <c>standard_error</c>,
+ <c>{file,LogFileName}</c>, or <c>{file,LogFileName,LogFileOpts}</c>,
+ where <c>standard_io</c> is the default value for type. It is recommended
+ not to specify <c>LogFileOpts</c> unless absolutely necessary. The
+ default options used by the handler to open a file for logging are:
+ <c>raw</c>, <c>append</c> and <c>delayed_write</c>. The standard
+ handler does not have support for circular logging. Use the
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c>
+ </seealso> handler for this.</p></item>
+ <tag><c>filesync_repeat_interval</c></tag>
+ <item>
+ <p>This value (in milliseconds) specifies how often the handler will
+ do a file sync operation in order to make sure that buffered data gets
+ written to disk. The handler will repeatedly attempt this
+ operation, but only perform it if something has actually been logged
+ since the last sync. The default value is <c>5000</c> milliseconds.
+ If <c>no_repeat</c> is set as value, the repeated file sync operation
+ is disabled, and the operating system settings then determine
+ how quickly or slowly data gets written to disk. The user can also call
+ the <seealso marker="logger_std_h#filesync-1"><c>filesync/1</c></seealso>
+ function to perform a file sync.</p></item>
+ </taglist>
+ <p>A number of other configuration parameters are available for
+ customizing the overload protection behaviour. The same
+ parameters are used both in the standard handler and the disk_log handler,
+ and are documented in the
+ <seealso marker="logger_chapter#overload_protection"><c>User's Guide</c>
+ </seealso>.</p>
+ <p>Note that when changing the configuration of the handler at runtime, by
+ calling
+ <seealso marker="logger#set_handler_config-2"><c>logger:set_handler_config/2</c>
+ </seealso>, or
+ <seealso marker="logger#set_handler_config-3"><c>logger:set_handler_config/3</c>
+ </seealso>,
+ the <c>type</c> parameter may not be modified.</p>
+ <p>Example of adding a standard handler:</p>
+ <code type="none">
+logger:add_handler(my_standard_h, logger_std_h,
+ #{level => info,
+ filter_default => log,
+ logger_std_h =>
+ #{type => {file,"./system_info.log"},
+ filesync_repeat_interval => 1000}}).
+ </code>
+ <p>In order to configure the default handler (that starts initially with
+ the kernel application) to log to file instead of <c>standard_io</c>,
+ use the kernel configuration parameter
+ <seealso marker="kernel_app#configuration"><c>logger_dest</c></seealso> with
+ value <c>{file,FileName}</c>. Example:</p>
+ <code type="none">
+erl -kernel logger_dest '{file,"./erl.log"}'
+ </code>
+ <p>An example of how to replace the standard handler with a disk_log handler
+ at startup can be found in the manual of
+ <seealso marker="logger_disk_log_h"><c>logger_disk_log_h</c></seealso>.</p>
+ </description>
+
+ <funcs>
+
+ <func>
+ <name name="filesync" arity="1" clause_i="1"/>
+ <fsummary>Writes buffered data to disk.</fsummary>
+ <desc>
+ <p>Write buffered data to disk.</p>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
+
+
diff --git a/lib/kernel/doc/src/part.xml b/lib/kernel/doc/src/part.xml
new file mode 100644
index 0000000000..68eb4530e2
--- /dev/null
+++ b/lib/kernel/doc/src/part.xml
@@ -0,0 +1,40 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE part SYSTEM "part.dtd">
+
+<part xmlns:xi="http://www.w3.org/2001/XInclude">
+ <header>
+ <copyright>
+ <year>1996</year><year>2017</year>
+ <holder>Ericsson AB. All Rights Reserved.</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ </legalnotice>
+
+ <title>Logger User's Guide</title>
+ <prepared>OTP Team</prepared>
+ <docno></docno>
+ <date>2017-12-01</date>
+ <rev>0.1</rev>
+ <file>part.xml</file>
+ </header>
+ <description>
+ <p>Logger, the logging facility in the Kernel application,
+ provides an API for logging, with support for configurable
+ handlers, filters and formatters.</p>
+ </description>
+ <xi:include href="introduction_chapter.xml"/>
+ <xi:include href="logger_chapter.xml"/>
+</part>
+
diff --git a/lib/kernel/doc/src/ref_man.xml b/lib/kernel/doc/src/ref_man.xml
index 5cd77e0f6f..c06914d23d 100644
--- a/lib/kernel/doc/src/ref_man.xml
+++ b/lib/kernel/doc/src/ref_man.xml
@@ -52,6 +52,11 @@
<xi:include href="inet.xml"/>
<xi:include href="inet_res.xml"/>
<xi:include href="init_stub.xml"/>
+ <xi:include href="logger.xml"/>
+ <xi:include href="logger_filters.xml"/>
+ <xi:include href="logger_formatter.xml"/>
+ <xi:include href="logger_std_h.xml"/>
+ <xi:include href="logger_disk_log_h.xml"/>
<xi:include href="net_adm.xml"/>
<xi:include href="net_kernel.xml"/>
<xi:include href="os.xml"/>
diff --git a/lib/kernel/doc/src/specs.xml b/lib/kernel/doc/src/specs.xml
index 29d52f23bb..bcc422930e 100644
--- a/lib/kernel/doc/src/specs.xml
+++ b/lib/kernel/doc/src/specs.xml
@@ -20,6 +20,11 @@
<xi:include href="../specs/specs_inet.xml"/>
<xi:include href="../specs/specs_inet_res.xml"/>
<xi:include href="../specs/specs_init_stub.xml"/>
+ <xi:include href="../specs/specs_logger.xml"/>
+ <xi:include href="../specs/specs_logger_filters.xml"/>
+ <xi:include href="../specs/specs_logger_formatter.xml"/>
+ <xi:include href="../specs/specs_logger_std_h.xml"/>
+ <xi:include href="../specs/specs_logger_disk_log_h.xml"/>
<xi:include href="../specs/specs_net_adm.xml"/>
<xi:include href="../specs/specs_net_kernel.xml"/>
<xi:include href="../specs/specs_os.xml"/>
diff --git a/lib/kernel/include/logger.hrl b/lib/kernel/include/logger.hrl
new file mode 100644
index 0000000000..2143ccd297
--- /dev/null
+++ b/lib/kernel/include/logger.hrl
@@ -0,0 +1,49 @@
+-ifndef(LOGGER_HRL).
+-define(LOGGER_HRL,true).
+-define(LOG_EMERGENCY(A),?DO_LOG(emergency,[A])).
+-define(LOG_EMERGENCY(A,B),?DO_LOG(emergency,[A,B])).
+-define(LOG_EMERGENCY(A,B,C),?DO_LOG(emergency,[A,B,C])).
+
+-define(LOG_ALERT(A),?DO_LOG(alert,[A])).
+-define(LOG_ALERT(A,B),?DO_LOG(alert,[A,B])).
+-define(LOG_ALERT(A,B,C),?DO_LOG(alert,[A,B,C])).
+
+-define(LOG_CRITICAL(A),?DO_LOG(critical,[A])).
+-define(LOG_CRITICAL(A,B),?DO_LOG(critical,[A,B])).
+-define(LOG_CRITICAL(A,B,C),?DO_LOG(critical,[A,B,C])).
+
+-define(LOG_ERROR(A),?DO_LOG(error,[A])).
+-define(LOG_ERROR(A,B),?DO_LOG(error,[A,B])).
+-define(LOG_ERROR(A,B,C),?DO_LOG(error,[A,B,C])).
+
+-define(LOG_WARNING(A),?DO_LOG(warning,[A])).
+-define(LOG_WARNING(A,B),?DO_LOG(warning,[A,B])).
+-define(LOG_WARNING(A,B,C),?DO_LOG(warning,[A,B,C])).
+
+-define(LOG_NOTICE(A),?DO_LOG(notice,[A])).
+-define(LOG_NOTICE(A,B),?DO_LOG(notice,[A,B])).
+-define(LOG_NOTICE(A,B,C),?DO_LOG(notice,[A,B,C])).
+
+-define(LOG_INFO(A),?DO_LOG(info,[A])).
+-define(LOG_INFO(A,B),?DO_LOG(info,[A,B])).
+-define(LOG_INFO(A,B,C),?DO_LOG(info,[A,B,C])).
+
+-define(LOG_DEBUG(A),?DO_LOG(debug,[A])).
+-define(LOG_DEBUG(A,B),?DO_LOG(debug,[A,B])).
+-define(LOG_DEBUG(A,B,C),?DO_LOG(debug,[A,B,C])).
+
+-define(LOCATION,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ line=>?LINE,
+ file=>?FILE}).
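+
+%% Example usage (illustrative):
+%%   ?LOG_ERROR("something unexpected: ~tp", [Reason])
+%% The macros expand to a check via logger:allow/2 and, if logging is
+%% allowed for the given level and calling module, a call to
+%% logger:macro_log/3..5 (see ?DO_LOG below).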
+
+%%%-----------------------------------------------------------------
+%%% Internal, i.e. not intended for direct use in code - use above
+%%% macros instead!
+-define(DO_LOG(Level,Args),
+ case logger:allow(Level,?MODULE) of
+ true ->
+ apply(logger,macro_log,[?LOCATION,Level|Args]);
+ false ->
+ ok
+ end).
+-endif.
diff --git a/lib/kernel/src/Makefile b/lib/kernel/src/Makefile
index 0bc9f121a0..702845512c 100644
--- a/lib/kernel/src/Makefile
+++ b/lib/kernel/src/Makefile
@@ -109,6 +109,17 @@ MODULES = \
kernel_refc \
local_udp \
local_tcp \
+ logger \
+ logger_backend \
+ logger_config \
+ logger_std_h \
+ logger_disk_log_h \
+ logger_h_common \
+ logger_filters \
+ logger_formatter \
+ logger_server \
+ logger_simple \
+ logger_sup \
net \
net_adm \
net_kernel \
@@ -132,13 +143,14 @@ MODULES = \
HRL_FILES= ../include/file.hrl ../include/inet.hrl ../include/inet_sctp.hrl \
../include/dist.hrl ../include/dist_util.hrl \
- ../include/net_address.hrl
+ ../include/net_address.hrl ../include/logger.hrl
INTERNAL_HRL_FILES= application_master.hrl disk_log.hrl \
erl_epmd.hrl hipe_ext_format.hrl \
inet_dns.hrl inet_res.hrl \
inet_boot.hrl inet_config.hrl inet_int.hrl \
- inet_dns_record_adts.hrl
+ inet_dns_record_adts.hrl \
+ logger_internal.hrl logger_h_common.hrl
ERL_FILES= $(MODULES:%=%.erl)
@@ -223,7 +235,7 @@ release_docs_spec:
# Include dependencies -- list below added by Kostis Sagonas
-$(EBIN)/application_controller.beam: application_master.hrl
+$(EBIN)/application_controller.beam: application_master.hrl ../include/logger.hrl
$(EBIN)/application_master.beam: application_master.hrl
$(EBIN)/auth.beam: ../include/file.hrl
$(EBIN)/code.beam: ../include/file.hrl
@@ -234,6 +246,7 @@ $(EBIN)/disk_log_server.beam: disk_log.hrl
$(EBIN)/dist_util.beam: ../include/dist_util.hrl ../include/dist.hrl
$(EBIN)/erl_boot_server.beam: inet_boot.hrl
$(EBIN)/erl_epmd.beam: inet_int.hrl erl_epmd.hrl
+$(EBIN)/error_logger.beam: logger_internal.hrl ../include/logger.hrl
$(EBIN)/file.beam: ../include/file.hrl file_int.hrl
$(EBIN)/file_io_server.beam: ../include/file.hrl file_int.hrl
$(EBIN)/gen_tcp.beam: inet_int.hrl
@@ -259,6 +272,16 @@ $(EBIN)/inet_udp.beam: inet_int.hrl
$(EBIN)/inet_sctp.beam: inet_int.hrl ../include/inet_sctp.hrl
$(EBIN)/local_udp.beam: inet_int.hrl
$(EBIN)/local_tcp.beam: inet_int.hrl
+$(EBIN)/logger.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_backend.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_config.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_disk_log_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
+$(EBIN)/logger_filters.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_formatter.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_server.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_simple.beam: logger_internal.hrl ../include/logger.hrl
+$(EBIN)/logger_std_h.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl ../include/file.hrl
+$(EBIN)/logger_h_common.beam: logger_h_common.hrl logger_internal.hrl ../include/logger.hrl
$(EBIN)/net_kernel.beam: ../include/net_address.hrl
$(EBIN)/os.beam: ../include/file.hrl
$(EBIN)/ram_file.beam: ../include/file.hrl
diff --git a/lib/kernel/src/application_controller.erl b/lib/kernel/src/application_controller.erl
index 3b642f5873..b9cb722575 100644
--- a/lib/kernel/src/application_controller.erl
+++ b/lib/kernel/src/application_controller.erl
@@ -44,6 +44,7 @@
keyfind/3, keydelete/3, keyreplace/4]).
-include("application_master.hrl").
+-include("logger.hrl").
-define(AC, ?MODULE). % Name of process
@@ -1546,9 +1547,8 @@ do_change_apps(Applications, Config, OldAppls) ->
%% Report errors, but do not terminate
%% (backwards compatible behaviour)
lists:foreach(fun({error, {SysFName, Line, Str}}) ->
- Str2 = lists:flatten(io_lib:format("~tp: ~w: ~ts~n",
- [SysFName, Line, Str])),
- error_logger:format(Str2, [])
+ ?LOG_ERROR("~tp: ~w: ~ts~n",[SysFName, Line, Str],
+ #{error_logger=>#{tag=>error}})
end,
Errors),
@@ -1631,8 +1631,9 @@ make_term(Str) ->
end.
handle_make_term_error(Mod, Reason, Str) ->
- error_logger:format("application_controller: ~ts: ~ts~n",
- [Mod:format_error(Reason), Str]),
+ ?LOG_ERROR("application_controller: ~ts: ~ts~n",
+ [Mod:format_error(Reason), Str],
+ #{error_logger=>#{tag=>error}}),
throw({error, {bad_environment_value, Str}}).
get_env_i(Name, #state{conf_data = ConfData}) when is_list(ConfData) ->
@@ -1913,19 +1914,25 @@ config_error() ->
"configuration file must contain ONE list ended by <dot>"}}.
%%-----------------------------------------------------------------
-%% Info messages sent to error_logger
+%% Info messages sent to logger
%%-----------------------------------------------------------------
info_started(Name, Node) ->
- Rep = [{application, Name},
- {started_at, Node}],
- error_logger:info_report(progress, Rep).
+ ?LOG_INFO(#{label=>{application_controller,progress},
+ report=>[{application, Name},
+ {started_at, Node}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
info_exited(Name, Reason, Type) ->
- Rep = [{application, Name},
- {exited, Reason},
- {type, Type}],
- error_logger:info_report(Rep).
-
+ ?LOG_INFO(#{label=>{application_controller,exit},
+ report=>[{application, Name},
+ {exited, Reason},
+ {type, Type}]},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun logger:format_otp_report/1,
+ error_logger=>#{tag=>info_report,type=>std_info}}).
%%-----------------------------------------------------------------
%% Reply to all processes waiting this application to be started.
diff --git a/lib/kernel/src/code_server.erl b/lib/kernel/src/code_server.erl
index f5a890cb95..bbfa2a995d 100644
--- a/lib/kernel/src/code_server.erl
+++ b/lib/kernel/src/code_server.erl
@@ -1434,14 +1434,20 @@ all_loaded(Db) ->
-spec error_msg(io:format(), [term()]) -> 'ok'.
error_msg(Format, Args) ->
- Msg = {notify,{error, group_leader(), {self(), Format, Args}}},
- error_logger ! Msg,
+ logger ! {log,error,Format,Args,
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:monotonic_time(microsecond),
+ error_logger=>#{tag=>error}}},
ok.
-spec info_msg(io:format(), [term()]) -> 'ok'.
info_msg(Format, Args) ->
- Msg = {notify,{info_msg, group_leader(), {self(), Format, Args}}},
- error_logger ! Msg,
+ logger ! {log,info,Format,Args,
+ #{pid=>self(),
+ gl=>group_leader(),
+ time=>erlang:monotonic_time(microsecond),
+ error_logger=>#{tag=>info_msg}}},
ok.
objfile_extension() ->
diff --git a/lib/kernel/src/error_logger.erl b/lib/kernel/src/error_logger.erl
index 585507c545..0706220a94 100644
--- a/lib/kernel/src/error_logger.erl
+++ b/lib/kernel/src/error_logger.erl
@@ -19,22 +19,23 @@
%%
-module(error_logger).
--export([start/0,start_link/0,format/2,error_msg/1,error_msg/2,error_report/1,
+-include("logger_internal.hrl").
+
+-export([start/0,start_link/0,stop/0,
+ format/2,error_msg/1,error_msg/2,error_report/1,
error_report/2,info_report/1,info_report/2,warning_report/1,
warning_report/2,error_info/1,
info_msg/1,info_msg/2,warning_msg/1,warning_msg/2,
- logfile/1,tty/1,swap_handler/1,
+ logfile/1,tty/1,
add_report_handler/1,add_report_handler/2,
- delete_report_handler/1]).
+ delete_report_handler/1,
+ which_report_handlers/0]).
--export([init/1,
- handle_event/2, handle_call/2, handle_info/2,
- terminate/2]).
+%% logger callbacks
+-export([adding_handler/2, removing_handler/1, log/2]).
-export([get_format_depth/0, limit_term/1]).
--define(buffer_size, 10).
-
%%-----------------------------------------------------------------
%% Types used in this file
%%-----------------------------------------------------------------
@@ -43,8 +44,6 @@
| 'info' | 'info_msg' | 'info_report'
| 'warning_msg' | 'warning_report'.
--type state() :: {non_neg_integer(), non_neg_integer(), [term()]}.
-
%%% BIF
-export([warning_map/0]).
@@ -59,26 +58,137 @@ warning_map() ->
%%-----------------------------------------------------------------
--spec start() -> {'ok', pid()} | {'error', any()}.
+%%%-----------------------------------------------------------------
+%%% Start the event manager process under logger_sup, which is part of
+%%% the kernel application's supervision tree.
+-spec start() -> 'ok' | {'error', any()}.
start() ->
- case gen_event:start({local, error_logger}) of
- {ok, Pid} ->
- simple_logger(?buffer_size),
- {ok, Pid};
- Error -> Error
+ case whereis(?MODULE) of
+ undefined ->
+ ErrorLogger =
+ #{id => ?MODULE,
+ start => {?MODULE, start_link, []},
+ restart => transient,
+ shutdown => 2000,
+ type => worker,
+ modules => dynamic},
+ case supervisor:start_child(logger_sup, ErrorLogger) of
+ {ok,_} ->
+ ok;
+ Error ->
+ Error
+ end;
+ _ ->
+ ok
end.
+%%%-----------------------------------------------------------------
+%%% Start callback specified in child specification to supervisor, see start/0
-spec start_link() -> {'ok', pid()} | {'error', any()}.
start_link() ->
- case gen_event:start_link({local, error_logger}) of
- {ok, Pid} ->
- simple_logger(?buffer_size),
- {ok, Pid};
- Error -> Error
+ gen_event:start_link({local, ?MODULE},
+ [{spawn_opt,[{message_queue_data, off_heap}]}]).
+
+%%%-----------------------------------------------------------------
+%%% Stop the event manager
+-spec stop() -> ok.
+stop() ->
+ _ = supervisor:terminate_child(logger_sup,?MODULE),
+ _ = supervisor:delete_child(logger_sup,?MODULE),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Callbacks for logger
+-spec adding_handler(logger:handler_id(),logger:config()) ->
+ {ok,logger:config()} | {error,term()}.
+adding_handler(?MODULE,Config) ->
+ case start() of
+ ok ->
+ {ok,Config};
+ Error ->
+ Error
end.
+-spec removing_handler(logger:handler_id()) -> ok.
+removing_handler(?MODULE) ->
+ stop(),
+ ok.
+
+-spec log(logger:log(),logger:config()) -> ok.
+log(#{level:=Level,msg:=Msg,meta:=Meta},_Config) ->
+ do_log(Level,Msg,Meta).
+
+do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag,type:=Type}}=Meta) ->
+ %% From error_logger:*_report/1,2, or logger call which added
+ %% error_logger data to obtain backwards compatibility with
+ %% error_logger:*_report/1,2
+ Report =
+ case Msg of
+ #{label:=_,report:=R} -> R;
+ _ -> Msg
+ end,
+ notify(Level,Tag,Type,Report,Meta);
+do_log(Level,{report,Msg},#{?MODULE:=#{tag:=Tag}}=Meta) ->
+ {Format,Args} =
+ case Msg of
+ #{label:=_,format:=F,args:=A} ->
+ %% From error_logger:*_msg/1,2.
+ %% In order to be backwards compatible with handling
+ %% of faulty parameters to error_logger:*_msg/1,2,
+ %% don't use report_cb here.
+ {F,A};
+ _ ->
+ %% From logger call which added error_logger data to
+ %% obtain backwards compatibility with error_logger:*_msg/1,2
+ RCBFun=maps:get(report_cb,Meta,fun logger:format_report/1),
+ try RCBFun(Msg) of
+ {F,A} when is_list(F), is_list(A) ->
+ {F,A};
+ Other ->
+ {"REPORT_CB ERROR: ~tp; Returned: ~tp",[Msg,Other]}
+ catch C:R ->
+ {"REPORT_CB CRASH: ~tp; Reason: ~tp",[Msg,{C,R}]}
+ end
+ end,
+ notify(Level,Tag,Format,Args,Meta);
+do_log(Level,{Format,Args},#{?MODULE:=#{tag:=Tag}}=Meta)
+ when is_list(Format), is_list(Args) ->
+ %% From logger call which added error_logger data to obtain
+ %% backwards compatibility with error_logger:*_msg/1,2
+ notify(Level,Tag,Format,Args,Meta);
+do_log(_Level,_Msg,_Meta) ->
+ %% Ignore the rest - i.e. to get backwards compatibility with
+ %% error_logger, you must use the error_logger API for logging.
+ %% Some modules within OTP go around this by adding an
+ %% error_logger field to its metadata. This is done only to allow
+ %% complete backwards compatibility for log events originating
+ %% from within OTP, while still using the new logger interface.
+ ok.
+
+-spec notify(logger:level(), msg_tag(), any(), any(), map()) -> 'ok'.
+notify(Level,Tag0,FormatOrType0,ArgsOrReport,#{pid:=Pid0,gl:=GL,?MODULE:=My}) ->
+ Tag = fix_warning_tag(Level,Tag0),
+ Pid = case maps:get(emulator,My,false) of
+ true -> emulator;
+ _ -> Pid0
+ end,
+ FormatOrType = fix_warning_type(Level,FormatOrType0),
+ gen_event:notify(?MODULE,{Tag,GL,{Pid,FormatOrType,ArgsOrReport}}).
+
+%% This is to fix the case when the client has explicitly added the
+%% error logger tag and type in metadata, and not checked the warning map.
+fix_warning_tag(error,warning_msg) -> error;
+fix_warning_tag(error,warning_report) -> error_report;
+fix_warning_tag(info,warning_msg) -> info_msg;
+fix_warning_tag(info,warning_report) -> info_report;
+fix_warning_tag(_,Tag) -> Tag.
+
+fix_warning_type(error,std_warning) -> std_error;
+fix_warning_type(info,std_warning) -> std_info;
+fix_warning_type(_,Type) -> Type.
+
%%-----------------------------------------------------------------
%% These two simple old functions generate events tagged 'error'
%% Used for simple messages; error or information.
@@ -95,14 +205,18 @@ error_msg(Format) ->
Data :: list().
error_msg(Format, Args) ->
- notify({error, group_leader(), {self(), Format, Args}}).
+ logger:log(error,
+ #{label=>{?MODULE,error_msg},
+ format=>Format,
+ args=>Args},
+ meta(error)).
-spec format(Format, Data) -> 'ok' when
Format :: string(),
Data :: list().
format(Format, Args) ->
- notify({error, group_leader(), {self(), Format, Args}}).
+ error_msg(Format, Args).
%%-----------------------------------------------------------------
%% This functions should be used for error reports. Events
@@ -124,7 +238,10 @@ error_report(Report) ->
Report :: report().
error_report(Type, Report) ->
- notify({error_report, group_leader(), {self(), Type, Report}}).
+ logger:log(error,
+ #{label=>{?MODULE,error_report},
+ report=>Report},
+ meta(error_report,Type)).
%%-----------------------------------------------------------------
%% This function should be used for warning reports.
@@ -146,7 +263,8 @@ warning_report(Report) ->
Report :: report().
warning_report(Type, Report) ->
- {Tag, NType} = case error_logger:warning_map() of
+ Level = error_logger:warning_map(),
+ {Tag, NType} = case Level of
info ->
if
Type =:= std_warning ->
@@ -164,7 +282,10 @@ warning_report(Type, Report) ->
{error_report, Type}
end
end,
- notify({Tag, group_leader(), {self(), NType, Report}}).
+ logger:log(Level,
+ #{label=>{?MODULE,warning_report},
+ report=>Report},
+ meta(Tag,NType)).
%%-----------------------------------------------------------------
%% This function provides similar functions as error_msg for
@@ -183,7 +304,8 @@ warning_msg(Format) ->
Data :: list().
warning_msg(Format, Args) ->
- Tag = case error_logger:warning_map() of
+ Level = error_logger:warning_map(),
+ Tag = case Level of
warning ->
warning_msg;
info ->
@@ -191,7 +313,11 @@ warning_msg(Format, Args) ->
error ->
error
end,
- notify({Tag, group_leader(), {self(), Format, Args}}).
+ logger:log(Level,
+ #{label=>{?MODULE,warning_msg},
+ format=>Format,
+ args=>Args},
+ meta(Tag)).
%%-----------------------------------------------------------------
%% This function should be used for information reports. Events
@@ -210,7 +336,10 @@ info_report(Report) ->
Report :: report().
info_report(Type, Report) ->
- notify({info_report, group_leader(), {self(), Type, Report}}).
+ logger:log(info,
+ #{label=>{?MODULE,info_report},
+ report=>Report},
+ meta(info_report,Type)).
%%-----------------------------------------------------------------
%% This function provides similar functions as error_msg for
@@ -228,7 +357,11 @@ info_msg(Format) ->
Data :: list().
info_msg(Format, Args) ->
- notify({info_msg, group_leader(), {self(), Format, Args}}).
+ logger:log(info,
+ #{label=>{?MODULE,info_msg},
+ format=>Format,
+ args=>Args},
+ meta(info_msg)).
%%-----------------------------------------------------------------
%% Used by the init process. Events are tagged 'info'.
@@ -236,38 +369,75 @@ info_msg(Format, Args) ->
-spec error_info(Error :: any()) -> 'ok'.
+%% unused?
error_info(Error) ->
- notify({info, group_leader(), {self(), Error, []}}).
-
--spec notify({msg_tag(), pid(), {pid(), any(), any()}}) -> 'ok'.
-
-notify(Msg) ->
- gen_event:notify(error_logger, Msg).
-
--type swap_handler_type() :: 'false' | 'silent' | 'tty' | {'logfile', string()}.
--spec swap_handler(Type :: swap_handler_type()) -> any().
-
-swap_handler(tty) ->
- R = gen_event:swap_handler(error_logger, {error_logger, swap},
- {error_logger_tty_h, []}),
- ok = simple_logger(),
- R;
-swap_handler({logfile, File}) ->
- R = gen_event:swap_handler(error_logger, {error_logger, swap},
- {error_logger_file_h, File}),
- ok = simple_logger(),
- R;
-swap_handler(silent) ->
- _ = gen_event:delete_handler(error_logger, error_logger, delete),
- ok = simple_logger();
-swap_handler(false) ->
- ok. % keep primitive event handler as-is
+ {Format,Args} =
+ case string_p(Error) of
+ true -> {Error,[]};
+ false -> {"~p",[Error]}
+ end,
+ MyMeta = #{tag=>info,type=>Error},
+ logger:log(info, Format, Args, #{?MODULE=>MyMeta,domain=>[Error]}).
+
+%%-----------------------------------------------------------------
+%% Create metadata
+meta(Tag) ->
+ meta(Tag,undefined).
+meta(Tag,Type) ->
+ meta(Tag,Type,#{report_cb=>fun report_to_format/1}).
+meta(Tag,undefined,Meta0) ->
+ Meta0#{?MODULE=>#{tag=>Tag}};
+meta(Tag,Type,Meta0) ->
+ maybe_add_domain(Tag,Type,Meta0#{?MODULE=>#{tag=>Tag,type=>Type}}).
+
+%% This is to prevent events of non standard type from being printed
+%% with the standard logger. Similar to how error_logger_tty_h
+%% discards events of non standard type.
+maybe_add_domain(error_report,std_error,Meta) -> Meta;
+maybe_add_domain(info_report,std_info,Meta) -> Meta;
+maybe_add_domain(warning_report,std_warning,Meta) -> Meta;
+maybe_add_domain(_,Type,Meta) -> Meta#{domain=>[Type]}.
+
+%% -----------------------------------------------------------------
+%% Report formatting - i.e. Term => {Format,Args}
+%% This was earlier done in the event handler (error_logger_tty_h, etc)
+%% -----------------------------------------------------------------
+report_to_format(#{label:={?MODULE,_},
+ report:=Report}) when is_map(Report) ->
+ %% logger:format_otp_report does maps:to_list, and for backwards
+ %% compatibility reasons we don't want that.
+ {"~tp\n",[Report]};
+report_to_format(#{label:={?MODULE,_},
+ format:=Format,
+ args:=Args}) ->
+ %% This is not efficient, but needed for backwards compatibility
+ %% in giving faulty arguments to the *_msg functions.
+ try io_lib:scan_format(Format,Args) of
+ _ -> {Format,Args}
+ catch _:_ ->
+ {"ERROR: ~tp - ~tp",[Format,Args]}
+ end;
+report_to_format(Term) ->
+ logger:format_otp_report(Term).
+string_p(List) when is_list(List) ->
+ string_p1(lists:flatten(List));
+string_p(_) ->
+ false.
+
+string_p1([]) ->
+ false;
+string_p1(FlatList) ->
+ io_lib:printable_list(FlatList).
+
+%% -----------------------------------------------------------------
+%% Stuff directly related to the event manager
+%% -----------------------------------------------------------------
-spec add_report_handler(Handler) -> any() when
Handler :: module().
add_report_handler(Module) when is_atom(Module) ->
- gen_event:add_handler(error_logger, Module, []).
+ add_report_handler(Module, []).
-spec add_report_handler(Handler, Args) -> Result when
Handler :: module(),
@@ -275,24 +445,37 @@ add_report_handler(Module) when is_atom(Module) ->
Result :: gen_event:add_handler_ret().
add_report_handler(Module, Args) when is_atom(Module) ->
- gen_event:add_handler(error_logger, Module, Args).
+ _ = logger:add_handler(?MODULE,?MODULE,#{level=>info,filter_default=>log}),
+ gen_event:add_handler(?MODULE, Module, Args).
-spec delete_report_handler(Handler) -> Result when
Handler :: module(),
Result :: gen_event:del_handler_ret().
delete_report_handler(Module) when is_atom(Module) ->
- gen_event:delete_handler(error_logger, Module, []).
-
-%% Start the lowest level error_logger handler with Buffer.
-
-simple_logger(Buffer_size) when is_integer(Buffer_size) ->
- gen_event:add_handler(error_logger, error_logger, Buffer_size).
-
-%% Start the lowest level error_logger handler without Buffer.
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ Return = gen_event:delete_handler(?MODULE, Module, []),
+ case gen_event:which_handlers(?MODULE) of
+ [] ->
+ %% Don't want a lot of logs here if it's not needed
+ _ = logger:remove_handler(?MODULE),
+ ok;
+ _ ->
+ ok
+ end,
+ Return;
+ _ ->
+ ok
+ end.
-simple_logger() ->
- gen_event:add_handler(error_logger, error_logger, []).
+which_report_handlers() ->
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ gen_event:which_handlers(?MODULE);
+ undefined ->
+ []
+ end.
%% Log all errors to File for all eternity
@@ -308,26 +491,35 @@ simple_logger() ->
FilenameReason :: no_log_file.
logfile({open, File}) ->
- case lists:member(error_logger_file_h,
- gen_event:which_handlers(error_logger)) of
+ case lists:member(error_logger_file_h,which_report_handlers()) of
true ->
{error, allready_have_logfile};
_ ->
- gen_event:add_handler(error_logger, error_logger_file_h, File)
+ add_report_handler(error_logger_file_h, File)
end;
logfile(close) ->
- case gen_event:delete_handler(error_logger, error_logger_file_h, normal) of
- {error,Reason} ->
- {error,Reason};
- _ ->
- ok
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ case gen_event:delete_handler(?MODULE, error_logger_file_h, normal) of
+ {error,Reason} ->
+ {error,Reason};
+ _ ->
+ ok
+ end;
+ _ ->
+ {error,module_not_found}
end;
logfile(filename) ->
- case gen_event:call(error_logger, error_logger_file_h, filename) of
- {error,_} ->
- {error, no_log_file};
- Val ->
- Val
+ case whereis(?MODULE) of
+ Pid when is_pid(Pid) ->
+ case gen_event:call(?MODULE, error_logger_file_h, filename) of
+ {error,_} ->
+ {error, no_log_file};
+ Val ->
+ Val
+ end;
+ _ ->
+ {error, no_log_file}
end.
%% Possibly turn off all tty printouts, maybe we only want the errors
@@ -337,193 +529,17 @@ logfile(filename) ->
Flag :: boolean().
tty(true) ->
- Hs = gen_event:which_handlers(error_logger),
- case lists:member(error_logger_tty_h, Hs) of
+ case lists:member(error_logger_tty_h, which_report_handlers()) of
false ->
- gen_event:add_handler(error_logger, error_logger_tty_h, []);
- true ->
+ add_report_handler(error_logger_tty_h, []);
+ true ->
ignore
end,
ok;
tty(false) ->
- gen_event:delete_handler(error_logger, error_logger_tty_h, []),
- ok.
-
+ delete_report_handler(error_logger_tty_h).
-%%% ---------------------------------------------------
-%%% This is the default error_logger handler.
-%%% ---------------------------------------------------
-
--spec init(term()) -> {'ok', state() | []}.
-
-init(Max) when is_integer(Max) ->
- {ok, {Max, 0, []}};
-%% This one is called if someone took over from us, and now wants to
-%% go back.
-init({go_back, _PostState}) ->
- {ok, {?buffer_size, 0, []}};
-init(_) ->
- %% The error logger process may receive a huge amount of
- %% messages. Make sure that they are stored off heap to
- %% avoid exessive GCs.
- process_flag(message_queue_data, off_heap),
- {ok, []}.
-
--spec handle_event(term(), state()) -> {'ok', state()}.
-
-handle_event({Type, GL, Msg}, State) when node(GL) =/= node() ->
- gen_event:notify({error_logger, node(GL)},{Type, GL, Msg}),
- %% handle_event2({Type, GL, Msg}, State); %% Shall we do something
- {ok, State}; %% at this node too ???
-handle_event({info_report, _, {_, Type, _}}, State) when Type =/= std_info ->
- {ok, State}; %% Ignore other info reports here
-handle_event(Event, State) ->
- handle_event2(Event, State).
-
--spec handle_info(term(), state()) -> {'ok', state()}.
-
-handle_info({emulator, GL, Chars}, State) when node(GL) =/= node() ->
- {error_logger, node(GL)} ! {emulator, GL, add_node(Chars,self())},
- {ok, State};
-handle_info({emulator, GL, Chars}, State) ->
- handle_event2({emulator, GL, Chars}, State);
-handle_info(_, State) ->
- {ok, State}.
-
--spec handle_call(term(), state()) -> {'ok', {'error', 'bad_query'}, state()}.
-
-handle_call(_Query, State) -> {ok, {error, bad_query}, State}.
-
--spec terminate(term(), state()) -> {'error_logger', [term()]}.
-
-terminate(swap, {_, 0, Buff}) ->
- {error_logger, Buff};
-terminate(swap, {_, Lost, Buff}) ->
- Myevent = {info, group_leader(), {self(), {lost_messages, Lost}, []}},
- {error_logger, [tag_event(Myevent)|Buff]};
-terminate(_, _) ->
- {error_logger, []}.
-
-handle_event2(Event, {1, Lost, Buff}) ->
- display(tag_event(Event)),
- {ok, {1, Lost+1, Buff}};
-handle_event2(Event, {N, Lost, Buff}) ->
- Tagged = tag_event(Event),
- display(Tagged),
- {ok, {N-1, Lost, [Tagged|Buff]}};
-handle_event2(_, State) ->
- {ok, State}.
-
-tag_event(Event) ->
- {erlang:localtime(), Event}.
-
-display({Tag,{error,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{error_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{info_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{info,_,{_,Error,_}}}) ->
- display2(Tag,Error,[]);
-display({Tag,{info_msg,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{warning_report,_,{_,Type,Report}}}) ->
- display2(Tag,Type,Report);
-display({Tag,{warning_msg,_,{_,Format,Args}}}) ->
- display2(Tag,Format,Args);
-display({Tag,{emulator,_,Chars}}) ->
- display2(Tag,Chars,[]).
-
-add_node(X, Pid) when is_atom(X) ->
- add_node(atom_to_list(X), Pid);
-add_node(X, Pid) ->
- lists:concat([X,"** at node ",node(Pid)," **~n"]).
-
-%% Can't do io_lib:format
-
-display2({{_Y,_Mo,_D},{_H,_Mi,_S}} = Date, F, A) ->
- display_date(Date),
- display3(string_p(F), F, A).
-
-display_date({{Y,Mo,D},{H,Mi,S}}) ->
- erlang:display_string(
- integer_to_list(Y) ++ "-" ++
- two_digits(Mo) ++ "-" ++
- two_digits(D) ++ " " ++
- two_digits(H) ++ ":" ++
- two_digits(Mi) ++ ":" ++
- two_digits(S) ++ " ").
-
-two_digits(N) when 0 =< N, N =< 9 ->
- [$0, $0 + N];
-two_digits(N) ->
- integer_to_list(N).
-
-display3(true, F, A) ->
- %% Format string with arguments
- erlang:display_string(F ++ "\n"),
- [begin
- erlang:display_string("\t"),
- erlang:display(Arg)
- end || Arg <- A],
- ok;
-display3(false, Atom, A) when is_atom(Atom) ->
- %% The widest atom seems to be 'supervisor_report' at 17.
- ColumnWidth = 20,
- AtomString = atom_to_list(Atom),
- AtomLength = length(AtomString),
- Padding = lists:duplicate(ColumnWidth - AtomLength, $\s),
- erlang:display_string(AtomString ++ Padding),
- display4(A);
-display3(_, F, A) ->
- erlang:display({F, A}).
-
-display4([A, []]) ->
- %% Not sure why crash reports look like this.
- display4(A);
-display4(A = [_|_]) ->
- case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of
- true ->
- erlang:display_string("\n"),
- lists:foreach(
- fun({Key, Value}) ->
- erlang:display_string(
- " " ++
- atom_to_list(Key) ++
- ": "),
- erlang:display(Value)
- end, A);
- false ->
- erlang:display(A)
- end;
-display4(A) ->
- erlang:display(A).
-
-string_p([]) ->
- false;
-string_p(Term) ->
- string_p1(Term).
-
-string_p1([H|T]) when is_integer(H), H >= $\040, H =< $\176 ->
- string_p1(T);
-string_p1([H|T]) when is_integer(H), H >= 16#A0, H < 16#D800;
- is_integer(H), H > 16#DFFF, H < 16#FFFE;
- is_integer(H), H > 16#FFFF, H =< 16#10FFFF ->
- string_p1(T);
-string_p1([$\n|T]) -> string_p1(T);
-string_p1([$\r|T]) -> string_p1(T);
-string_p1([$\t|T]) -> string_p1(T);
-string_p1([$\v|T]) -> string_p1(T);
-string_p1([$\b|T]) -> string_p1(T);
-string_p1([$\f|T]) -> string_p1(T);
-string_p1([$\e|T]) -> string_p1(T);
-string_p1([H|T]) when is_list(H) ->
- case string_p1(H) of
- true -> string_p1(T);
- _ -> false
- end;
-string_p1([]) -> true;
-string_p1(_) -> false.
+%%%-----------------------------------------------------------------
-spec limit_term(term()) -> term().
@@ -536,9 +552,4 @@ limit_term(Term) ->
-spec get_format_depth() -> 'unlimited' | pos_integer().
get_format_depth() ->
- case application:get_env(kernel, error_logger_format_depth) of
- {ok, Depth} when is_integer(Depth) ->
- max(10, Depth);
- undefined ->
- unlimited
- end.
+ logger:get_format_depth().
diff --git a/lib/kernel/src/kernel.app.src b/lib/kernel/src/kernel.app.src
index 82a3571da9..afffcd156e 100644
--- a/lib/kernel/src/kernel.app.src
+++ b/lib/kernel/src/kernel.app.src
@@ -60,6 +60,17 @@
kernel_refc,
local_tcp,
local_udp,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_filters,
+ logger_formatter,
+ logger_h_common,
+ logger_server,
+ logger_simple,
+ logger_std_h,
+ logger_sup,
net,
net_adm,
net_kernel,
@@ -117,6 +128,8 @@
kernel_config,
kernel_refc,
kernel_sup,
+ logger,
+ logger_sup,
net_kernel,
net_sup,
rex,
@@ -127,7 +140,7 @@
inet_db,
pg2]},
{applications, []},
- {env, [{error_logger, tty}]},
+ {env, []},
{mod, {kernel, []}},
{runtime_dependencies, ["erts-10.0", "stdlib-3.5", "sasl-3.0"]}
]
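With the {error_logger, tty} default dropped from the env list, console or file logging for the node is instead selected through the kernel variables that logger:setup_standard_handler/0 reads later in this patch. A sys.config sketch, illustrative only (the log file name is a placeholder, not part of the patch):

    [{kernel,
      [{logger_dest, {file, "log/erlang.log"}},
       {logger_level, info},
       {logger_sasl_compatible, false}]}].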
diff --git a/lib/kernel/src/kernel.appup.src b/lib/kernel/src/kernel.appup.src
index 4ee497bbbd..305a1c788c 100644
--- a/lib/kernel/src/kernel.appup.src
+++ b/lib/kernel/src/kernel.appup.src
@@ -18,9 +18,11 @@
%% %CopyrightEnd%
{"%VSN%",
%% Up from - max one major revision back
- [{<<"5\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-19.*, OTP-20.0
- {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-20.1+
+ [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0
+ {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+
+ {<<"6\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-21
%% Down to - max one major revision back
- [{<<"5\\.[0-3](\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-19.*, OTP-20.0
- {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.1+
+ [{<<"5\\.3(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0
+ {<<"5\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.1+
+ {<<"6\\.0(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-21
}.
diff --git a/lib/kernel/src/kernel.erl b/lib/kernel/src/kernel.erl
index 0382764b39..20aa47f602 100644
--- a/lib/kernel/src/kernel.erl
+++ b/lib/kernel/src/kernel.erl
@@ -40,8 +40,7 @@ start(_, []) ->
ok = gen_event:add_handler(erl_signal_server, erl_signal_handler, [])
end,
%% add error handler
- Type = get_error_logger_type(),
- case error_logger:swap_handler(Type) of
+ case logger:setup_standard_handler() of
ok -> {ok, Pid, []};
Error ->
%% Not necessary since the node will crash anyway:
@@ -62,16 +61,6 @@ config_change(Changed, New, Removed) ->
do_global_groups_change(Changed, New, Removed),
ok.
-get_error_logger_type() ->
- case application:get_env(kernel, error_logger) of
- {ok, tty} -> tty;
- {ok, {file, File}} when is_list(File) -> {logfile, File};
- {ok, false} -> false;
- {ok, silent} -> silent;
- undefined -> tty; % default value
- {ok, Bad} -> exit({bad_config, {kernel, {error_logger, Bad}}})
- end.
-
%%%-----------------------------------------------------------------
%%% The process structure in kernel is as shown in the figure.
%%%
@@ -153,9 +142,18 @@ init([]) ->
type => supervisor,
modules => [?MODULE]},
+
+ LoggerSup = #{id => logger_sup,
+ start => {logger_sup, start_link, []},
+ restart => permanent,
+ shutdown => infinity,
+ type => supervisor,
+ modules => [logger_sup]},
+
case init:get_argument(mode) of
{ok, [["minimal"]]} ->
- {ok, {SupFlags, [Code, File, StdError, User, Config, RefC, SafeSup]}};
+ {ok, {SupFlags,
+ [Code, File, StdError, User, Config, RefC, SafeSup, LoggerSup]}};
_ ->
Rpc = #{id => rex,
start => {rpc, start_link, []},
@@ -206,7 +204,7 @@ init([]) ->
{ok, {SupFlags,
[Code, Rpc, Global, InetDb | DistAC] ++
[NetSup, GlGroup, File, SigSrv,
- StdError, User, Config, RefC, SafeSup] ++ Timer}}
+ StdError, User, Config, RefC, SafeSup, LoggerSup] ++ Timer}}
end;
init(safe) ->
SupFlags = #{strategy => one_for_one,
diff --git a/lib/kernel/src/logger.erl b/lib/kernel/src/logger.erl
new file mode 100644
index 0000000000..943ef8c2d1
--- /dev/null
+++ b/lib/kernel/src/logger.erl
@@ -0,0 +1,803 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger).
+
+%% Log interface
+-export([emergency/1,emergency/2,emergency/3,
+ alert/1,alert/2,alert/3,
+ critical/1,critical/2,critical/3,
+ error/1,error/2,error/3,
+ warning/1,warning/2,warning/3,
+ notice/1,notice/2,notice/3,
+ info/1,info/2,info/3,
+ debug/1,debug/2,debug/3]).
+-export([log/2,log/3,log/4]).
+
+%% Called by macro
+-export([allow/2,macro_log/3,macro_log/4,macro_log/5,add_default_metadata/1]).
+
+%% Configuration
+-export([add_handler/3, remove_handler/1,
+ add_logger_filter/2, add_handler_filter/3,
+ remove_logger_filter/1, remove_handler_filter/2,
+ set_module_level/2, reset_module_level/1,
+ set_logger_config/1, set_logger_config/2,
+ set_handler_config/2, set_handler_config/3,
+ get_logger_config/0, get_handler_config/1]).
+
+%% Misc
+-export([compare_levels/2]).
+-export([set_process_metadata/1, unset_process_metadata/0,
+ get_process_metadata/0]).
+-export([i/0, i/1]).
+-export([setup_standard_handler/0, replace_simple_handler/3]).
+-export([limit_term/1, get_format_depth/0, get_max_size/0, get_utc_config/0]).
+
+%% Basic report formatting
+-export([format_report/1, format_otp_report/1]).
+
+-export([internal_log/2,filter_stacktrace/2]).
+
+-include("logger_internal.hrl").
+-include("logger.hrl").
+
+%%%-----------------------------------------------------------------
+%%% Types
+-type log() :: #{level=>level(),
+ msg=>{io:format(),[term()]} |
+ {report,report()} |
+ {string,unicode:chardata()},
+ meta=>metadata()}.
+-type level() :: emergency | alert | critical | error |
+ warning | notice | info | debug.
+-type report() :: map() | [{atom(),term()}].
+-type msg_fun() :: fun((term()) -> {io:format(),[term()]} |
+ report() |
+ unicode:chardata()).
+-type metadata() :: map().
+
+-type handler_id() :: atom().
+-type filter_id() :: atom().
+-type filter() :: {fun((log(),term()) -> filter_return()),term()}.
+-type filter_return() :: stop | ignore | log().
+-type config() :: map().
+
+-export_type([log/0,level/0,report/0,msg_fun/0,metadata/0,config/0,handler_id/0,
+ filter_id/0,filter/0,filter_return/0]).
+
+%%%-----------------------------------------------------------------
+%%% API
+emergency(X) ->
+ log(emergency,X).
+emergency(X,Y) ->
+ log(emergency,X,Y).
+emergency(X,Y,Z) ->
+ log(emergency,X,Y,Z).
+
+alert(X) ->
+ log(alert,X).
+alert(X,Y) ->
+ log(alert,X,Y).
+alert(X,Y,Z) ->
+ log(alert,X,Y,Z).
+
+critical(X) ->
+ log(critical,X).
+critical(X,Y) ->
+ log(critical,X,Y).
+critical(X,Y,Z) ->
+ log(critical,X,Y,Z).
+
+error(X) ->
+ log(error,X).
+error(X,Y) ->
+ log(error,X,Y).
+error(X,Y,Z) ->
+ log(error,X,Y,Z).
+
+warning(X) ->
+ log(warning,X).
+warning(X,Y) ->
+ log(warning,X,Y).
+warning(X,Y,Z) ->
+ log(warning,X,Y,Z).
+
+notice(X) ->
+ log(notice,X).
+notice(X,Y) ->
+ log(notice,X,Y).
+notice(X,Y,Z) ->
+ log(notice,X,Y,Z).
+
+info(X) ->
+ log(info,X).
+info(X,Y) ->
+ log(info,X,Y).
+info(X,Y,Z) ->
+ log(info,X,Y,Z).
+
+debug(X) ->
+ log(debug,X).
+debug(X,Y) ->
+ log(debug,X,Y).
+debug(X,Y,Z) ->
+ log(debug,X,Y,Z).
+
+-spec log(Level,StringOrReport) -> ok when
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report().
+log(Level, StringOrReport) ->
+ do_log(Level,StringOrReport,#{}).
+
+-spec log(Level,StringOrReport,Metadata) -> ok when
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report(),
+ Metadata :: metadata();
+ (Level,Format,Args) -> ok when
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()];
+ (Level,Fun,FunArgs) -> ok when
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term().
+log(Level, StringOrReport, Metadata)
+ when is_map(Metadata), not is_function(StringOrReport) ->
+ do_log(Level,StringOrReport,Metadata);
+log(Level, FunOrFormat, Args) ->
+ do_log(Level,{FunOrFormat,Args},#{}).
+
+-spec log(Level,Format, Args, Metadata) -> ok when
+ Level :: level(),
+ Format :: io:format(),
+ Args :: [term()],
+ Metadata :: metadata();
+ (Level,Fun,FunArgs,Metadata) -> ok when
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term(),
+ Metadata :: metadata().
+log(Level, FunOrFormat, Args, Metadata) ->
+ do_log(Level,{FunOrFormat,Args},Metadata).
+
+-spec allow(Level,Module) -> boolean() when
+ Level :: level(),
+ Module :: module().
+allow(Level,Module) when ?IS_LEVEL(Level), is_atom(Module) ->
+ logger_config:allow(?LOGGER_TABLE,Level,Module).
+
+
+-spec macro_log(Location,Level,StringOrReport) -> ok when
+ Location :: map(),
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report().
+macro_log(Location,Level,StringOrReport) ->
+ log_allowed(Location,Level,StringOrReport,#{}).
+
+-spec macro_log(Location,Level,StringOrReport,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ StringOrReport :: unicode:chardata() | report(),
+ Meta :: metadata();
+ (Location,Level,Format,Args) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()];
+ (Location,Level,Fun,FunArgs) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term().
+macro_log(Location,Level,StringOrReport,Meta)
+ when is_map(Meta), not is_function(StringOrReport) ->
+ log_allowed(Location,Level,StringOrReport,Meta);
+macro_log(Location,Level,FunOrFormat,Args) ->
+ log_allowed(Location,Level,{FunOrFormat,Args},#{}).
+
+-spec macro_log(Location,Level,Format,Args,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Format :: io:format(),
+ Args ::[term()],
+ Meta :: metadata();
+ (Location,Level,Fun,FunArgs,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Fun :: msg_fun(),
+ FunArgs :: term(),
+ Meta :: metadata().
+macro_log(Location,Level,FunOrFormat,Args,Meta) ->
+ log_allowed(Location,Level,{FunOrFormat,Args},Meta).
+
+-spec format_otp_report(Report) -> FormatArgs when
+ Report :: report(),
+ FormatArgs :: {io:format(),[term()]}.
+format_otp_report(#{label:=_,report:=Report}) ->
+ format_report(Report);
+format_otp_report(Report) ->
+ format_report(Report).
+
+-spec format_report(Report) -> FormatArgs when
+ Report :: report(),
+ FormatArgs :: {io:format(),[term()]}.
+format_report(Report) when is_map(Report) ->
+ format_report(maps:to_list(Report));
+format_report(Report) when is_list(Report) ->
+ case lists:flatten(Report) of
+ [] ->
+ {"~tp",[[]]};
+ FlatList ->
+ case string_p1(FlatList) of
+ true ->
+ {"~ts",[FlatList]};
+ false ->
+ format_term_list(Report,[],[])
+ end
+ end;
+format_report(Report) ->
+ {"~tp",[Report]}.
+
+format_term_list([{Tag,Data}|T],Format,Args) ->
+ PorS = case string_p(Data) of
+ true -> "s";
+ false -> "p"
+ end,
+ format_term_list(T,[" ~tp: ~t"++PorS|Format],[Data,Tag|Args]);
+format_term_list([Data|T],Format,Args) ->
+ format_term_list(T,[" ~tp"|Format],[Data|Args]);
+format_term_list([],Format,Args) ->
+ {lists:flatten(lists:join($\n,lists:reverse(Format))),lists:reverse(Args)}.
+
+string_p(List) when is_list(List) ->
+ string_p1(lists:flatten(List));
+string_p(_) ->
+ false.
+
+string_p1([]) ->
+ false;
+string_p1(FlatList) ->
+ io_lib:printable_unicode_list(FlatList).
+
+internal_log(Level,Term) when is_atom(Level) ->
+ erlang:display_string("Logger - "++ atom_to_list(Level) ++ ": "),
+ erlang:display(Term).
+
+%%%-----------------------------------------------------------------
+%%% Configuration
+-spec add_logger_filter(FilterId,Filter) -> ok | {error,term()} when
+ FilterId :: filter_id(),
+ Filter :: filter().
+add_logger_filter(FilterId,Filter) ->
+ logger_server:add_filter(logger,{FilterId,Filter}).
+
+-spec add_handler_filter(HandlerId,FilterId,Filter) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FilterId :: filter_id(),
+ Filter :: filter().
+add_handler_filter(HandlerId,FilterId,Filter) ->
+ logger_server:add_filter(HandlerId,{FilterId,Filter}).
+
+
+-spec remove_logger_filter(FilterId) -> ok | {error,term()} when
+ FilterId :: filter_id().
+remove_logger_filter(FilterId) ->
+ logger_server:remove_filter(logger,FilterId).
+
+-spec remove_handler_filter(HandlerId,FilterId) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ FilterId :: filter_id().
+remove_handler_filter(HandlerId,FilterId) ->
+ logger_server:remove_filter(HandlerId,FilterId).
+
+-spec add_handler(HandlerId,Module,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Module :: module(),
+ Config :: config().
+add_handler(HandlerId,Module,Config) ->
+ logger_server:add_handler(HandlerId,Module,Config).
+
+-spec remove_handler(HandlerId) -> ok | {error,term()} when
+ HandlerId :: handler_id().
+remove_handler(HandlerId) ->
+ logger_server:remove_handler(HandlerId).
+
+-spec set_logger_config(Key,Value) -> ok | {error,term()} when
+ Key :: atom(),
+ Value :: term().
+set_logger_config(Key,Value) ->
+ logger_server:set_config(logger,Key,Value).
+
+-spec set_logger_config(Config) -> ok | {error,term()} when
+ Config :: config().
+set_logger_config(Config) ->
+ logger_server:set_config(logger,Config).
+
+-spec set_handler_config(HandlerId,Key,Value) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Key :: atom(),
+ Value :: term().
+set_handler_config(HandlerId,Key,Value) ->
+ logger_server:set_config(HandlerId,Key,Value).
+
+-spec set_handler_config(HandlerId,Config) -> ok | {error,term()} when
+ HandlerId :: handler_id(),
+ Config :: config().
+set_handler_config(HandlerId,Config) ->
+ logger_server:set_config(HandlerId,Config).
+
+-spec get_logger_config() -> {ok,Config} when
+ Config :: config().
+get_logger_config() ->
+ logger_config:get(?LOGGER_TABLE,logger).
+
+-spec get_handler_config(HandlerId) -> {ok,{Module,Config}} | {error,term()} when
+ HandlerId :: handler_id(),
+ Module :: module(),
+ Config :: config().
+get_handler_config(HandlerId) ->
+ logger_config:get(?LOGGER_TABLE,HandlerId).
+
+-spec set_module_level(Module,Level) -> ok | {error,term()} when
+ Module :: module(),
+ Level :: level().
+set_module_level(Module,Level) ->
+ logger_server:set_module_level(Module,Level).
+
+-spec reset_module_level(Module) -> ok | {error,term()} when
+ Module :: module().
+reset_module_level(Module) ->
+ logger_server:reset_module_level(Module).
+
+%%%-----------------------------------------------------------------
+%%% Misc
+-spec compare_levels(Level1,Level2) -> eq | gt | lt when
+ Level1 :: level(),
+ Level2 :: level().
+compare_levels(Level,Level) when ?IS_LEVEL(Level) ->
+ eq;
+compare_levels(Level1,Level2) when ?IS_LEVEL(Level1), ?IS_LEVEL(Level2) ->
+ Int1 = logger_config:level_to_int(Level1),
+ Int2 = logger_config:level_to_int(Level2),
+ if Int1 < Int2 -> gt;
+ true -> lt
+ end;
+compare_levels(Level1,Level2) ->
+ erlang:error(badarg,[Level1,Level2]).
+
+-spec set_process_metadata(Meta) -> ok when
+ Meta :: metadata().
+set_process_metadata(Meta) when is_map(Meta) ->
+ _ = put(?LOGGER_META_KEY,Meta),
+ ok;
+set_process_metadata(Meta) ->
+ erlang:error(badarg,[Meta]).
+
+-spec get_process_metadata() -> Meta | undefined when
+ Meta :: metadata().
+get_process_metadata() ->
+ get(?LOGGER_META_KEY).
+
+-spec unset_process_metadata() -> ok.
+unset_process_metadata() ->
+ _ = erase(?LOGGER_META_KEY),
+ ok.
+
+-spec i() -> #{logger=>config(),
+ handlers=>[{handler_id(),module(),config()}],
+ module_levels=>[{module(),level()}]}.
+i() ->
+ i(term).
+
+-spec i(term) -> #{logger=>config(),
+ handlers=>[{handler_id(),module(),config()}],
+ module_levels=>[{module(),level()}]};
+ (print) -> ok;
+ (string) -> iolist().
+i(_Action = print) ->
+ io:put_chars(i(string));
+i(_Action = string) ->
+ #{logger := #{level := Level, handlers := Handlers,
+ filters := Filters, filter_default := FilterDefault},
+ handlers := HandlerConfigs,
+ module_levels := Modules} = i(term),
+ [io_lib:format("Current logger configuration:~n", []),
+ io_lib:format(" Level: ~p~n",[Level]),
+ io_lib:format(" Filter Default: ~p~n", [FilterDefault]),
+ io_lib:format(" Filters: ~n", []),
+ print_filters(4, Filters),
+ io_lib:format(" Handlers: ~n", []),
+ print_handlers([C || {Id, _, _} = C <- HandlerConfigs,
+ lists:member(Id, Handlers)]),
+ io_lib:format(" Level set per module: ~n", []),
+ print_module_levels(Modules)
+ ];
+i(_Action = term) ->
+ {Logger, Handlers, Modules} = logger_config:get(tid()),
+ #{logger=>Logger,
+ handlers=>Handlers,
+ module_levels=>Modules}.
+
+print_filters(Indent, {Id, {Fun, Config}}) ->
+ io_lib:format("~sId: ~p~n"
+ "~s Fun: ~p~n"
+ "~s Config: ~p~n",[Indent, Id, Indent, Fun, Indent, Config]);
+print_filters(Indent, Filters) ->
+ IndentStr = io_lib:format("~.*s",[Indent, ""]),
+ lists:map(fun(Filter) ->print_filters(IndentStr, Filter) end, Filters).
+
+
+print_handlers({Id,Module,
+ #{level := Level,
+ filters := Filters, filter_default := FilterDefault,
+ formatter := {FormatterModule,FormatterConfig}} = Config}) ->
+ MyKeys = [filter_default, filters, formatter, level, id],
+ UnhandledConfig = maps:filter(fun(Key, _) ->
+ not lists:member(Key, MyKeys)
+ end, Config),
+ Unhandled = lists:map(fun({Key, Value}) ->
+ io_lib:format(" ~p: ~p~n",[Key, Value])
+ end, maps:to_list(UnhandledConfig)),
+ io_lib:format(" Id: ~p~n"
+ " Module: ~p~n"
+ " Level: ~p~n"
+ " Formatter:~n"
+ " Module: ~p~n"
+ " Config: ~p~n"
+ " Filter Default: ~p~n"
+ " Filters:~n~s"
+ " Handler Config:~n"
+ "~s"
+ "",[Id, Module, Level, FormatterModule, FormatterConfig,
+ FilterDefault, print_filters(8, Filters), Unhandled]);
+print_handlers(Handlers) ->
+ lists:map(fun print_handlers/1, Handlers).
+
+print_module_levels({Module,Level}) ->
+ io_lib:format(" Module: ~p~n"
+ " Level: ~p~n",
+ [Module,Level]);
+print_module_levels(ModuleLevels) ->
+ lists:map(fun print_module_levels/1, ModuleLevels).
+
+-spec setup_standard_handler() -> ok | {error,term()}.
+setup_standard_handler() ->
+ case get_logger_type() of
+ {ok,silent} ->
+ Level = get_logger_level(),
+ ok = set_logger_config(level,Level),
+ remove_handler(logger_simple);
+ {ok,Type} ->
+ Level = get_logger_level(),
+ ok = set_logger_config(level,Level),
+ Filters = get_logger_filters(),
+ setup_standard_handler(Type,#{level=>Level,
+ filter_default=>stop,
+ filters=>Filters});
+ Error ->
+ Error
+ end.
+
+-spec setup_standard_handler(Type,Config) -> ok | {error,term()} when
+ Type :: tty | standard_io | standard_error | {file,File} |
+ {file,File,Modes} | {disk_log,LogOpts} | false,
+ File :: file:filename(),
+ Modes :: [term()], % [file:mode()], or more specific?
+ Config :: config(),
+ LogOpts :: map().
+setup_standard_handler(false,#{level:=Level,filters:=Filters}) ->
+ case set_handler_config(logger_simple,level,Level) of
+ ok ->
+ set_handler_config(logger_simple,filters,Filters);
+ Error ->
+ Error
+ end;
+setup_standard_handler(Type,Config) ->
+ {Module,TypeConfig} = get_type_config(Type),
+ replace_simple_handler(?STANDARD_HANDLER,
+ Module,
+ maps:merge(Config,TypeConfig)).
+
+-spec replace_simple_handler(Id,Module,Config) -> ok | {error,term()} when
+ Id :: handler_id(),
+ Module :: module(),
+ Config :: config().
+replace_simple_handler(Id,Module,Config) ->
+ _ = code:ensure_loaded(Module),
+ DoBuffer = erlang:function_exported(Module,swap_buffer,2),
+ case add_handler(Id,Module,Config#{wait_for_buffer=>DoBuffer}) of
+ ok ->
+ if DoBuffer ->
+ {ok,Buffered} = logger_simple:get_buffer(),
+ _ = remove_handler(logger_simple),
+ Module:swap_buffer(?STANDARD_HANDLER,Buffered);
+ true ->
+ _ = remove_handler(logger_simple),
+ ok
+ end,
+ ok;
+ Error ->
+ Error
+ end.
+
+get_logger_type() ->
+ Type0 =
+ case application:get_env(kernel, logger_dest) of
+ undefined ->
+ application:get_env(kernel, error_logger);
+ T ->
+ T
+ end,
+ case Type0 of
+ {ok, tty} ->
+ {ok, tty};
+ {ok, {file, File}} when is_list(File) ->
+ {ok, {file, File}};
+ {ok, {file, File, Modes}} when is_list(File), is_list(Modes) ->
+ {ok, {file, File, Modes}};
+ {ok, {disk_log, File}} when is_list(File) ->
+ {ok, {disk_log, get_disk_log_config(File)}};
+ {ok, false} ->
+ {ok, false};
+ {ok, silent} ->
+ {ok, silent};
+ undefined ->
+ {ok, tty}; % default value
+ {ok, Bad} ->
+ {error,{bad_config, {kernel, {logger_dest, Bad}}}}
+ end.
+
+get_disk_log_config(File) ->
+ Config1 =
+ case application:get_env(kernel,logger_disk_log_maxfiles) of
+ undefined -> #{};
+ {ok,MF} -> #{max_no_files=>MF}
+ end,
+ Config2 =
+ case application:get_env(kernel,logger_disk_log_maxbytes) of
+ undefined -> Config1;
+ {ok,MB} -> Config1#{max_no_bytes=>MB}
+ end,
+ Config3 =
+ case application:get_env(kernel,logger_disk_log_type) of
+ undefined -> Config2;
+            {ok,T} -> Config2#{type=>T}
+ end,
+ Config3#{file=>File}.
+
+get_logger_level() ->
+ case application:get_env(kernel,logger_level) of
+ undefined -> info;
+ {ok,Level} when ?IS_LEVEL(Level) -> Level
+ end.
+
+get_logger_filters() ->
+ case application:get_env(kernel, logger_sasl_compatible, false) of
+ true ->
+ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp]);
+ false ->
+ Extra =
+ case application:get_env(kernel, logger_log_progress, false) of
+ true ->
+ [];
+ false ->
+ [{stop_progress,
+ {fun logger_filters:progress/2,stop}}]
+ end,
+ Extra ++ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp,sasl])
+ end.
+
+get_type_config({disk_log,LogOpts}) ->
+ {logger_disk_log_h,#{disk_log_opts=>LogOpts}};
+get_type_config(tty) ->
+ %% This is only for backwards compatibility with error_logger and
+ %% old kernel and sasl environment variables
+ get_type_config(standard_io);
+get_type_config(Type) when Type==standard_io;
+ Type==standard_error;
+ element(1,Type)==file ->
+ {logger_std_h,#{logger_std_h=>#{type=>Type}}};
+get_type_config(Type) ->
+ {error,{illegal_logger_type,Type}}.
+
+%%%-----------------------------------------------------------------
+-spec limit_term(term()) -> term().
+
+limit_term(Term) ->
+ try get_format_depth() of
+ unlimited -> Term;
+ D -> io_lib:limit_term(Term, D)
+ catch error:badarg ->
+            %% This could happen during system termination, after the
+            %% application_controller process is dead.
+ unlimited
+ end.
+
+-spec get_format_depth() -> 'unlimited' | pos_integer().
+
+get_format_depth() ->
+ Depth =
+ case application:get_env(kernel, logger_format_depth) of
+ {ok, D} when is_integer(D) ->
+ D;
+ undefined ->
+ case application:get_env(kernel, error_logger_format_depth) of
+ {ok, D} when is_integer(D) ->
+ D;
+ undefined ->
+ unlimited
+ end
+ end,
+ max(10, Depth).
+
+-spec get_max_size() -> 'unlimited' | pos_integer().
+
+get_max_size() ->
+ case application:get_env(kernel, logger_max_size) of
+ {ok, Size} when is_integer(Size) ->
+ max(50, Size);
+ undefined ->
+ unlimited
+ end.
+
+-spec get_utc_config() -> boolean().
+
+get_utc_config() ->
+ %% Kernel's logger_utc configuration overrides SASL utc_log, which
+ %% in turn overrides stdlib config - in order to have uniform
+ %% timestamps in log messages
+ case application:get_env(kernel, logger_utc) of
+ {ok, Val} -> Val;
+ undefined ->
+ case application:get_env(sasl, utc_log) of
+ {ok, Val} -> Val;
+ undefined ->
+ case application:get_env(stdlib, utc_log) of
+ {ok, Val} -> Val;
+ undefined -> false
+ end
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+do_log(warning,Msg,Meta) ->
+ do_log_1(error_logger:warning_map(),Msg,Meta);
+do_log(Level,Msg,Meta) ->
+ do_log_1(Level,Msg,Meta).
+
+do_log_1(Level,Msg,#{mfa:={Module,_,_}}=Meta) ->
+ case logger_config:allow(?LOGGER_TABLE,Level,Module) of
+ true ->
+ log_allowed(#{},Level,Msg,Meta);
+ false ->
+ ok
+ end;
+do_log_1(Level,Msg,Meta) ->
+ case logger_config:allow(?LOGGER_TABLE,Level) of
+ true ->
+ log_allowed(#{},Level,Msg,Meta);
+ false ->
+ ok
+ end.
+
+-spec log_allowed(Location,Level,Msg,Meta) -> ok when
+ Location :: map(),
+ Level :: level(),
+ Msg :: {msg_fun(),term()} |
+ {io:format(),[term()]} |
+ report() |
+ unicode:chardata(),
+ Meta :: metadata().
+log_allowed(Location,Level,{Fun,FunArgs},Meta) when is_function(Fun,1) ->
+ try Fun(FunArgs) of
+ Msg={Format,Args} when is_list(Format), is_list(Args) ->
+ log_allowed(Location,Level,Msg,Meta);
+ Report when ?IS_REPORT(Report) ->
+ log_allowed(Location,Level,Report,Meta);
+ String when ?IS_STRING(String) ->
+ log_allowed(Location,Level,String,Meta);
+ Other ->
+ log_allowed(Location,Level,
+ {"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{Fun,FunArgs},Other]},
+ Meta)
+ catch C:R ->
+ log_allowed(Location,Level,
+ {"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{Fun,FunArgs},{C,R}]},
+ Meta)
+ end;
+log_allowed(Location,Level,Msg,Meta0) when is_map(Meta0) ->
+ %% Metadata priorities are:
+ %% Location (added in API macros) - will be overwritten by process
+ %% metadata (set by set_process_metadata/1), which in turn will be
+ %% overwritten by the metadata given as argument in the log call
+ %% (function or macro).
+ Meta = add_default_metadata(
+ maps:merge(Location,maps:merge(proc_meta(),Meta0))),
+ case node(maps:get(gl,Meta)) of
+ Node when Node=/=node() ->
+ log_remote(Node,Level,Msg,Meta),
+ do_log_allowed(Level,Msg,Meta);
+ _ ->
+ do_log_allowed(Level,Msg,Meta)
+ end.
+
+do_log_allowed(Level,{Format,Args}=Msg,Meta)
+ when ?IS_LEVEL(Level),
+ is_list(Format),
+ is_list(Args),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>Msg,meta=>Meta},tid());
+do_log_allowed(Level,Report,Meta)
+ when ?IS_LEVEL(Level),
+ ?IS_REPORT(Report),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>{report,Report},meta=>Meta},
+ tid());
+do_log_allowed(Level,String,Meta)
+ when ?IS_LEVEL(Level),
+ ?IS_STRING(String),
+ is_map(Meta) ->
+ logger_backend:log_allowed(#{level=>Level,msg=>{string,String},meta=>Meta},
+ tid()).
+tid() ->
+ ets:whereis(?LOGGER_TABLE).
+
+log_remote(Node,Level,{Format,Args},Meta) ->
+ log_remote(Node,{log,Level,Format,Args,Meta});
+log_remote(Node,Level,Msg,Meta) ->
+ log_remote(Node,{log,Level,Msg,Meta}).
+
+log_remote(Node,Request) ->
+ {logger,Node} ! Request,
+ ok.
+
+add_default_metadata(Meta) ->
+ add_default_metadata([pid,gl,time],Meta).
+
+add_default_metadata([Key|Keys],Meta) ->
+ case maps:is_key(Key,Meta) of
+ true ->
+ add_default_metadata(Keys,Meta);
+ false ->
+ add_default_metadata(Keys,Meta#{Key=>default(Key)})
+ end;
+add_default_metadata([],Meta) ->
+ Meta.
+
+proc_meta() ->
+ case get_process_metadata() of
+ ProcMeta when is_map(ProcMeta) -> ProcMeta;
+ _ -> #{}
+ end.
+
+default(pid) -> self();
+default(gl) -> group_leader();
+default(time) -> erlang:monotonic_time(microsecond).
+
+%% Remove everything up to and including this module from the stacktrace
+filter_stacktrace(Module,[{Module,_,_,_}|_]) ->
+ [];
+filter_stacktrace(Module,[H|T]) ->
+ [H|filter_stacktrace(Module,T)];
+filter_stacktrace(_,[]) ->
+ [].
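For orientation, a sketch of how the API above is meant to be called; it is not part of the patch, and the domain, report keys and module name are made-up examples:

    logger:set_process_metadata(#{domain => [my_app]}),   %% merged into every event from this process
    logger:info("~p clients connected", [3]),             %% format string + args
    logger:error(#{event => connection_lost,               %% report given as a map
                   peer => "10.0.0.1"},
                 #{domain => [my_app, net]}),              %% per-call metadata wins over process metadata
    logger:set_module_level(my_module, debug).             %% per-module level override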
diff --git a/lib/kernel/src/logger_backend.erl b/lib/kernel/src/logger_backend.erl
new file mode 100644
index 0000000000..d9f5aa6faf
--- /dev/null
+++ b/lib/kernel/src/logger_backend.erl
@@ -0,0 +1,133 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_backend).
+
+-export([log_allowed/2]).
+
+-include("logger_internal.hrl").
+
+-define(OWN_KEYS,[level,filters,filter_default,handlers]).
+
+%%%-----------------------------------------------------------------
+%%% The default logger backend
+log_allowed(Log, Tid) ->
+ {ok,Config} = logger_config:get(Tid,logger),
+ Filters = maps:get(filters,Config,[]),
+ case apply_filters(logger,Log,Filters,Config) of
+ stop ->
+ ok;
+ Log1 ->
+ Handlers = maps:get(handlers,Config,[]),
+ call_handlers(Log1,Handlers,Tid)
+ end,
+ ok.
+
+call_handlers(#{level:=Level}=Log,[Id|Handlers],Tid) ->
+ case logger_config:get(Tid,Id,Level) of
+ {ok,{Module,Config}} ->
+ Filters = maps:get(filters,Config,[]),
+ case apply_filters(Id,Log,Filters,Config) of
+ stop ->
+ ok;
+ Log1 ->
+ Config1 = maps:without(?OWN_KEYS,Config),
+ try Module:log(Log1,Config1)
+ catch C:R:S ->
+ case logger:remove_handler(Id) of
+ ok ->
+ logger:internal_log(
+ error,{removed_failing_handler,Id}),
+ ?LOG_INTERNAL(
+ debug,
+ [{logger,removed_failing_handler},
+ {handler,{Id,Module}},
+ {log,Log1},
+ {config,Config1},
+ {reason,{C,R,filter_stacktrace(S)}}]);
+ {error,{not_found,_}} ->
+ %% Probably already removed by other client
+ %% Don't report again
+ ok;
+ {error,Reason} ->
+ ?LOG_INTERNAL(
+ debug,
+ [{logger,remove_handler_failed},
+ {reason,Reason}])
+ end
+ end
+ end;
+ _ ->
+ ok
+ end,
+ call_handlers(Log,Handlers,Tid);
+call_handlers(_Log,[],_Tid) ->
+ ok.
+
+apply_filters(Owner,Log,Filters,Config) ->
+ case do_apply_filters(Owner,Log,Filters,ignore) of
+ stop ->
+ stop;
+ ignore ->
+ case maps:get(filter_default,Config) of
+ log ->
+ Log;
+ stop ->
+ stop
+ end;
+ Log1 ->
+ Log1
+ end.
+
+do_apply_filters(Owner,Log,[{_Id,{FilterFun,FilterArgs}}=Filter|Filters],State) ->
+ try FilterFun(Log,FilterArgs) of
+ stop ->
+ stop;
+ ignore ->
+ do_apply_filters(Owner,Log,Filters,State);
+ Log1=#{level:=Level,msg:=Msg,meta:=Meta}
+ when is_atom(Level), ?IS_MSG(Msg), is_map(Meta) ->
+ do_apply_filters(Owner,Log1,Filters,log);
+ Bad ->
+ handle_filter_failed(Filter,Owner,Log,{bad_return_value,Bad})
+ catch C:R:S ->
+ handle_filter_failed(Filter,Owner,Log,{C,R,filter_stacktrace(S)})
+ end;
+do_apply_filters(_Owner,_Log,[],ignore) ->
+ ignore;
+do_apply_filters(_Owner,Log,[],log) ->
+ Log.
+
+handle_filter_failed({Id,_}=Filter,Owner,Log,Reason) ->
+ case logger_server:remove_filter(Owner,Id) of
+ ok ->
+ logger:internal_log(error,{removed_failing_filter,Id}),
+ ?LOG_INTERNAL(debug,
+ [{logger,removed_failing_filter},
+ {filter,Filter},
+ {owner,Owner},
+ {log,Log},
+ {reason,Reason}]);
+ _ ->
+ ok
+ end,
+ ignore.
+
+filter_stacktrace(Stacktrace) ->
+ logger:filter_stacktrace(?MODULE,Stacktrace).
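A filter, as consumed by do_apply_filters/4 above, is a fun paired with its extra argument and must return stop, ignore or a (possibly modified) log map. A sketch, not part of the patch (the filter id is made up):

    StopDebug = {fun(#{level := debug}, _Extra) -> stop;
                    (_Log, _Extra)              -> ignore
                 end,
                 undefined},
    logger:add_logger_filter(stop_debug, StopDebug).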
diff --git a/lib/kernel/src/logger_config.erl b/lib/kernel/src/logger_config.erl
new file mode 100644
index 0000000000..799aea9617
--- /dev/null
+++ b/lib/kernel/src/logger_config.erl
@@ -0,0 +1,151 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_config).
+
+-export([new/1,delete/2,
+ exist/2,
+ allow/2,allow/3,
+ get/2, get/3, get/1,
+ create/3, create/4, set/3,
+ set_module_level/3,reset_module_level/2,
+ cache_module_level/2,
+ level_to_int/1]).
+
+-include("logger_internal.hrl").
+
+new(Name) ->
+ _ = ets:new(Name,[set,protected,named_table]),
+ ets:whereis(Name).
+
+delete(Tid,Id) ->
+ ets:delete(Tid,table_key(Id)).
+
+allow(Tid,Level,Module) ->
+ LevelInt = level_to_int(Level),
+ case ets:lookup(Tid,Module) of
+ [{Module,{ModLevel,cached}}] when is_integer(ModLevel),
+ LevelInt =< ModLevel ->
+ true;
+ [{Module,ModLevel}] when is_integer(ModLevel),
+ LevelInt =< ModLevel ->
+ true;
+ [] ->
+ logger_server:cache_module_level(Module),
+ allow(Tid,Level);
+ _ ->
+ false
+ end.
+
+allow(Tid,Level) ->
+ GlobalLevelInt = ets:lookup_element(Tid,?LOGGER_KEY,2),
+ level_to_int(Level) =< GlobalLevelInt.
+
+exist(Tid,What) ->
+ ets:member(Tid,table_key(What)).
+
+get(Tid,What) ->
+ case ets:lookup(Tid,table_key(What)) of
+ [{_,_,Config}] ->
+ {ok,Config};
+ [{_,_,Config,Module}] ->
+ {ok,{Module,Config}};
+ [] ->
+ {error,{not_found,What}}
+ end.
+
+get(Tid,What,Level) ->
+ MS = [{{table_key(What),'$1','$2'}, % logger config
+ [{'>=','$1',level_to_int(Level)}],
+ ['$2']},
+ {{table_key(What),'$1','$2','$3'}, % handler config
+ [{'>=','$1',level_to_int(Level)}],
+ [{{'$3','$2'}}]}],
+ case ets:select(Tid,MS) of
+ [] -> error;
+ [Data] -> {ok,Data}
+ end.
+
+create(Tid,What,Module,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ ets:insert(Tid,{table_key(What),LevelInt,Config,Module}).
+create(Tid,What,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ ets:insert(Tid,{table_key(What),LevelInt,Config}).
+
+set(Tid,What,Config) ->
+ LevelInt = level_to_int(maps:get(level,Config)),
+ %% Should do this only if the level has actually changed. Possibly
+ %% overwrite instead of delete?
+ case What of
+ logger ->
+ _ = ets:select_delete(Tid,[{{'_',{'$1',cached}},
+ [{'=/=','$1',LevelInt}],
+ [true]}]),
+ ok;
+ _ ->
+ ok
+ end,
+ ets:update_element(Tid,table_key(What),[{2,LevelInt},{3,Config}]),
+ ok.
+
+set_module_level(Tid,Module,Level) ->
+ ets:insert(Tid,{Module,level_to_int(Level)}),
+ ok.
+
+reset_module_level(Tid,Module) ->
+    ets:delete(Tid,Module), % should possibly overwrite instead of delete?
+ ok.
+
+cache_module_level(Tid,Module) ->
+ GlobalLevelInt = ets:lookup_element(Tid,?LOGGER_KEY,2),
+ ets:insert_new(Tid,{Module,{GlobalLevelInt,cached}}),
+ ok.
+
+get(Tid) ->
+ {ok,Logger} = get(Tid,logger),
+ HMS = [{{table_key('$1'),'_','$2','$3'},[],[{{'$1','$3','$2'}}]}],
+ Handlers = ets:select(Tid,HMS),
+ MMS = [{{'$1','$2'},[{is_atom,'$1'},{is_integer,'$2'}],[{{'$1','$2'}}]}],
+ Modules = ets:select(Tid,MMS),
+ {Logger,Handlers,[{M,int_to_level(L)} || {M,L} <- Modules]}.
+
+level_to_int(emergency) -> ?EMERGENCY;
+level_to_int(alert) -> ?ALERT;
+level_to_int(critical) -> ?CRITICAL;
+level_to_int(error) -> ?ERROR;
+level_to_int(warning) -> ?WARNING;
+level_to_int(notice) -> ?NOTICE;
+level_to_int(info) -> ?INFO;
+level_to_int(debug) -> ?DEBUG.
+
+int_to_level(?EMERGENCY) -> emergency;
+int_to_level(?ALERT) -> alert;
+int_to_level(?CRITICAL) -> critical;
+int_to_level(?ERROR) -> error;
+int_to_level(?WARNING) -> warning;
+int_to_level(?NOTICE) -> notice;
+int_to_level(?INFO) -> info;
+int_to_level(?DEBUG) -> debug.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+
+table_key(logger) -> ?LOGGER_KEY;
+table_key(HandlerId) -> {?HANDLER_KEY,HandlerId}.
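Per-module levels are stored as plain {Module, LevelInt} rows in the same table, which is what lets allow/3 above resolve them with a single lookup (and cache the global level per module on a miss). A usage sketch, not part of the patch; the module name is made up:

    logger:set_module_level(my_noisy_module, error),   %% only error and more severe from this module
    logger:reset_module_level(my_noisy_module).        %% back to the global level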
diff --git a/lib/kernel/src/logger_disk_log_h.erl b/lib/kernel/src/logger_disk_log_h.erl
new file mode 100644
index 0000000000..3b71f936d8
--- /dev/null
+++ b/lib/kernel/src/logger_disk_log_h.erl
@@ -0,0 +1,694 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_disk_log_h).
+
+-behaviour(gen_server).
+
+-include("logger.hrl").
+-include("logger_internal.hrl").
+-include("logger_h_common.hrl").
+
+%%% API
+-export([start_link/3, info/1, disk_log_sync/1, reset/1]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2,
+ adding_handler/2, removing_handler/1,
+ changing_config/3, swap_buffer/2]).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Start a disk_log handler process and link to caller.
+%%% This function is called by the kernel supervisor when this
+%%% handler process gets added (as a result of calling add/3).
+-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
+ Name :: atom(),
+ Config :: logger:config(),
+ HandlerState :: map(),
+ Pid :: pid(),
+ Reason :: term().
+
+start_link(Name, Config, HandlerState) ->
+ proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
+
+%%%-----------------------------------------------------------------
+%%%
+-spec disk_log_sync(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+disk_log_sync(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, disk_log_sync, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+disk_log_sync(Name) ->
+ {error,{badarg,{disk_log_sync,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec info(Name) -> Info | {error,Reason} when
+ Name :: atom(),
+ Info :: term(),
+ Reason :: handler_busy | {badarg,term()}.
+
+info(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, info, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+info(Name) ->
+ {error,{badarg,{info,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec reset(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+reset(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, reset, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+reset(Name) ->
+ {error,{badarg,{reset,[Name]}}}.
+
+
+%%%===================================================================
+%%% logger callbacks
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(Name, Config) ->
+ case check_config(adding, Name, Config) of
+ {ok, Config1} ->
+ %% create initial handler state by merging defaults with config
+ HConfig = maps:get(?MODULE, Config1, #{}),
+ HState = maps:merge(get_init_state(), HConfig),
+ case logger_h_common:overload_levels_ok(HState) of
+ true ->
+ case start(Name, Config1, HState) of
+ ok ->
+ %% Make sure wait_for_buffer is not stored, so we
+ %% won't hang and wait for buffer on a restart
+ {ok, maps:remove(wait_for_buffer,Config1)};
+ Error ->
+ Error
+ end;
+ false ->
+ #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = HState,
+ {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(Name,
+ OldConfig=#{id:=Id, disk_log_opts:=DLOpts},
+ NewConfig=#{id:=Id, disk_log_opts:=DLOpts}) ->
+ case check_config(changing, Name, NewConfig) of
+ Result = {ok,NewConfig1} ->
+ try gen_server:call(Name, {change_config,OldConfig,NewConfig1},
+ ?DEFAULT_CALL_TIMEOUT) of
+ ok -> Result;
+ HError -> HError
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+ Error ->
+ Error
+ end;
+changing_config(_Name, OldConfig, NewConfig) ->
+ {error,{illegal_config_change,OldConfig,NewConfig}}.
+
+check_config(adding, Name, Config0) ->
+ %% Merge in defaults on top level
+ Config = maps:merge(#{id => Name}, Config0),
+ %% Merge in defaults on handler level
+ LogOpts0 = maps:get(disk_log_opts, Config, #{}),
+ LogOpts = merge_default_logopts(Name, LogOpts0),
+ case check_log_opts(maps:to_list(LogOpts)) of
+ ok ->
+ MyConfig = maps:get(?MODULE, Config, #{}),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok ->
+ {ok,Config#{disk_log_opts=>LogOpts,
+ ?MODULE=>MyConfig}};
+ Error ->
+ Error
+ end;
+ Error ->
+ Error
+ end;
+check_config(changing, _Name, Config) ->
+ MyConfig = maps:get(?MODULE, Config, #{}),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok -> {ok,Config};
+ Error -> Error
+ end.
+
+merge_default_logopts(Name, LogOpts) ->
+ Type = maps:get(type, LogOpts, wrap),
+ {DefaultNoFiles,DefaultNoBytes} =
+ case Type of
+ halt -> {undefined,infinity};
+ _wrap -> {10,1048576}
+ end,
+ {ok,Dir} = file:get_cwd(),
+ Default = #{file => filename:join(Dir,Name),
+ max_no_files => DefaultNoFiles,
+ max_no_bytes => DefaultNoBytes,
+ type => Type},
+ maps:merge(Default,LogOpts).
+
+check_log_opts([{file,File}|Opts]) when is_list(File) ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_files,undefined}|Opts]) ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_files,N}|Opts]) when is_integer(N), N>0 ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_bytes,infinity}|Opts]) ->
+ check_log_opts(Opts);
+check_log_opts([{max_no_bytes,N}|Opts]) when is_integer(N), N>0 ->
+ check_log_opts(Opts);
+check_log_opts([{type,Type}|Opts]) when Type==wrap; Type==halt ->
+ check_log_opts(Opts);
+check_log_opts([Invalid|_]) ->
+ {error,{invalid_config,disk_log_opt,Invalid}};
+check_log_opts([]) ->
+ ok.
+
+check_my_config([Other | Config]) ->
+ case logger_h_common:check_common_config(Other) of
+ valid ->
+ check_my_config(Config);
+ invalid ->
+ {error,{invalid_config,?MODULE,Other}}
+ end;
+check_my_config([]) ->
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Name) ->
+ stop(Name).
+
+%%%-----------------------------------------------------------------
+%%% Get buffer when swapping from simple handler
+swap_buffer(Name,Buffer) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ Name ! {buffer,Buffer}
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(Log, Config) -> ok | dropped when
+ Log :: logger:log(),
+ Config :: logger:config().
+
+log(Log,Config=#{id:=Name}) ->
+ %% if the handler has crashed, we must drop this request
+ %% and hope the handler restarts so we can try again
+ true = is_pid(whereis(Name)),
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ logger_h_common:call_cast_or_drop(Name, Bin).
+
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([Name, Config = #{disk_log_opts := LogOpts},
+ State = #{dl_sync_int := DLSyncInt}]) ->
+ register(Name, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ case open_disk_log(Name, LogOpts) of
+ ok ->
+ catch ets:new(Name, [public, named_table]),
+ ?set_mode(Name, async),
+ proc_lib:init_ack({ok,self()}),
+ T0 = ?timestamp(),
+ State1 =
+ ?merge_with_stats(State#{id => Name,
+ mode => async,
+ dl_sync => DLSyncInt,
+ log_opts => LogOpts,
+ last_qlen => 0,
+ last_log_ts => T0,
+ burst_win_ts => T0,
+ burst_msg_count => 0,
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}),
+ gen_server:cast(self(), {repeated_disk_log_sync,T0}),
+ enter_loop(Config, State1);
+ Error ->
+ logger_h_common:error_notify({open_disk_log,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+enter_loop(#{wait_for_buffer:=true}=Config,State) ->
+ State1 =
+ receive
+ {buffer,Buffer} ->
+ lists:foldl(
+ fun(Log,S) ->
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ {_,S1} = do_log(Bin,cast,S),
+ S1
+ end,
+ State,
+ Buffer)
+ end,
+ gen_server:enter_loop(?MODULE,[],State1);
+enter_loop(_Config,State) ->
+ gen_server:enter_loop(?MODULE,[],State).
+
+%% This is the synchronous log request.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply, Result, State1};
+
+handle_call(disk_log_sync, _From, State = #{id := Name}) ->
+ State1 = #{prev_sync_result := Result} = disk_log_sync(Name, State),
+ {reply, Result, State1};
+
+handle_call({change_config,_OldConfig,NewConfig}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0,
+ last_log_ts := LastLogTS}) ->
+ HConfig = maps:get(?MODULE, NewConfig, #{}),
+ State1 = #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = maps:merge(State, HConfig),
+ case logger_h_common:overload_levels_ok(State1) of
+ true ->
+ _ =
+ case maps:get(filesync_repeat_interval, HConfig, undefined) of
+ undefined ->
+ ok;
+ no_repeat ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined));
+ FSyncInt0 ->
+ ok;
+ _FSyncInt1 ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined)),
+ _ = gen_server:cast(self(), {repeated_disk_log_sync,
+ LastLogTS})
+ end,
+ {reply, ok, State1};
+ false ->
+ {reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State}
+ end;
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From, State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp(),
+ prev_log_result => ok,
+ prev_sync_result => ok,
+ prev_disk_log_info => undefined}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+
+%% This is the asynchronous log request.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% request, the repeat operation must be active!
+handle_cast({repeated_disk_log_sync,LastLogTS0},
+ State = #{id := Name,
+ filesync_repeat_interval := FSyncInt,
+ last_log_ts := LastLogTS1}) ->
+ State1 =
+ if is_integer(FSyncInt) ->
+ %% only do filesync if something has been
+ %% written since last time we checked
+ NewState = if LastLogTS1 == LastLogTS0 ->
+ State;
+ true ->
+ disk_log_sync(Name, State)
+ end,
+ {ok,TRef} =
+ timer:apply_after(FSyncInt, gen_server,cast,
+ [self(),
+ {repeated_disk_log_sync,LastLogTS1}]),
+ NewState#{rep_sync_tref => TRef};
+ true ->
+ State
+ end,
+ {noreply,State1}.
+
+%% The disk log owner must handle status messages from disk_log.
+handle_info({disk_log, _Node, _Log, {wrap,_NoLostItems}}, State) ->
+ {noreply, State};
+handle_info({disk_log, _Node, Log, Info = {truncated,_NoLostItems}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+handle_info({disk_log, _Node, Log, Info = {blocked_log,_Items}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+handle_info({disk_log, _Node, Log, full},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(full, PrevInfo, {disk_log,Name,Log,full}),
+ {noreply, State#{prev_disk_log_info => full}};
+handle_info({disk_log, _Node, Log, Info = {error_status,_Status}},
+ State = #{id := Name, prev_disk_log_info := PrevInfo}) ->
+ error_notify_new(Info, PrevInfo, {disk_log,Name,Log,Info}),
+ {noreply, State#{prev_disk_log_info => Info}};
+
+handle_info({'EXIT',_Pid,_Why}, State = #{id := _Name}) ->
+ {noreply, State};
+
+handle_info(_, State) ->
+ {noreply, State}.
+
+terminate(Reason, State = #{id := Name}) ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
+ undefined)),
+ _ = close_disk_log(Name, normal),
+ logger_h_common:stop_or_restart(Name, Reason, State).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%-----------------------------------------------------------------
+%%% Internal functions
+
+%%%-----------------------------------------------------------------
+%%%
+get_init_state() ->
+ #{toggle_sync_qlen => ?TOGGLE_SYNC_QLEN,
+ drop_new_reqs_qlen => ?DROP_NEW_REQS_QLEN,
+ flush_reqs_qlen => ?FLUSH_REQS_QLEN,
+ enable_burst_limit => ?ENABLE_BURST_LIMIT,
+ burst_limit_size => ?BURST_LIMIT_SIZE,
+ burst_window_time => ?BURST_WINDOW_TIME,
+ enable_kill_overloaded => ?ENABLE_KILL_OVERLOADED,
+ handler_overloaded_qlen => ?HANDLER_OVERLOADED_QLEN,
+ handler_overloaded_mem => ?HANDLER_OVERLOADED_MEM,
+ handler_restart_after => ?HANDLER_RESTART_AFTER,
+ dl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+
+%%%-----------------------------------------------------------------
+%%% Add a disk_log handler to the logger.
+%%% This starts a dedicated handler process which should always
+%%% exist if the handler is registered with logger (and should not
+%%% exist if the handler is not registered).
+%%%
+%%% Config is the logger:config() map containing a sub map with any of
+%%% the following associations:
+%%%
+%%% Config = #{disk_log_opts => #{file => file:filename(),
+%%% max_no_bytes => integer(),
+%%% max_no_files => integer(),
+%%% type => wrap | halt}}.
+%%%
+%%% This map will be merged with the logger configuration data for
+%%% the disk_log LogName. If type == halt, then max_no_files is
+%%% ignored.
+%%%
+%%% Handler specific config should be provided with a sub map associated
+%%% with a key named the same as this module, e.g:
+%%%
+%%% Config = #{logger_disk_log_h => #{toggle_sync_qlen => 50}}
+%%%
+%%% The disk_log handler process is linked to logger_sup, which is
+%%% part of the kernel application's supervision tree.
+start(Name, Config, HandlerState) ->
+ LoggerDLH =
+ #{id => Name,
+ start => {?MODULE, start_link, [Name,Config,HandlerState]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, LoggerDLH) of
+ {ok,_} ->
+ ok;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Stop and remove the handler.
+stop(Name) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ _ = gen_server:call(Name,stop),
+ _ = supervisor:delete_child(logger_sup, Name),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Logging and overload control.
+-define(update_dl_sync(C, Interval),
+ if C == 0 -> Interval;
+ true -> C-1 end).
+
+%% check for overload between every request (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% before LogWindowSize requests have been handled
+do_log(Bin, CallOrCast, State = #{id:=Name, mode := _Mode0}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% request to react quickly to large bursts of requests and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
+
+ %% kill the handler if it can't keep up with the load
+ logger_h_common:kill_if_choked(Name, QLen, Mem, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
+
+%% this function is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ _NewFlushed = logger_h_common:flush_log_requests(?FLUSH_MAX_N),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log request
+ ?observe(Name,{flushed,_NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ {dropped,?update_other(flushed,FLUSHED,_NewFlushed,
+ State1#{mode => ?set_mode(Name,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this function is called to write to disk_log
+write(Name, Mode, T1, Bin, _CallOrCast,
+ State = #{dl_sync := DLSync,
+ dl_sync_int := DLSyncInt,
+ last_qlen := LastQLen,
+ last_log_ts := T0}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log requests
+ {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
+
+    %% only send a synchronous request to the disk_log process
+ %% every DLSyncInt time, to give the handler time between
+ %% writes so it can keep up with incoming messages
+ {Status,LastQLen1,State1} =
+ if DoWrite, DLSync == 0 ->
+ ?observe(Name,{_CallOrCast,1}),
+ NewState = disk_log_write(Name, Bin, State),
+ {ok, element(2,process_info(self(),message_queue_len)),
+ NewState};
+ DoWrite ->
+ ?observe(Name,{_CallOrCast,1}),
+ NewState = disk_log_write(Name, Bin, State),
+ {ok, LastQLen, NewState};
+ not DoWrite ->
+ ?observe(Name,{flushed,1}),
+ {dropped, LastQLen, State}
+ end,
+
+ %% Check if the time since the previous log request is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+ %% been emptied, and if so, do filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+    %% time between two consecutive log requests is short and no new
+    %% request comes in after the last one, the idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ {Mode1,BurstMsgCount1,State2} =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ {?change_mode(Name,Mode,async), 0, disk_log_sync(Name,State1)};
+ true ->
+ {Mode, BurstMsgCount,State1}
+ end,
+
+ State3 =
+ ?update_calls_or_casts(_CallOrCast,1,State2),
+ State4 =
+ ?update_max_time(Time,
+ State3#{mode => Mode1,
+ last_qlen := LastQLen1,
+ last_log_ts => T1,
+ burst_win_ts => BurstWinT,
+ burst_msg_count => BurstMsgCount1,
+ dl_sync => ?update_dl_sync(DLSync,DLSyncInt)}),
+ {Status,State4}.
+
+
+open_disk_log(Name, LogOpts) ->
+ #{file := File,
+ max_no_bytes := MaxNoBytes,
+ max_no_files := MaxNoFiles,
+ type := Type} = LogOpts,
+ case filelib:ensure_dir(File) of
+ ok ->
+ Size =
+ if Type == halt -> MaxNoBytes;
+ Type == wrap -> {MaxNoBytes,MaxNoFiles}
+ end,
+ Opts = [{name, Name},
+ {file, File},
+ {size, Size},
+ {type, Type},
+ {linkto, self()},
+ {repair, false},
+ {format, external},
+ {notify, true},
+ {quiet, true},
+ {mode, read_write}],
+ case disk_log:open(Opts) of
+ {ok,Name} ->
+ ok;
+ Error = {error,_Reason} ->
+ Error
+ end;
+ Error ->
+ Error
+ end.
+
+close_disk_log(Name, _) ->
+ _ = ?disk_log_sync(Name),
+ _ = disk_log:lclose(Name),
+ ok.
+
+disk_log_write(Name, Bin, State) ->
+ Result =
+ case ?disk_log_blog(Name, Bin) of
+ ok ->
+ ok;
+ LogError ->
+ _ = case maps:get(prev_log_result, State) of
+ LogError ->
+ %% don't report same error twice
+ ok;
+ _ ->
+ LogOpts = maps:get(log_opts, State),
+ logger_h_common:error_notify({Name,log,
+ LogOpts,
+ LogError})
+ end,
+ LogError
+ end,
+ State#{prev_log_result => Result}.
+
+disk_log_sync(Name, State) ->
+ Result =
+ case ?disk_log_sync(Name) of
+ ok ->
+ ok;
+ SyncError ->
+ _ = case maps:get(prev_sync_result, State) of
+ SyncError ->
+ %% don't report same error twice
+ ok;
+ _ ->
+ LogOpts = maps:get(log_opts, State),
+ logger_h_common:error_notify({Name,sync,
+ LogOpts,
+ SyncError})
+ end,
+ SyncError
+ end,
+ State#{prev_sync_result => Result}.
+
+error_notify_new(Info,Info, _Term) ->
+ ok;
+error_notify_new(_Info0,_Info1, Term) ->
+ logger_h_common:error_notify(Term).
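A sketch of adding this handler, following the config layout described in the comment above start/3; it is not part of the patch, and the handler id, file name and queue-length value are made-up examples:

    logger:add_handler(my_disk_log, logger_disk_log_h,
                       #{disk_log_opts => #{file => "./my_app.log",
                                            type => wrap,
                                            max_no_files => 5,
                                            max_no_bytes => 1048576},
                         logger_disk_log_h => #{toggle_sync_qlen => 50}}).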
diff --git a/lib/kernel/src/logger_filters.erl b/lib/kernel/src/logger_filters.erl
new file mode 100644
index 0000000000..85928f0fd6
--- /dev/null
+++ b/lib/kernel/src/logger_filters.erl
@@ -0,0 +1,123 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_filters).
+
+-export([domain/2,
+ level/2,
+ progress/2,
+ remote_gl/2]).
+
+-include("logger_internal.hrl").
+-define(IS_ACTION(A), (A==log orelse A==stop)).
+
+-spec domain(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: {Action,Compare,MatchDomain},
+ Action :: log | stop,
+ Compare :: prefix_of | starts_with | equals | no_domain,
+ MatchDomain :: list(atom()).
+domain(#{meta:=Meta}=Log,{Action,Compare,MatchDomain})
+ when ?IS_ACTION(Action) andalso
+ (Compare==prefix_of orelse
+ Compare==starts_with orelse
+ Compare==equals orelse
+ Compare==no_domain) andalso
+ is_list(MatchDomain) ->
+ filter_domain(Compare,Meta,MatchDomain,on_match(Action,Log));
+domain(Log,Extra) ->
+ erlang:error(badarg,[Log,Extra]).
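+
+%% Illustrative example (not part of this change): the default handler
+%% filters defined later in this patch (logger_internal.hrl) use this
+%% filter as
+%%   {domain,{fun logger_filters:domain/2,{log,prefix_of,[beam,erlang,otp]}}}
+%% i.e. an event matches when its domain metadata is a prefix of
+%% [beam,erlang,otp] (see filter_domain/4 below). Called directly on an
+%% event map Log:
+%%   logger_filters:domain(Log, {log, prefix_of, [beam,erlang,otp]}).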
+
+-spec level(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: {Action,Operator,MatchLevel},
+ Action :: log | stop,
+ Operator :: neq | eq | lt | gt | lteq | gteq,
+ MatchLevel :: logger:level().
+level(#{level:=L1}=Log,{Action,Op,L2})
+ when ?IS_ACTION(Action) andalso
+ (Op==neq orelse
+ Op==eq orelse
+ Op==lt orelse
+ Op==gt orelse
+ Op==lteq orelse
+ Op==gteq) andalso
+ ?IS_LEVEL(L2) ->
+ filter_level(Op,L1,L2,on_match(Action,Log));
+level(Log,Extra) ->
+ erlang:error(badarg,[Log,Extra]).
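+
+%% Illustrative example (not part of this change): stopping all events
+%% whose level is exactly 'debug' (the Extra tuple follows the spec
+%% above; Log is any event map):
+%%   logger_filters:level(Log, {stop, eq, debug}).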
+
+-spec progress(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: log | stop.
+progress(Log,Action) when ?IS_ACTION(Action) ->
+ filter_progress(Log,on_match(Action,Log));
+progress(Log,Action) ->
+ erlang:error(badarg,[Log,Action]).
+
+-spec remote_gl(Log,Extra) -> logger:filter_return() when
+ Log :: logger:log(),
+ Extra :: log | stop.
+remote_gl(Log,Action) when ?IS_ACTION(Action) ->
+ filter_remote_gl(Log,on_match(Action,Log));
+remote_gl(Log,Action) ->
+ erlang:error(badarg,[Log,Action]).
+
+%%%-----------------------------------------------------------------
+%%% Internal
+filter_domain(prefix_of,#{domain:=Domain},MatchDomain,OnMatch) ->
+ is_prefix(Domain,MatchDomain,OnMatch);
+filter_domain(starts_with,#{domain:=Domain},MatchDomain,OnMatch) ->
+ is_prefix(MatchDomain,Domain,OnMatch);
+filter_domain(equals,#{domain:=Domain},Domain,OnMatch) ->
+ OnMatch;
+filter_domain(Action,Meta,_,OnMatch) ->
+ case maps:is_key(domain,Meta) of
+ false when Action==no_domain -> OnMatch;
+ _ -> ignore
+ end.
+
+is_prefix(D1,D2,OnMatch) when is_list(D1), is_list(D2) ->
+ case lists:prefix(D1,D2) of
+ true -> OnMatch;
+ false -> ignore
+ end;
+is_prefix(_,_,_) ->
+ ignore.
+
+filter_level(Op,L1,L2,OnMatch) ->
+ case logger:compare_levels(L1,L2) of
+ eq when Op==eq; Op==lteq; Op==gteq -> OnMatch;
+ lt when Op==lt; Op==lteq; Op==neq -> OnMatch;
+ gt when Op==gt; Op==gteq; Op==neq -> OnMatch;
+ _ -> ignore
+ end.
+
+filter_progress(#{msg:={report,#{label:={_,progress}}}},OnMatch) ->
+ OnMatch;
+filter_progress(_,_) ->
+ ignore.
+
+filter_remote_gl(#{meta:=#{gl:=GL}},OnMatch) when node(GL)=/=node() ->
+ OnMatch;
+filter_remote_gl(_,_) ->
+ ignore.
+
+on_match(log,Log) -> Log;
+on_match(stop,_) -> stop.
diff --git a/lib/kernel/src/logger_formatter.erl b/lib/kernel/src/logger_formatter.erl
new file mode 100644
index 0000000000..386e7832e2
--- /dev/null
+++ b/lib/kernel/src/logger_formatter.erl
@@ -0,0 +1,295 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_formatter).
+
+-export([format/2]).
+
+-include("logger_internal.hrl").
+
+%%%-----------------------------------------------------------------
+%%% Types
+-type template() :: [atom()|tuple()|string()].
+
+%%%-----------------------------------------------------------------
+%%% API
+-spec format(Log,Config) -> String when
+ Log :: logger:log(),
+ Config :: #{single_line=>boolean(),
+ legacy_header=>boolean(),
+ report_cb=>fun((logger:report()) -> {io:format(),[term()]}),
+ chars_limit=>pos_integer()| unlimited,
+ max_size=>pos_integer() | unlimited,
+ depth=>pos_integer() | unlimited,
+ template=>template(),
+ utc=>boolean()},
+ String :: string().
+format(#{level:=Level,msg:=Msg0,meta:=Meta},Config0)
+ when is_map(Config0) ->
+ Config = add_default_config(Config0),
+ Meta1 = maybe_add_legacy_header(Level,Meta,Config),
+ Template = maps:get(template,Config),
+ {BT,AT0} = lists:splitwith(fun(msg) -> false; (_) -> true end, Template),
+ {DoMsg,AT} =
+ case AT0 of
+ [msg|Rest] -> {true,Rest};
+ _ ->{false,AT0}
+ end,
+ B = do_format(Level,"",Meta1,BT,Config),
+ A = do_format(Level,"",Meta1,AT,Config),
+ MsgStr =
+ if DoMsg ->
+ Config1 =
+ case maps:get(chars_limit,Config) of
+ unlimited ->
+ Config;
+ Size0 ->
+ Size =
+ case Size0 - string:length([B,A]) of
+ S when S>=0 -> S;
+ _ -> 0
+ end,
+ Config#{chars_limit=>Size}
+ end,
+ MsgStr0 = format_msg(Msg0,Meta1,Config1),
+ case maps:get(single_line,Config) of
+ true ->
+ %% Trim leading and trailing whitespace, and replace
+ %% newlines with ", "
+ re:replace(string:trim(MsgStr0),",?\r?\n\s*",", ",
+ [{return,list},global]);
+ _false ->
+ MsgStr0
+ end;
+ true ->
+ ""
+ end,
+ truncate(B ++ MsgStr ++ A,maps:get(max_size,Config)).
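+
+%% Illustrative example (not part of this change): formatting a made-up
+%% event on a single line with an explicit template; all values below
+%% are examples only:
+%%   Log = #{level => info,
+%%           msg => {"connection from ~p", [{127,0,0,1}]},
+%%           meta => #{time => erlang:monotonic_time(microsecond)}},
+%%   logger_formatter:format(Log, #{single_line => true,
+%%                                  template => [time," ",level,": ",msg,"\n"]}).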
+
+do_format(Level,Msg,Data,[level|Format],Config) ->
+ [to_string(level,Level,Config)|do_format(Level,Msg,Data,Format,Config)];
+do_format(Level,Msg,Data,[msg|Format],Config) ->
+ [Msg|do_format(Level,Msg,Data,Format,Config)];
+do_format(Level,Msg,Data,[Key|Format],Config) when is_atom(Key); is_tuple(Key) ->
+ Value = value(Key,Data),
+ [to_string(Key,Value,Config)|do_format(Level,Msg,Data,Format,Config)];
+do_format(Level,Msg,Data,[Str|Format],Config) ->
+ [Str|do_format(Level,Msg,Data,Format,Config)];
+do_format(_Level,_Msg,_Data,[],_Config) ->
+ [].
+
+value(Key,Meta) when is_atom(Key), is_map(Meta) ->
+ maps:get(Key,Meta,"");
+value(Key,_) when is_atom(Key) ->
+ "";
+value(Keys,Meta) when is_tuple(Keys) ->
+ value(tuple_to_list(Keys),Meta);
+value([Key|Keys],Meta) ->
+ value(Keys,value(Key,Meta));
+value([],Value) ->
+ Value.
+
+to_string(time,Time,Config) ->
+ format_time(Time,Config);
+to_string(mfa,MFA,_Config) ->
+ format_mfa(MFA);
+to_string(_,Value,_Config) ->
+ to_string(Value).
+
+to_string(X) when is_atom(X) ->
+ atom_to_list(X);
+to_string(X) when is_integer(X) ->
+ integer_to_list(X);
+to_string(X) when is_pid(X) ->
+ pid_to_list(X);
+to_string(X) when is_reference(X) ->
+ ref_to_list(X);
+to_string(X) when is_list(X) ->
+ case io_lib:printable_unicode_list(lists:flatten(X)) of
+ true -> X;
+ _ -> io_lib:format("~tp",[X])
+ end;
+to_string(X) ->
+ io_lib:format("~tp",[X]).
+
+format_msg({string,Chardata},Meta,Config) ->
+ try unicode:characters_to_list(Chardata)
+ catch _:_ -> format_msg({"INVALID STRING: ~tp",[Chardata]},Meta,Config)
+ end;
+format_msg({report,_}=Msg,Meta,#{report_cb:=Fun}=Config) when is_function(Fun,1) ->
+ format_msg(Msg,Meta#{report_cb=>Fun},maps:remove(report_cb,Config));
+format_msg({report,Report},#{report_cb:=Fun}=Meta,Config) when is_function(Fun,1) ->
+ try Fun(Report) of
+ {Format,Args} when is_list(Format), is_list(Args) ->
+ format_msg({Format,Args},maps:remove(report_cb,Meta),Config);
+ Other ->
+ format_msg({"REPORT_CB ERROR: ~tp; Returned: ~tp",
+ [Report,Other]},Meta,Config)
+ catch C:R ->
+ format_msg({"REPORT_CB CRASH: ~tp; Reason: ~tp",
+ [Report,{C,R}]},Meta,Config)
+ end;
+format_msg({report,Report},Meta,Config) ->
+ format_msg({report,Report},
+ Meta#{report_cb=>fun logger:format_report/1},
+ Config);
+format_msg(Msg,_Meta,#{depth:=Depth,chars_limit:=CharsLimit}) ->
+ limit_size(Msg, Depth, CharsLimit).
+
+limit_size(Msg,Depth,unlimited) ->
+ limit_size(Msg,Depth,[]);
+limit_size(Msg,Depth,CharsLimit) when is_integer(CharsLimit) ->
+ limit_size(Msg,Depth,[{chars_limit,CharsLimit}]);
+limit_size({Format,Args},unlimited,Opts) when is_list(Opts) ->
+ try io_lib:format(Format,Args,Opts)
+ catch _:_ ->
+ io_lib:format("FORMAT ERROR: ~tp - ~tp",[Format,Args],Opts)
+ end;
+limit_size({Format0,Args},Depth,Opts) when is_integer(Depth) ->
+ try
+ Format1 = io_lib:scan_format(Format0, Args),
+ Format = limit_format(Format1, Depth),
+ io_lib:build_text(Format,Opts)
+ catch _:_ ->
+ limit_size({"FORMAT ERROR: ~tp - ~tp",[Format0,Args]},Depth,Opts)
+ end.
+
+limit_format([#{control_char:=C0}=M0|T], Depth) when C0 =:= $p;
+ C0 =:= $w ->
+ C = C0 - ($a - $A), %To uppercase.
+ #{args:=Args} = M0,
+ M = M0#{control_char:=C,args:=Args++[Depth]},
+ [M|limit_format(T, Depth)];
+limit_format([H|T], Depth) ->
+ [H|limit_format(T, Depth)];
+limit_format([], _) ->
+ [].
+
+truncate(String,unlimited) ->
+ String;
+truncate(String,Size) ->
+ Length = string:length(String),
+ if Length>Size ->
+ case lists:reverse(lists:flatten(String)) of
+ [$\n|_] ->
+ string:slice(String,0,Size-4)++"...\n";
+ _ ->
+ string:slice(String,0,Size-3)++"..."
+ end;
+ true ->
+ String
+ end.
+
+format_time(Timestamp,Config) when is_integer(Timestamp) ->
+ {Date,Time,Micro} = timestamp_to_datetimemicro(Timestamp,Config),
+ format_time(Date,Time,Micro);
+format_time(Other,_Config) ->
+ %% E.g. a string
+ to_string(Other).
+
+format_time({Y,M,D},{H,Min,S},Micro) ->
+ io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w",
+ [Y,M,D,H,Min,S,Micro]).
+
+%% Assuming this is monotonic time in microseconds
+timestamp_to_datetimemicro(Timestamp,Config) when is_integer(Timestamp) ->
+ SysTime = Timestamp + erlang:time_offset(microsecond),
+ Micro = SysTime rem 1000000,
+ Sec = SysTime div 1000000,
+ UniversalTime = erlang:posixtime_to_universaltime(Sec),
+ {Date,Time} =
+ case Config of
+ #{utc:=true} -> UniversalTime;
+ _ -> erlang:universaltime_to_localtime(UniversalTime)
+ end,
+ {Date,Time,Micro}.
+
+format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_integer(A) ->
+ atom_to_list(M)++":"++atom_to_list(F)++"/"++integer_to_list(A);
+format_mfa({M,F,A}) when is_atom(M), is_atom(F), is_list(A) ->
+ format_mfa({M,F,length(A)});
+format_mfa(MFA) ->
+ to_string(MFA).
+
+maybe_add_legacy_header(Level,
+ #{time:=Timestamp}=Meta,
+ #{legacy_header:=true}=Config) ->
+ #{title:=Title}=MyMeta = add_legacy_title(Level,maps:get(?MODULE,Meta,#{})),
+ {{Y,Mo,D},{H,Mi,S},Micro} = timestamp_to_datetimemicro(Timestamp,Config),
+ Header = io_lib:format("=~ts==== ~w-~s-~4w::~2..0w:~2..0w:~2..0w.~6..0w ~s===",
+ [Title,D,month(Mo),Y,H,Mi,S,Micro,utcstr(Config)]),
+ Meta#{?MODULE=>MyMeta#{header=>Header}};
+maybe_add_legacy_header(_,Meta,_) ->
+ Meta.
+
+add_legacy_title(_Level,#{title:=_}=MyMeta) ->
+ MyMeta;
+add_legacy_title(Level,MyMeta) ->
+ Title = string:uppercase(atom_to_list(Level)) ++ " REPORT",
+ MyMeta#{title=>Title}.
+
+month(1) -> "Jan";
+month(2) -> "Feb";
+month(3) -> "Mar";
+month(4) -> "Apr";
+month(5) -> "May";
+month(6) -> "Jun";
+month(7) -> "Jul";
+month(8) -> "Aug";
+month(9) -> "Sep";
+month(10) -> "Oct";
+month(11) -> "Nov";
+month(12) -> "Dec".
+
+utcstr(#{utc:=true}) -> "UTC ";
+utcstr(_) -> "".
+
+add_default_config(#{utc:=_}=Config0) ->
+ Default =
+ #{legacy_header=>false,
+ single_line=>false,
+ chars_limit=>unlimited},
+ MaxSize = get_max_size(maps:get(max_size,Config0,false)),
+ Depth = get_depth(maps:get(depth,Config0,false)),
+ add_default_template(maps:merge(Default,Config0#{max_size=>MaxSize,
+ depth=>Depth}));
+add_default_config(Config) ->
+ add_default_config(Config#{utc=>logger:get_utc_config()}).
+
+add_default_template(#{template:=_}=Config) ->
+ Config;
+add_default_template(Config) ->
+ Config#{template=>default_template(Config)}.
+
+default_template(#{legacy_header:=true}) ->
+ ?DEFAULT_FORMAT_TEMPLATE_HEADER;
+default_template(#{single_line:=true}) ->
+ ?DEFAULT_FORMAT_TEMPLATE_SINGLE;
+default_template(_) ->
+ ?DEFAULT_FORMAT_TEMPLATE.
+
+get_max_size(false) ->
+ logger:get_max_size();
+get_max_size(S) ->
+ max(10,S).
+
+get_depth(false) ->
+ logger:get_format_depth();
+get_depth(S) ->
+ max(5,S).
diff --git a/lib/kernel/src/logger_h_common.erl b/lib/kernel/src/logger_h_common.erl
new file mode 100644
index 0000000000..7caad366ae
--- /dev/null
+++ b/lib/kernel/src/logger_h_common.erl
@@ -0,0 +1,301 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_h_common).
+
+-include("logger_h_common.hrl").
+-include("logger_internal.hrl").
+
+-export([log_to_binary/2,
+ check_common_config/1,
+ call_cast_or_drop/2,
+ check_load/1,
+ limit_burst/1,
+ kill_if_choked/4,
+ flush_log_requests/0,
+ flush_log_requests/1,
+ handler_exit/2,
+ cancel_timer/1,
+ stop_or_restart/3,
+ overload_levels_ok/1,
+ error_notify/1,
+ info_notify/1]).
+
+%%%-----------------------------------------------------------------
+%%% Convert log data of any form to binary
+-spec log_to_binary(Log,Config) -> LogString when
+ Log :: logger:log(),
+ Config :: logger:config(),
+ LogString :: binary().
+log_to_binary(#{msg:={report,_},meta:=#{report_cb:=_}}=Log,Config) ->
+ do_log_to_binary(Log,Config);
+log_to_binary(#{msg:={report,_},meta:=Meta}=Log,Config) ->
+ DefaultReportCb = fun logger:format_otp_report/1,
+ do_log_to_binary(Log#{meta=>Meta#{report_cb=>DefaultReportCb}},Config);
+log_to_binary(Log,Config) ->
+ do_log_to_binary(Log,Config).
+
+do_log_to_binary(Log,Config) ->
+ {Formatter,FormatterConfig} = maps:get(formatter,Config),
+ String = try_format(Log,Formatter,FormatterConfig),
+ try unicode:characters_to_binary(String)
+ catch _:_ ->
+ ?LOG_INTERNAL(debug,[{formatter_error,Formatter},
+ {config,FormatterConfig},
+ {log,Log},
+ {bad_return_value,String}]),
+ <<"FORMATTER ERROR: bad_return_value">>
+ end.
+
+try_format(Log,Formatter,FormatterConfig) ->
+ try Formatter:format(Log,FormatterConfig)
+ catch
+ C:R:S ->
+ ?LOG_INTERNAL(debug,[{formatter_crashed,Formatter},
+ {config,FormatterConfig},
+ {log,Log},
+ {reason,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}}]),
+ case {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG} of
+ {Formatter,FormatterConfig} ->
+ "DEFAULT FORMATTER CRASHED";
+ {DefaultFormatter,DefaultConfig} ->
+ try_format(Log#{msg=>{"FORMATTER CRASH: ~tp",
+ [maps:get(msg,Log)]}},
+ DefaultFormatter,DefaultConfig)
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Check that the configuration term is valid
+check_common_config({toggle_sync_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({drop_new_reqs_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({flush_reqs_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({enable_burst_limit,Bool}) when Bool == true;
+ Bool == false ->
+ valid;
+check_common_config({burst_limit_size,N}) when is_integer(N) ->
+ valid;
+check_common_config({burst_window_time,N}) when is_integer(N) ->
+ valid;
+check_common_config({enable_kill_overloaded,Bool}) when Bool == true;
+ Bool == false ->
+ valid;
+check_common_config({handler_overloaded_qlen,N}) when is_integer(N) ->
+ valid;
+check_common_config({handler_overloaded_mem,N}) when is_integer(N) ->
+ valid;
+check_common_config({handler_restart_after,NorA}) when is_integer(NorA);
+ NorA == never ->
+ valid;
+check_common_config({filesync_repeat_interval,NorA}) when is_integer(NorA);
+ NorA == no_repeat ->
+ valid;
+check_common_config(_) ->
+ invalid.
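+
+%% Illustrative example (not part of this change): handler settings
+%% accepted by check_common_config/1 above (given as {Key,Value} pairs);
+%% the values are made up, but keep toggle_sync_qlen < drop_new_reqs_qlen
+%% < flush_reqs_qlen as required by overload_levels_ok/1 below:
+%%   #{toggle_sync_qlen => 20,
+%%     drop_new_reqs_qlen => 500,
+%%     flush_reqs_qlen => 2000,
+%%     enable_burst_limit => true,
+%%     burst_limit_size => 500,
+%%     burst_window_time => 1000}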
+
+
+%%%-----------------------------------------------------------------
+%%% Overload Protection
+call_cast_or_drop(Name, Bin) ->
+ %% If the handler process is getting overloaded, the log request
+ %% will be synchronous instead of asynchronous (this slows down the
+ %% logging tempo of a process doing lots of logging). If the
+ %% handler is choked, drop mode is set and no request will be sent.
+ try ?get_mode(Name) of
+ async ->
+ gen_server:cast(Name, {log,Bin});
+ sync ->
+ try gen_server:call(Name, {log,Bin}, ?DEFAULT_CALL_TIMEOUT) of
+ %% if return value from call == dropped, the
+ %% message has been flushed by handler and should
+ %% therefore not be counted as dropped in stats
+ ok -> ok;
+ dropped -> ok
+ catch
+ _:{timeout,_} ->
+ ?observe(Name,{dropped,1})
+ end;
+ drop -> ?observe(Name,{dropped,1})
+ catch
+ %% if the ETS table doesn't exist (maybe because of a
+ %% handler restart), we can only drop the request
+ _:_ -> ?observe(Name,{dropped,1})
+ end,
+ ok.
+
+handler_exit(_Name, Reason) ->
+ exit(Reason).
+
+check_load(State = #{id:=Name, mode := Mode,
+ toggle_sync_qlen := ToggleSyncQLen,
+ drop_new_reqs_qlen := DropNewQLen,
+ flush_reqs_qlen := FlushQLen}) ->
+ {_,Mem} = process_info(self(), memory),
+ ?observe(Name,{max_mem,Mem}),
+ %% make sure the handler process doesn't get scheduled
+ %% out between the message_queue_len check below and the
+ %% action that follows (flush or write).
+ {_,QLen} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,QLen}),
+
+ {Mode1,_NewDrops,_NewFlushes} =
+ if
+ QLen >= FlushQLen ->
+ {flush, 0,1};
+ QLen >= DropNewQLen ->
+ %% Note that drop mode will force log requests to
+ %% be dropped on the client side (they never get sent to
+ %% the handler).
+ IncDrops = if Mode == drop -> 0; true -> 1 end,
+ {?change_mode(Name, Mode, drop), IncDrops,0};
+ QLen >= ToggleSyncQLen ->
+ {?change_mode(Name, Mode, sync), 0,0};
+ true ->
+ {?change_mode(Name, Mode, async), 0,0}
+ end,
+ State1 = ?update_other(drops,DROPS,_NewDrops,State),
+ {Mode1, QLen, Mem,
+ ?update_other(flushes,FLUSHES,_NewFlushes,
+ State1#{last_qlen => QLen})}.
+
+limit_burst(#{enable_burst_limit := false}) ->
+ {true,0,0};
+limit_burst(#{burst_win_ts := BurstWinT0,
+ burst_msg_count := BurstMsgCount,
+ burst_window_time := BurstWinTime,
+ burst_limit_size := BurstLimitSz}) ->
+ if (BurstMsgCount >= BurstLimitSz) ->
+ %% the limit for allowed messages has been reached
+ BurstWinT1 = ?timestamp(),
+ case ?diff_time(BurstWinT1,BurstWinT0) of
+ BurstCheckTime when BurstCheckTime < (BurstWinTime*1000) ->
+ %% we're still within the burst time frame
+ {false,BurstWinT0,BurstMsgCount};
+ _BurstCheckTime ->
+ %% burst time frame passed, reset counters
+ {true,BurstWinT1,0}
+ end;
+ true ->
+ %% the limit for allowed messages not yet reached
+ {true,BurstWinT0,BurstMsgCount+1}
+ end.
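+
+%% Illustrative example (not part of this change): with the defaults
+%% BURST_LIMIT_SIZE = 500 and BURST_WINDOW_TIME = 1000 ms, at most 500
+%% requests are accepted per window; once the counter reaches 500, new
+%% requests are rejected until 1000 ms (1000*1000 us, since timestamps
+%% are in microseconds) have passed, after which the counter is reset.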
+
+kill_if_choked(Name, QLen, Mem,
+ #{enable_kill_overloaded := KillIfOL,
+ handler_overloaded_qlen := HOLQLen,
+ handler_overloaded_mem := HOLMem}) ->
+ if KillIfOL andalso
+ ((QLen > HOLQLen) orelse (Mem > HOLMem)) ->
+ handler_exit(Name, {shutdown,{overloaded,Name,QLen,Mem}});
+ true ->
+ ok
+ end.
+
+flush_log_requests() ->
+ flush_log_requests(-1).
+
+flush_log_requests(Limit) ->
+ process_flag(priority, high),
+ Flushed = flush_log_requests(0, Limit),
+ process_flag(priority, normal),
+ Flushed.
+
+flush_log_requests(Limit, Limit) ->
+ Limit;
+flush_log_requests(N, Limit) ->
+ %% flush log requests but leave other requests, such as
+ %% file/disk_log_sync, info and change_config, so that these
+ %% have a chance to be processed even under heavy load
+ receive
+ {'$gen_cast',{log,_}} ->
+ flush_log_requests(N+1, Limit);
+ {'$gen_call',{Pid,MRef},{log,_}} ->
+ Pid ! {MRef, dropped},
+ flush_log_requests(N+1, Limit)
+ after
+ 0 -> N
+ end.
+
+cancel_timer(TRef) when is_atom(TRef) -> ok;
+cancel_timer(TRef) -> timer:cancel(TRef).
+
+
+stop_or_restart(Name, {shutdown,Reason={overloaded,_Name,_QLen,_Mem}},
+ #{handler_restart_after := RestartAfter}) ->
+ %% If we're terminating because of an overload situation (see
+ %% logger_h_common:kill_if_choked/4), we need to remove the handler
+ %% and set a restart timer. A separate process must perform this
+ %% in order to avoid deadlock.
+ HandlerPid = self(),
+ RemoveAndRestart =
+ fun() ->
+ MRef = erlang:monitor(process, HandlerPid),
+ receive
+ {'DOWN',MRef,_,_,_} ->
+ ok
+ after 30000 ->
+ error_notify(Reason),
+ exit(HandlerPid, kill)
+ end,
+ case logger:get_handler_config(Name) of
+ {ok,{HMod,HConfig}} when is_integer(RestartAfter) ->
+ _ = logger:remove_handler(Name),
+ _ = timer:apply_after(RestartAfter, logger, add_handler,
+ [Name,HMod,HConfig]);
+ {ok,_} ->
+ _ = logger:remove_handler(Name);
+ {error,CfgReason} when is_integer(RestartAfter) ->
+ error_notify({Name,restart_impossible,CfgReason});
+ {error,_} ->
+ ok
+ end
+ end,
+ spawn(RemoveAndRestart),
+ ok;
+
+stop_or_restart(Name, shutdown, _State) ->
+ %% Probably terminated by supervisor. Remove the handler to avoid
+ %% error printouts due to a failing handler.
+ _ = case logger:get_handler_config(Name) of
+ {ok,_} ->
+ %% Spawning to avoid deadlock
+ spawn(logger,remove_handler,[Name]);
+ _ ->
+ ok
+ end,
+ ok;
+
+stop_or_restart(_Name, _Reason, _State) ->
+ ok.
+
+overload_levels_ok(HandlerConfig) ->
+ TSQL = maps:get(toggle_sync_qlen, HandlerConfig, ?TOGGLE_SYNC_QLEN),
+ DNRQL = maps:get(drop_new_reqs_qlen, HandlerConfig, ?DROP_NEW_REQS_QLEN),
+ FRQL = maps:get(flush_reqs_qlen, HandlerConfig, ?FLUSH_REQS_QLEN),
+ (TSQL < DNRQL) andalso (DNRQL < FRQL).
+
+error_notify(Term) ->
+ ?internal_log(error, Term).
+
+info_notify(Term) ->
+ ?internal_log(info, Term).
diff --git a/lib/kernel/src/logger_h_common.hrl b/lib/kernel/src/logger_h_common.hrl
new file mode 100644
index 0000000000..89378dbb10
--- /dev/null
+++ b/lib/kernel/src/logger_h_common.hrl
@@ -0,0 +1,262 @@
+
+%%%-----------------------------------------------------------------
+%%% Overload protection configuration
+
+%%! *** NOTE ***
+%%! It's important that:
+%%! TOGGLE_SYNC_QLEN < DROP_NEW_REQS_QLEN < FLUSH_REQS_QLEN
+%%! and that DROP_NEW_REQS_QLEN >= 2.
+%%! Otherwise the handler could end up in drop mode with no new
+%%! log requests to process. This would cause all future requests
+%%! to be dropped (no switch to async mode would ever take place).
+
+%% This specifies the message_queue_len value where the log
+%% requests switch from asynchronous casts to synchronous calls.
+-define(TOGGLE_SYNC_QLEN, 10).
+%% Above this message_queue_len, log requests will be dropped,
+%% i.e. no log requests get sent to the handler process.
+-define(DROP_NEW_REQS_QLEN, 200).
+%% Above this message_queue_len, the handler process will flush
+%% its mailbox and only leave this number of messages in it.
+-define(FLUSH_REQS_QLEN, 1000).
+
+%% Never flush more than this number of messages in one go,
+%% or the handler will be unresponsive for seconds (keep this
+%% number as large as possible or the mailbox could grow large).
+-define(FLUSH_MAX_N, 5000).
+
+%% BURST_LIMIT is the max number of log requests allowed to be
+%% written within a BURST_WINDOW_TIME time frame.
+-define(ENABLE_BURST_LIMIT, true).
+-define(BURST_LIMIT_SIZE, 500).
+-define(BURST_WINDOW_TIME, 1000).
+
+%% This enables/disables the feature to automatically terminate the
+%% handler if it gets too loaded (and can't keep up).
+-define(ENABLE_KILL_OVERLOADED, false).
+%% If the message_queue_len goes above this size even after
+%% flushing has been performed, the handler is terminated.
+-define(HANDLER_OVERLOADED_QLEN, 20000).
+%% If the memory usage exceeds this level
+-define(HANDLER_OVERLOADED_MEM, 3000000).
+
+%% This is the default time that the handler will wait before
+%% restarting and accepting new requests. The value 'never'
+%% disables restarts.
+-define(HANDLER_RESTART_AFTER, 5000).
+%%-define(HANDLER_RESTART_AFTER, never).
+
+%% The handler sends asynchronous write requests to the process
+%% controlling the i/o device, but every time this many requests
+%% have been made, the write request is synchronous, so that the
+%% i/o device process doesn't get overloaded. This gives the
+%% handler time to keep up with its mailbox in overload
+%% situations, even if the i/o is slow.
+-define(CONTROLLER_SYNC_INTERVAL, 20).
+%% The handler will not perform a file sync operation if the
+%% mailbox size is greater than this number. This is to ensure
+%% the handler process doesn't get overloaded while waiting for
+%% an expensive file sync operation to finish.
+-define(FILESYNC_OK_QLEN, 2).
+%% Do a file/disk_log sync operation every integer() millisec
+%% (if necessary) or set to 'no_repeat' to only do file sync when
+%% the handler is idle. Note that file sync is not guaranteed to
+%% happen automatically if this operation is disabled.
+-define(FILESYNC_REPEAT_INTERVAL, 5000).
+%%-define(FILESYNC_REPEAT_INTERVAL, no_repeat).
+
+%% This is the time after the last received message at which we
+%% assume the handler mailbox to be empty (no new log request has
+%% come in).
+-define(IDLE_DETECT_TIME_MSEC, 100).
+-define(IDLE_DETECT_TIME_USEC, 100000).
+
+%% Default disk log option values
+-define(DISK_LOG_TYPE, wrap).
+-define(DISK_LOG_MAX_NO_FILES, 10).
+-define(DISK_LOG_MAX_NO_BYTES, 1048576).
+
+%%%-----------------------------------------------------------------
+%%% Overload protection macros
+
+-define(timestamp(), erlang:monotonic_time(microsecond)).
+
+-define(get_mode(HandlerName),
+ case ets:lookup(HandlerName, mode) of
+ [{mode,sync}] ->
+ case whereis(HandlerName)==self() of
+ true -> async;
+ _ -> sync
+ end;
+ [{mode,M}] -> M;
+ _ -> async
+ end).
+
+-define(set_mode(HandlerName, M),
+ begin ets:insert(HandlerName, {mode,M}), M end).
+
+-define(change_mode(HandlerName, M0, M1),
+ if M0 == M1 ->
+ M0;
+ true ->
+ ets:insert(HandlerName, {mode,M1}),
+ M1
+ end).
+
+-define(min(X1, X2),
+ if X2 == undefined -> X1;
+ X2 < X1 -> X2;
+ true -> X1
+ end).
+
+-define(max(X1, X2),
+ if
+ X2 == undefined -> X1;
+ X2 > X1 -> X2;
+ true -> X1
+ end).
+
+-define(diff_time(OS_T1, OS_T0), OS_T1-OS_T0).
+
+%%%-----------------------------------------------------------------
+%%% The test hook macros make it possible to observe and manipulate
+%%% internal handler functionality. When enabled, these macros will
+%%% slow down execution and therefore should not be included in code
+%%% to be officially released.
+
+%% -define(TEST_HOOKS, true).
+-ifdef(TEST_HOOKS).
+ -define(TEST_HOOKS_TAB, logger_h_test_hooks).
+
+ -define(init_test_hooks(),
+ _ = case ets:whereis(?TEST_HOOKS_TAB) of
+ undefined -> ets:new(?TEST_HOOKS_TAB, [named_table,public]);
+ _ -> ok
+ end,
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
+ ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
+
+ -define(set_internal_log(MOD_FUNC),
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,MOD_FUNC})).
+
+ -define(set_result(OPERATION, RESULT),
+ ets:insert(?TEST_HOOKS_TAB, {OPERATION,RESULT})).
+
+ -define(set_defaults(),
+ ets:insert(?TEST_HOOKS_TAB, {internal_log,{logger,internal_log}}),
+ ets:insert(?TEST_HOOKS_TAB, {file_write,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {file_datasync,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_blog,ok}),
+ ets:insert(?TEST_HOOKS_TAB, {disk_log_sync,ok})).
+
+ -define(internal_log(TYPE, TERM),
+ try ets:lookup(?TEST_HOOKS_TAB, internal_log) of
+ [{_,{LMOD,LFUNC}}] -> apply(LMOD, LFUNC, [TYPE,TERM]);
+ _ -> logger:internal_log(TYPE, TERM)
+ catch _:_ -> logger:internal_log(TYPE, TERM) end).
+
+ -define(file_write(DEVICE, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, file_write) of
+ [{_,ok}] -> file:write(DEVICE, DATA);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> file:write(DEVICE, DATA) end).
+
+ -define(file_datasync(DEVICE),
+ try ets:lookup(?TEST_HOOKS_TAB, file_datasync) of
+ [{_,ok}] -> file:datasync(DEVICE);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> file:datasync(DEVICE) end).
+
+ -define(disk_log_blog(LOG, DATA),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_blog) of
+ [{_,ok}] -> disk_log:blog(LOG, DATA);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> disk_log:blog(LOG, DATA) end).
+
+ -define(disk_log_sync(LOG),
+ try ets:lookup(?TEST_HOOKS_TAB, disk_log_sync) of
+ [{_,ok}] -> disk_log:sync(LOG);
+ [{_,ERROR}] -> ERROR
+ catch _:_ -> disk_log:sync(LOG) end).
+
+ -define(DEFAULT_CALL_TIMEOUT, 5000).
+
+-else. % DEFAULTS!
+ -define(TEST_HOOKS_TAB, undefined).
+ -define(init_test_hooks(), ok).
+ -define(set_internal_log(_MOD_FUNC), ok).
+ -define(set_result(_OPERATION, _RESULT), ok).
+ -define(set_defaults(), ok).
+ -define(internal_log(TYPE, TERM), logger:internal_log(TYPE, TERM)).
+ -define(file_write(DEVICE, DATA), file:write(DEVICE, DATA)).
+ -define(file_datasync(DEVICE), file:datasync(DEVICE)).
+ -define(disk_log_blog(LOG, DATA), disk_log:blog(LOG, DATA)).
+ -define(disk_log_sync(LOG), disk_log:sync(LOG)).
+ -define(DEFAULT_CALL_TIMEOUT, 10000).
+-endif.
+
+%%%-----------------------------------------------------------------
+%%% These macros enable statistics counters in the state of the
+%%% handler which is useful for analysing the overload protection
+%%% behaviour. These counters should not be included in code to be
+%%% officially released (as some counters will grow very large
+%%% over time).
+
+%%-define(SAVE_STATS, true).
+-ifdef(SAVE_STATS).
+ -define(merge_with_stats(STATE),
+ STATE#{flushes => 0, flushed => 0, drops => 0,
+ casts => 0, calls => 0,
+ max_qlen => 0, max_time => 0}).
+
+ -define(update_max_qlen(QLEN, STATE),
+ begin #{max_qlen := QLEN0} = STATE,
+ STATE#{max_qlen => ?max(QLEN0,QLEN)} end).
+
+ -define(update_calls_or_casts(CALL_OR_CAST, INC, STATE),
+ case CALL_OR_CAST of
+ cast ->
+ #{casts := CASTS0} = STATE,
+ STATE#{casts => CASTS0+INC};
+ call ->
+ #{calls := CALLS0} = STATE,
+ STATE#{calls => CALLS0+INC}
+ end).
+
+ -define(update_max_time(TIME, STATE),
+ begin #{max_time := TIME0} = STATE,
+ STATE#{max_time => ?max(TIME0,TIME)} end).
+
+ -define(update_other(OTHER, VAR, INCVAL, STATE),
+ begin #{OTHER := VAR} = STATE,
+ STATE#{OTHER => VAR+INCVAL} end).
+
+-else. % DEFAULT!
+ -define(merge_with_stats(STATE), STATE).
+ -define(update_max_qlen(_QLEN, STATE), STATE).
+ -define(update_calls_or_casts(_CALL_OR_CAST, _INC, STATE), STATE).
+ -define(update_max_time(_TIME, STATE), STATE).
+ -define(update_other(_OTHER, _VAR, _INCVAL, STATE), STATE).
+-endif.
+
+%%%-----------------------------------------------------------------
+%%% These macros enable callbacks that make it possible to analyse
+%%% the overload protection behaviour from outside the handler
+%%% process (including dropped requests on the client side).
+%%% An external callback module (?OBSERVER_MOD) is required which
+%%% is not part of the kernel application. For this reason, these
+%%% callbacks should not be included in code to be officially released.
+
+%%-define(OBSERVER_MOD, logger_test).
+-ifdef(OBSERVER_MOD).
+ -define(start_observation(NAME), ?OBSERVER_MOD:start_observation(NAME)).
+ -define(observe(NAME,EVENT), ?OBSERVER_MOD:observe(NAME,EVENT)).
+
+-else. % DEFAULT!
+ -define(start_observation(_NAME), ok).
+ -define(observe(_NAME,_EVENT), ok).
+-endif.
+%%! <---
diff --git a/lib/kernel/src/logger_internal.hrl b/lib/kernel/src/logger_internal.hrl
new file mode 100644
index 0000000000..82df499c2b
--- /dev/null
+++ b/lib/kernel/src/logger_internal.hrl
@@ -0,0 +1,98 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-include_lib("kernel/include/logger.hrl").
+-define(LOGGER_TABLE,logger).
+-define(LOGGER_KEY,'$logger_config$').
+-define(HANDLER_KEY,'$handler_config$').
+-define(LOGGER_META_KEY,'$logger_metadata$').
+-define(STANDARD_HANDLER, logger_std_h).
+-define(DEFAULT_HANDLER_FILTERS,
+ ?DEFAULT_HANDLER_FILTERS([beam,erlang,otp])).
+-define(DEFAULT_HANDLER_FILTERS(Domain),
+ [{remote_gl,{fun logger_filters:remote_gl/2,stop}},
+ {domain,{fun logger_filters:domain/2,{log,prefix_of,Domain}}},
+ {no_domain,{fun logger_filters:domain/2,{log,no_domain,[]}}}]).
+-define(DEFAULT_FORMATTER,logger_formatter).
+-define(DEFAULT_FORMAT_CONFIG,#{legacy_header=>true,
+ template=>?DEFAULT_FORMAT_TEMPLATE_HEADER}).
+-define(DEFAULT_FORMAT_TEMPLATE_HEADER,
+ [{logger_formatter,header},"\n",msg,"\n"]).
+-define(DEFAULT_FORMAT_TEMPLATE_SINGLE,
+ [time," ",level,": ",msg,"\n"]).
+-define(DEFAULT_FORMAT_TEMPLATE,
+ [time," ",level,":\n",msg,"\n"]).
+
+-define(DEFAULT_LOGGER_CALL_TIMEOUT, infinity).
+
+-define(LOG_INTERNAL(Level,Report),
+ case logger:allow(Level,?MODULE) of
+ true ->
+ %% Spawn this to avoid deadlocks
+ _ = spawn(logger,macro_log,[?LOCATION,Level,Report,
+ logger:add_default_metadata(#{})]),
+ ok;
+ false ->
+ ok
+ end).
+
+%%%-----------------------------------------------------------------
+%%% Levels
+%%% Using the same levels as syslog
+-define(LEVELS,[emergency,
+ alert,
+ critical,
+ error,
+ warning,
+ notice,
+ info,
+ debug]).
+-define(EMERGENCY,0).
+-define(ALERT,1).
+-define(CRITICAL,2).
+-define(ERROR,3).
+-define(WARNING,4).
+-define(NOTICE,5).
+-define(INFO,6).
+-define(DEBUG,7).
+
+-define(IS_LEVEL(L),
+ (L=:=emergency orelse
+ L=:=alert orelse
+ L=:=critical orelse
+ L=:=error orelse
+ L=:=warning orelse
+ L=:=notice orelse
+ L=:=info orelse
+ L=:=debug)).
+
+-define(IS_MSG(Msg),
+ ((is_tuple(Msg) andalso tuple_size(Msg)==2)
+ andalso
+ (is_list(element(1,Msg)) andalso is_list(element(2,Msg)))
+ orelse
+ (element(1,Msg)==report andalso ?IS_REPORT(element(2,Msg)))
+ orelse
+ (element(1,Msg)==string andalso ?IS_STRING(element(2,Msg))))).
+
+-define(IS_REPORT(Report),
+ (is_map(Report) orelse (is_list(Report) andalso is_tuple(hd(Report))))).
+
+-define(IS_STRING(String),
+ (is_list(String) orelse is_binary(String))).
diff --git a/lib/kernel/src/logger_server.erl b/lib/kernel/src/logger_server.erl
new file mode 100644
index 0000000000..6ef3b8582a
--- /dev/null
+++ b/lib/kernel/src/logger_server.erl
@@ -0,0 +1,440 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_server).
+
+-behaviour(gen_server).
+
+%% API
+-export([start_link/0,
+ add_handler/3, remove_handler/1,
+ add_filter/2, remove_filter/2,
+ set_module_level/2, reset_module_level/1,
+ cache_module_level/1,
+ set_config/2, set_config/3]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2]).
+
+-include("logger_internal.hrl").
+
+-define(SERVER, logger).
+
+-record(state, {tid}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+start_link() ->
+ gen_server:start_link({local, ?SERVER}, ?MODULE, [], []).
+
+add_handler(Id,Module,Config0) ->
+ case sanity_check(logger,handlers,[Id]) of
+ ok ->
+ try check_mod(Module) of
+ ok ->
+ case sanity_check(Id,Config0) of
+ ok ->
+ Default = default_config(Id),
+ Config = maps:merge(Default,Config0),
+ call({add_handler,Id,Module,Config});
+ Error ->
+ Error
+ end
+ catch throw:Error ->
+ {error,Error}
+ end;
+ Error ->
+ Error
+ end.
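+
+%% Illustrative example (not part of this change): adding a standard
+%% handler via this server; the handler id and config below are made up:
+%%   logger_server:add_handler(my_std_h, logger_std_h,
+%%                             #{level => info, filter_default => log}).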
+
+remove_handler(HandlerId) ->
+ call({remove_handler,HandlerId}).
+
+add_filter(Owner,Filter) ->
+ case sanity_check(Owner,filters,[Filter]) of
+ ok -> call({add_filter,Owner,Filter});
+ Error -> Error
+ end.
+
+remove_filter(Owner,FilterId) ->
+ call({remove_filter,Owner,FilterId}).
+
+set_module_level(Module,Level) when is_atom(Module) ->
+ case sanity_check(logger,level,Level) of
+ ok -> call({set_module_level,Module,Level});
+ Error -> Error
+ end;
+set_module_level(Module,_) ->
+ {error,{not_a_module,Module}}.
+
+reset_module_level(Module) when is_atom(Module) ->
+ call({reset_module_level,Module});
+reset_module_level(Module) ->
+ {error,{not_a_module,Module}}.
+
+cache_module_level(Module) ->
+ gen_server:cast(?SERVER,{cache_module_level,Module}).
+
+
+set_config(Owner,Key,Value) ->
+ case sanity_check(Owner,Key,Value) of
+ ok -> call({update_config,Owner,#{Key=>Value}});
+ Error -> Error
+ end.
+
+set_config(Owner,Config0) ->
+ case sanity_check(Owner,Config0) of
+ ok ->
+ Config = maps:merge(default_config(Owner),Config0),
+ call({set_config,Owner,Config});
+ Error ->
+ Error
+ end.
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([]) ->
+ process_flag(trap_exit, true),
+ Tid = logger_config:new(?LOGGER_TABLE),
+ LoggerConfig = maps:merge(default_config(logger),
+ #{handlers=>[logger_simple]}),
+ logger_config:create(Tid,logger,LoggerConfig),
+ SimpleConfig0 = maps:merge(default_config(logger_simple),
+ #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS,
+ logger_simple=>#{buffer=>true}}),
+ %% If this fails, then the node should crash
+ {ok,SimpleConfig} =
+ logger_simple:adding_handler(logger_simple,SimpleConfig0),
+ logger_config:create(Tid,logger_simple,logger_simple,SimpleConfig),
+ {ok, #state{tid=Tid}}.
+
+handle_call({add_handler,Id,Module,HConfig}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:exist(Tid,Id) of
+ true ->
+ {error,{already_exist,Id}};
+ false ->
+ %% inform the handler
+ case call_h(Module,adding_handler,[Id,HConfig],{ok,HConfig}) of
+ {ok,HConfig1} ->
+ logger_config:create(Tid,Id,Module,HConfig1),
+ {ok,Config} = do_get_config(Tid,logger),
+ Handlers = maps:get(handlers,Config,[]),
+ do_set_config(Tid,logger,
+ Config#{handlers=>[Id|Handlers]}),
+ ok;
+ {error,HReason} ->
+ {error,{handler_not_added,HReason}}
+ end
+ end,
+ {reply,Reply,State};
+handle_call({remove_handler,HandlerId}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,HandlerId) of
+ {ok,{Module,_}} ->
+ {ok,Config} = do_get_config(Tid,logger),
+ Handlers0 = maps:get(handlers,Config,[]),
+ Handlers = lists:delete(HandlerId,Handlers0),
+ %% inform the handler
+ _ = call_h(Module,removing_handler,[HandlerId],ok),
+ do_set_config(Tid,logger,Config#{handlers=>Handlers}),
+ logger_config:delete(Tid,HandlerId),
+ ok;
+ _ ->
+ {error,{not_found,HandlerId}}
+ end,
+ {reply,Reply,State};
+handle_call({add_filter,Id,Filter}, _From,#state{tid=Tid}=State) ->
+ Reply = do_add_filter(Tid,Id,Filter),
+ {reply,Reply,State};
+handle_call({remove_filter,Id,FilterId}, _From, #state{tid=Tid}=State) ->
+ Reply = do_remove_filter(Tid,Id,FilterId),
+ {reply,Reply,State};
+handle_call({update_config,Id,NewConfig}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,Id) of
+ {ok,{Module,OldConfig}} ->
+ Config = maps:merge(OldConfig,NewConfig),
+ case call_h(Module,changing_config,[Id,OldConfig,Config],
+ {ok,Config}) of
+ {ok,Config1} ->
+ do_set_config(Tid,Id,Config1);
+ Error ->
+ Error
+ end;
+ {ok,OldConfig} ->
+ Config = maps:merge(OldConfig,NewConfig),
+ do_set_config(Tid,Id,Config);
+ Error ->
+ Error
+ end,
+ {reply,Reply,State};
+handle_call({set_config,logger,Config}, _From, #state{tid=Tid}=State) ->
+ Reply = do_set_config(Tid,logger,Config),
+ {reply,Reply,State};
+handle_call({set_config,HandlerId,Config}, _From, #state{tid=Tid}=State) ->
+ Reply =
+ case logger_config:get(Tid,HandlerId) of
+ {ok,{Module,OldConfig}} ->
+ case call_h(Module,changing_config,[HandlerId,OldConfig,Config],
+ {ok,Config}) of
+ {ok,Config1} ->
+ do_set_config(Tid,HandlerId,Config1);
+ Error ->
+ Error
+ end;
+ _ ->
+ {error,{not_found,HandlerId}}
+ end,
+ {reply,Reply,State};
+handle_call({set_module_level,Module,Level}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:set_module_level(Tid,Module,Level),
+ {reply,Reply,State};
+handle_call({reset_module_level,Module}, _From, #state{tid=Tid}=State) ->
+ Reply = logger_config:reset_module_level(Tid,Module),
+ {reply,Reply,State}.
+
+handle_cast({cache_module_level,Module}, #state{tid=Tid}=State) ->
+ logger_config:cache_module_level(Tid,Module),
+ {noreply, State}.
+
+%% Interface for those who can't call the API - e.g. the emulator, or
+%% places related to code loading.
+%%
+%% This can also be log events from remote nodes, which are sent from
+%% logger.erl when the group leader of the client process is not on
+%% the same node as the client process itself.
+handle_info({log,Level,Format,Args,Meta}, State) ->
+ logger:log(Level,Format,Args,Meta),
+ {noreply, State};
+handle_info({log,Level,Report,Meta}, State) ->
+ logger:log(Level,Report,Meta),
+ {noreply, State};
+handle_info({Ref,_Reply},State) when is_reference(Ref) ->
+ %% Assuming this is a timed-out gen_server reply - ignoring
+ {noreply, State}.
+
+terminate(_Reason, _State) ->
+ ok.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+call(Request) ->
+ case whereis(?SERVER) of
+ Pid when Pid==self() ->
+ {error,{attempting_syncronous_call_to_self,Request}};
+ _ ->
+ gen_server:call(?SERVER,Request,?DEFAULT_LOGGER_CALL_TIMEOUT)
+ end.
+
+do_add_filter(Tid,Id,{FId,_} = Filter) ->
+ case do_get_config(Tid,Id) of
+ {ok,Config} ->
+ Filters = maps:get(filters,Config,[]),
+ case lists:keymember(FId,1,Filters) of
+ true ->
+ {error,{already_exist,FId}};
+ false ->
+ do_set_config(Tid,Id,Config#{filters=>[Filter|Filters]})
+ end;
+ Error ->
+ Error
+ end.
+
+do_remove_filter(Tid,Id,FilterId) ->
+ case do_get_config(Tid,Id) of
+ {ok,Config} ->
+ Filters0 = maps:get(filters,Config,[]),
+ case lists:keytake(FilterId,1,Filters0) of
+ {value,_,Filters} ->
+ do_set_config(Tid,Id,Config#{filters=>Filters});
+ false ->
+ {error,{not_found,FilterId}}
+ end;
+ Error ->
+ Error
+ end.
+
+do_get_config(Tid,Id) ->
+ case logger_config:get(Tid,Id) of
+ {ok,{_,Config}} ->
+ {ok,Config};
+ {ok,Config} ->
+ {ok,Config};
+ Error ->
+ Error
+ end.
+
+do_set_config(Tid,Id,Config) ->
+ logger_config:set(Tid,Id,Config),
+ ok.
+
+default_config(logger) ->
+ #{level=>info,
+ filters=>[],
+ filter_default=>log,
+ handlers=>[]};
+default_config(_) ->
+ #{level=>info,
+ filters=>[],
+ filter_default=>log,
+ formatter=>{?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}}.
+
+sanity_check(Owner,Key,Value) ->
+ sanity_check_1(Owner,[{Key,Value}]).
+
+sanity_check(HandlerId,Config) when is_map(Config) ->
+ sanity_check_1(HandlerId,maps:to_list(Config));
+sanity_check(_,Config) ->
+ {error,{invalid_handler_config,Config}}.
+
+sanity_check_1(Owner,Config) when is_list(Config) ->
+ try
+ Type = get_type(Owner),
+ check_config(Type,Config)
+ catch throw:Error -> {error,Error}
+ end.
+
+get_type(logger) ->
+ logger;
+get_type(Id) ->
+ check_id(Id),
+ handler.
+
+check_config(Owner,[{level,Level}|Config]) ->
+ check_level(Level),
+ check_config(Owner,Config);
+check_config(logger,[{handlers,Handlers}|Config]) ->
+ check_handlers(Handlers),
+ check_config(logger,Config);
+check_config(Owner,[{filters,Filters}|Config]) ->
+ check_filters(Filters),
+ check_config(Owner,Config);
+check_config(Owner,[{filter_default,FD}|Config]) ->
+ check_filter_default(FD),
+ check_config(Owner,Config);
+check_config(handler,[{formatter,Formatter}|Config]) ->
+ check_formatter(Formatter),
+ check_config(handler,Config);
+check_config(logger,[C|_]) ->
+ throw({invalid_logger_config,C});
+check_config(handler,[{_,_}|Config]) ->
+ %% Arbitrary config elements are allowed for handlers
+ check_config(handler,Config);
+check_config(_,[]) ->
+ ok.
+
+check_id(Id) when is_atom(Id) ->
+ ok;
+check_id(Id) ->
+ throw({invalid_id,Id}).
+
+check_mod(Mod) when is_atom(Mod) ->
+ ok;
+check_mod(Mod) ->
+ throw({invalid_module,Mod}).
+
+check_level({LevelInt,cached}) when LevelInt>=?EMERGENCY, LevelInt=<?DEBUG ->
+ ok;
+check_level(Level) ->
+ case lists:member(Level,?LEVELS) of
+ true ->
+ ok;
+ false ->
+ throw({invalid_level,Level})
+ end.
+
+check_handlers([Id|Handlers]) ->
+ check_id(Id),
+ check_handlers(Handlers);
+check_handlers([]) ->
+ ok;
+check_handlers(Handlers) ->
+ throw({invalid_handlers,Handlers}).
+
+check_filters([{Id,{Fun,_Args}}|Filters]) when is_atom(Id), is_function(Fun,2) ->
+ check_filters(Filters);
+check_filters([Filter|_]) ->
+ throw({invalid_filter,Filter});
+check_filters([]) ->
+ ok;
+check_filters(Filters) ->
+ throw({invalid_filters,Filters}).
+
+check_filter_default(FD) when FD==stop; FD==log ->
+ ok;
+check_filter_default(FD) ->
+ throw({invalid_filter_default,FD}).
+
+check_formatter({logger_formatter,Config}) when is_map(Config) ->
+ check_logger_formatter_config(maps:to_list(Config));
+check_formatter({logger_formatter,Config}) ->
+ throw({invalid_formatter_config,Config});
+check_formatter({Mod,_}) ->
+ %% no knowledge of other formatters
+ check_mod(Mod);
+check_formatter(Formatter) ->
+ throw({invalid_formatter,Formatter}).
+
+
+check_logger_formatter_config([{template,T}|Config]) when is_list(T) ->
+ case lists:all(fun(X) when is_atom(X) -> true;
+ (X) when is_tuple(X), is_atom(element(1,X)) -> true;
+ (X) when is_list(X) -> io_lib:printable_unicode_list(X);
+ (_) -> false
+ end,
+ T) of
+ true ->
+ check_logger_formatter_config(Config);
+ false ->
+ throw({invalid_formatter_template,T})
+ end;
+check_logger_formatter_config([{legacy_header,LH}|Config]) when is_boolean(LH) ->
+ check_logger_formatter_config(Config);
+check_logger_formatter_config([{single_line,SL}|Config]) when is_boolean(SL) ->
+ check_logger_formatter_config(Config);
+check_logger_formatter_config([{utc,Utc}|Config]) when is_boolean(Utc) ->
+ check_logger_formatter_config(Config);
+check_logger_formatter_config([C|_]) ->
+ throw({invalid_formatter_config,C});
+check_logger_formatter_config([]) ->
+ ok.
+
+call_h(Module, Function, Args, DefRet) ->
+ %% Not calling code:ensure_loaded + erlang:function_exported here,
+ %% since in some rare terminal cases, the code_server might not
+ %% exist and we'll get a deadlock in removing the handler.
+ try apply(Module, Function, Args)
+ catch
+ C:R:S ->
+ case {C,R,S} of
+ {error,undef,[{Module,Function,Args,_}|_]} ->
+ DefRet;
+ _ ->
+ {error,{callback_crashed,
+ {C,R,logger:filter_stacktrace(?MODULE,S)}}}
+ end
+ end.
diff --git a/lib/kernel/src/logger_simple.erl b/lib/kernel/src/logger_simple.erl
new file mode 100644
index 0000000000..23ff6ccd2e
--- /dev/null
+++ b/lib/kernel/src/logger_simple.erl
@@ -0,0 +1,236 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple).
+
+-export([adding_handler/2, removing_handler/1, log/2]).
+-export([get_buffer/0]).
+
+%% This module implements a simple handler for logger. It is the
+%% default used during system start.
+
+%%%-----------------------------------------------------------------
+%%% API
+get_buffer() ->
+ case whereis(?MODULE) of
+ undefined ->
+ {error,noproc};
+ Pid ->
+ Ref = erlang:monitor(process,Pid),
+ Pid ! {get_buffer,self()},
+ receive
+ {buffer,Buffer} ->
+ erlang:demonitor(Ref,[flush]),
+ {ok,Buffer};
+ {'DOWN',Ref,process,Pid,Reason} ->
+ {error,Reason}
+ end
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Logger callback
+
+adding_handler(?MODULE,Config) ->
+ Me = self(),
+ case whereis(?MODULE) of
+ undefined ->
+ {Pid,Ref} = spawn_opt(fun() -> init(Me,Config) end,
+ [link,monitor,{message_queue_data,off_heap}]),
+ receive
+ {'DOWN',Ref,process,Pid,Reason} ->
+ {error,Reason};
+ {Pid,started} ->
+ erlang:demonitor(Ref),
+ {ok,Config}
+ end;
+ _ ->
+ {error,{handler_process_name_already_exists,?MODULE}}
+ end.
+
+removing_handler(?MODULE) ->
+ case whereis(?MODULE) of
+ undefined ->
+ ok;
+ Pid ->
+ Ref = erlang:monitor(process,Pid),
+ unlink(Pid),
+ Pid ! stop,
+ receive {'DOWN',Ref,process,Pid,_} ->
+ ok
+ end
+ end.
+
+log(#{meta:=#{error_logger:=#{tag:=info_report,type:=Type}}},_Config)
+ when Type=/=std_info ->
+ %% Skip info reports that are not 'std_info' (ref simple logger in
+ %% error_logger)
+ ok;
+log(#{msg:=_,meta:=#{time:=_}}=Log,_Config) ->
+ _ = case whereis(?MODULE) of
+ undefined ->
+ %% Is the node on the way down? Real emergency?
+ %% Log directly from client just to get it out
+ do_log(
+ #{level=>error,
+ msg=>{report,{error,simple_handler_process_dead}},
+ meta=>#{time=>erlang:monotonic_time(microsecond)}}),
+ do_log(Log);
+ _ ->
+ ?MODULE ! {log,Log}
+ end,
+ ok;
+log(_,_) ->
+ %% Unexpected log.
+ %% We don't want to crash the simple logger, so ignore this.
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Process
+init(Starter,Config) ->
+ register(?MODULE,self()),
+ Starter ! {self(),started},
+ BufferSize =
+ case Config of
+ #{?MODULE:=#{buffer:=true}} ->
+ 10;
+ _ ->
+ infinity
+ end,
+ loop(#{buffer_size=>BufferSize,dropped=>0,buffer=>[]},infinity).
+
+loop(Buffer,Timeout) ->
+ receive
+ stop ->
+ ok;
+ {get_buffer,From} ->
+ loop(Buffer#{send_to=>From},0);
+ {log,#{msg:=_,meta:=#{time:=_}}=Log} ->
+ do_log(Log),
+ loop(update_buffer(Buffer,Log),Timeout);
+ _ ->
+ %% Unexpected message - flush it!
+ loop(Buffer,Timeout)
+ after Timeout ->
+ #{dropped:=D,buffer:=B,send_to:=Pid} = Buffer,
+ LogList = lists:reverse(B) ++ drop_msg(D),
+ Pid ! {buffer,LogList},
+ loop(Buffer#{buffer_size=>infinity,
+ dropped=>0,
+ buffer=>[],
+ send_to=>false},
+ infinity)
+ end.
+
+update_buffer(#{buffer_size:=infinity}=Buffer,_Log) ->
+ Buffer;
+update_buffer(#{buffer_size:=0,dropped:=D}=Buffer,_Log) ->
+ Buffer#{dropped=>D+1};
+update_buffer(#{buffer_size:=S,buffer:=B}=Buffer,Log) ->
+ Buffer#{buffer_size=>S-1,buffer=>[Log|B]}.
+
+drop_msg(0) ->
+ [];
+drop_msg(N) ->
+ [#{level=>info,
+ msg=>{"Simple handler buffer full, dropped ~w messages",[N]},
+ meta=>#{time=>erlang:monotonic_time(microsecond)}}].
+
+%%%-----------------------------------------------------------------
+%%% Internal
+
+%% Can't do io_lib:format here - this handler may run before stdlib is
+%% available, so erlang:display* is used instead.
+
+do_log(#{msg:={report,Report},
+ meta:=#{time:=T,error_logger:=#{type:=Type}}}) ->
+ display_date(T),
+ display_report(Type,Report);
+do_log(#{msg:=Msg,meta:=#{time:=T}}) ->
+ display_date(T),
+ display(Msg).
+
+display_date(Timestamp0) when is_integer(Timestamp0) ->
+ Timestamp = Timestamp0 + erlang:time_offset(microsecond),
+ Micro = Timestamp rem 1000000,
+ Sec = Timestamp div 1000000,
+ {{Y,Mo,D},{H,Mi,S}} = erlang:universaltime_to_localtime(
+ erlang:posixtime_to_universaltime(Sec)),
+ erlang:display_string(
+ integer_to_list(Y) ++ "-" ++
+ pad(Mo,2) ++ "-" ++
+ pad(D,2) ++ " " ++
+ pad(H,2) ++ ":" ++
+ pad(Mi,2) ++ ":" ++
+ pad(S,2) ++ "." ++
+ pad(Micro,6) ++ " ").
+
+pad(Int,Size) when is_integer(Int) ->
+ pad(integer_to_list(Int),Size);
+pad(Str,Size) when length(Str)==Size ->
+ Str;
+pad(Str,Size) ->
+ pad([$0|Str],Size).
+
+display({string,Chardata}) ->
+ try unicode:characters_to_list(Chardata) of
+ String -> erlang:display_string(String), erlang:display_string("\n")
+ catch _:_ -> erlang:display(Chardata)
+ end;
+display({report,Report}) when is_map(Report) ->
+ display_report(maps:to_list(Report));
+display({report,Report}) ->
+ display_report(Report);
+display({F, A}) when is_list(F), is_list(A) ->
+ erlang:display_string(F ++ "\n"),
+ [begin
+ erlang:display_string("\t"),
+ erlang:display(Arg)
+ end || Arg <- A],
+ ok.
+
+display_report(Atom, A) when is_atom(Atom) ->
+ %% The widest atom seems to be 'supervisor_report' at 17.
+ ColumnWidth = 20,
+ AtomString = atom_to_list(Atom),
+ AtomLength = length(AtomString),
+ Padding = lists:duplicate(ColumnWidth - AtomLength, $\s),
+ erlang:display_string(AtomString ++ Padding),
+ display_report(A);
+display_report(F, A) ->
+ erlang:display({F, A}).
+
+display_report([A, []]) ->
+ %% Special case for crash reports when the process has no links
+ display_report(A);
+display_report(A = [_|_]) ->
+ case lists:all(fun({Key,_Value}) -> is_atom(Key); (_) -> false end, A) of
+ true ->
+ erlang:display_string("\n"),
+ lists:foreach(
+ fun({Key, Value}) ->
+ erlang:display_string(
+ " " ++
+ atom_to_list(Key) ++
+ ": "),
+ erlang:display(Value)
+ end, A);
+ false ->
+ erlang:display(A)
+ end;
+display_report(A) ->
+ erlang:display(A).
diff --git a/lib/kernel/src/logger_std_h.erl b/lib/kernel/src/logger_std_h.erl
new file mode 100644
index 0000000000..cbc9db372c
--- /dev/null
+++ b/lib/kernel/src/logger_std_h.erl
@@ -0,0 +1,799 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_std_h).
+
+-behaviour(gen_server).
+
+-include("logger.hrl").
+-include("logger_internal.hrl").
+-include("logger_h_common.hrl").
+
+-include_lib("kernel/include/file.hrl").
+
+%% API
+-export([start_link/3, info/1, filesync/1, reset/1]).
+
+%% gen_server and proc_lib callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% logger callbacks
+-export([log/2, adding_handler/2, removing_handler/1,
+ changing_config/3, swap_buffer/2]).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Start a standard handler process and link to caller.
+%%% This function is called by the kernel supervisor when this
+%%% handler process gets added
+-spec start_link(Name, Config, HandlerState) -> {ok,Pid} | {error,Reason} when
+ Name :: atom(),
+ Config :: logger:config(),
+ HandlerState :: map(),
+ Pid :: pid(),
+ Reason :: term().
+
+start_link(Name, Config, HandlerState) ->
+ proc_lib:start_link(?MODULE,init,[[Name,Config,HandlerState]]).
+
+%%%-----------------------------------------------------------------
+%%%
+-spec filesync(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+filesync(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, filesync, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+filesync(Name) ->
+ {error,{badarg,{filesync,[Name]}}}.
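+
+%% Illustrative example (not part of this change): forcing a file sync on
+%% a handler registered under the made-up name 'my_std_h':
+%%   logger_std_h:filesync(my_std_h).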
+
+%%%-----------------------------------------------------------------
+%%%
+-spec info(Name) -> Info | {error,Reason} when
+ Name :: atom(),
+ Info :: term(),
+ Reason :: handler_busy | {badarg,term()}.
+
+info(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, info, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+info(Name) ->
+ {error,{badarg,{info,[Name]}}}.
+
+%%%-----------------------------------------------------------------
+%%%
+-spec reset(Name) -> ok | {error,Reason} when
+ Name :: atom(),
+ Reason :: handler_busy | {badarg,term()}.
+
+reset(Name) when is_atom(Name) ->
+ try
+ gen_server:call(Name, reset, ?DEFAULT_CALL_TIMEOUT)
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+reset(Name) ->
+ {error,{badarg,{reset,[Name]}}}.
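+
+%%%-----------------------------------------------------------------
+%%% Usage sketch (illustrative only; 'my_std_h' is an assumed
+%%% handler name):
+%%%
+%%%   ok = logger_std_h:filesync(my_std_h),
+%%%   Info = logger_std_h:info(my_std_h),
+%%%   ok = logger_std_h:reset(my_std_h).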
+
+
+%%%===================================================================
+%%% logger callbacks
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%% Handler being added
+adding_handler(Name, Config) ->
+ case check_config(adding, Name, Config) of
+ {ok, Config1} ->
+ %% create initial handler state by merging defaults with config
+ HConfig = maps:get(?MODULE, Config1, #{}),
+ HState = maps:merge(get_init_state(), HConfig),
+ case logger_h_common:overload_levels_ok(HState) of
+ true ->
+ case start(Name, Config1, HState) of
+ ok ->
+ %% Make sure wait_for_buffer is not stored, so we
+ %% won't hang and wait for buffer on a restart
+ {ok, maps:remove(wait_for_buffer,Config1)};
+ Error ->
+ Error
+ end;
+ false ->
+ #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = HState,
+ {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}
+ end;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Updating handler config
+changing_config(Name,
+ OldConfig=#{id:=Id, ?MODULE:=#{type:=Type}},
+ NewConfig=#{id:=Id}) ->
+ MyConfig = maps:get(?MODULE, NewConfig, #{}),
+ case maps:get(type, MyConfig, Type) of
+ Type ->
+ MyConfig1 = MyConfig#{type=>Type},
+ changing_config1(Name, OldConfig,
+ NewConfig#{?MODULE=>MyConfig1});
+ _ ->
+ {error,{illegal_config_change,OldConfig,NewConfig}}
+ end;
+changing_config(_Name, OldConfig, NewConfig) ->
+ {error,{illegal_config_change,OldConfig,NewConfig}}.
+
+changing_config1(Name, OldConfig, NewConfig) ->
+ case check_config(changing, Name, NewConfig) of
+ Result = {ok,NewConfig1} ->
+ try gen_server:call(Name, {change_config,OldConfig,NewConfig1},
+ ?DEFAULT_CALL_TIMEOUT) of
+ ok -> Result;
+ HError -> HError
+ catch
+ _:{timeout,_} -> {error,handler_busy}
+ end;
+ Error ->
+ Error
+ end.
+
+check_config(adding, Name, Config0) ->
+ %% Merge in defaults on top level
+ Config = maps:merge(#{id => Name}, Config0),
+ %% Merge in defaults on handler level
+ MyConfig0 = maps:get(?MODULE, Config, #{}),
+ MyConfig = maps:merge(#{type => standard_io},
+ MyConfig0),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok ->
+ {ok,Config#{?MODULE=>MyConfig}};
+ Error ->
+ Error
+ end;
+check_config(changing, _Name, Config) ->
+ MyConfig = maps:get(?MODULE, Config, #{}),
+ case check_my_config(maps:to_list(MyConfig)) of
+ ok -> {ok,Config};
+ Error -> Error
+ end.
+
+check_my_config([{type,Type} | Config]) when Type == standard_io;
+ Type == standard_error ->
+ check_my_config(Config);
+check_my_config([{type,{file,File}} | Config]) when is_list(File) ->
+ check_my_config(Config);
+check_my_config([{type,{file,File,Modes}} | Config]) when is_list(File),
+ is_list(Modes) ->
+ check_my_config(Config);
+check_my_config([Other | Config]) ->
+ case logger_h_common:check_common_config(Other) of
+ valid ->
+ check_my_config(Config);
+ invalid ->
+ {error,{invalid_config,?MODULE,Other}}
+ end;
+check_my_config([]) ->
+ ok.
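+
+%% Illustrative handler-level config maps that pass check_my_config/1
+%% (the file names are made up):
+%%
+%%   #{type => standard_io}
+%%   #{type => {file, "/var/log/erlang.log"}}
+%%   #{type => {file, "/var/log/erlang.log", [raw,append]}}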
+
+
+%%%-----------------------------------------------------------------
+%%% Handler being removed
+removing_handler(Name) ->
+ stop(Name).
+
+%%%-----------------------------------------------------------------
+%%% Get buffer when swapping from simple handler
+swap_buffer(Name,Buffer) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ Name ! {buffer,Buffer}
+ end.
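+
+%% The events handed over here are replayed by enter_loop/2 below
+%% when the handler config contains wait_for_buffer := true.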
+
+%%%-----------------------------------------------------------------
+%%% Log a string or report
+-spec log(Log, Config) -> ok | dropped when
+ Log :: logger:log(),
+ Config :: logger:config().
+
+log(Log,Config=#{id:=Name}) ->
+ %% if the handler has crashed, we must drop this request
+ %% and hope the handler restarts so we can try again
+ true = is_pid(whereis(Name)),
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ logger_h_common:call_cast_or_drop(Name, Bin).
+
+%%%===================================================================
+%%% gen_server callbacks
+%%%===================================================================
+
+init([Name, Config,
+ State0 = #{type := Type, file_ctrl_sync_int := FileCtrlSyncInt}]) ->
+ register(Name, self()),
+ process_flag(trap_exit, true),
+ process_flag(message_queue_data, off_heap),
+
+ ?init_test_hooks(),
+ ?start_observation(Name),
+
+ case do_init(Name, Type) of
+ {ok,InitState} ->
+ catch ets:new(Name, [public, named_table]),
+ ?set_mode(Name, async),
+ State = maps:merge(State0, InitState),
+ T0 = ?timestamp(),
+ State1 =
+ ?merge_with_stats(State#{mode => async,
+ file_ctrl_sync => FileCtrlSyncInt,
+ last_qlen => 0,
+ last_log_ts => T0,
+ burst_win_ts => T0,
+ burst_msg_count => 0}),
+ proc_lib:init_ack({ok,self()}),
+ gen_server:cast(self(), {repeated_filesync,T0}),
+ enter_loop(Config, State1);
+ Error ->
+ logger_h_common:error_notify({init_handler,Name,Error}),
+ proc_lib:init_ack(Error)
+ end.
+
+do_init(Name, Std) when Std=:=standard_io; Std=:=standard_error ->
+ case open_log_file(Name, Std) of
+ {ok,FileCtrlPid} ->
+ {ok,#{id=>Name,type=>Std,file_ctrl_pid=>FileCtrlPid}};
+ Error ->
+ Error
+ end;
+do_init(Name, FileInfo) when is_tuple(FileInfo) ->
+ case open_log_file(Name, FileInfo) of
+ {ok,FileCtrlPid} ->
+ {ok,#{id=>Name,type=>FileInfo,file_ctrl_pid=>FileCtrlPid}};
+ Error ->
+ Error
+ end.
+
+enter_loop(#{wait_for_buffer:=true}=Config,State) ->
+ State1 =
+ receive
+ {buffer,Buffer} ->
+ lists:foldl(
+ fun(Log,S) ->
+ Bin = logger_h_common:log_to_binary(Log,Config),
+ {_,S1} = do_log(Bin,cast,S),
+ S1
+ end,
+ State,
+ Buffer)
+ end,
+ gen_server:enter_loop(?MODULE,[],State1);
+enter_loop(_Config,State) ->
+ gen_server:enter_loop(?MODULE,[],State).
+
+%% This is the synchronous log request.
+handle_call({log, Bin}, _From, State) ->
+ {Result,State1} = do_log(Bin, call, State),
+ %% Result == ok | dropped
+ {reply,Result, State1};
+
+handle_call(filesync, _From, State = #{type := Type,
+ file_ctrl_pid := FileCtrlPid}) ->
+ if is_atom(Type) ->
+ {reply, ok, State};
+ true ->
+ {reply, file_ctrl_filesync_sync(FileCtrlPid), State}
+ end;
+
+handle_call({change_config,_OldConfig,NewConfig}, _From,
+ State = #{filesync_repeat_interval := FSyncInt0,
+ last_log_ts := LastLogTS}) ->
+ HConfig = maps:get(?MODULE, NewConfig, #{}),
+ State1 = maps:merge(State, HConfig),
+ case logger_h_common:overload_levels_ok(State1) of
+ true ->
+ _ =
+ case maps:get(filesync_repeat_interval, HConfig, undefined) of
+ undefined ->
+ ok;
+ no_repeat ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined));
+ FSyncInt0 ->
+ ok;
+ _FSyncInt1 ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref,
+ State,
+ undefined)),
+ gen_server:cast(self(), {repeated_filesync,
+ LastLogTS})
+ end,
+ {reply, ok, State1};
+ false ->
+ #{toggle_sync_qlen := TSQL,
+ drop_new_reqs_qlen := DNRQL,
+ flush_reqs_qlen := FRQL} = State1,
+ {reply, {error,{invalid_levels,{TSQL,DNRQL,FRQL}}}, State}
+ end;
+
+handle_call(info, _From, State) ->
+ {reply, State, State};
+
+handle_call(reset, _From, State) ->
+ State1 = ?merge_with_stats(State),
+ {reply, ok, State1#{last_qlen => 0,
+ last_log_ts => ?timestamp()}};
+
+handle_call(stop, _From, State) ->
+ {stop, {shutdown,stopped}, ok, State}.
+
+%% This is the asynchronous log request.
+handle_cast({log, Bin}, State) ->
+ {_,State1} = do_log(Bin, cast, State),
+ {noreply, State1};
+
+%% If FILESYNC_REPEAT_INTERVAL is set to a millisec value, this
+%% clause gets called repeatedly by the handler. In order to
+%% guarantee that a filesync *always* happens after the last log
+%% request, the repeat operation must be active!
+handle_cast({repeated_filesync,LastLogTS0},
+ State = #{type := Type,
+ file_ctrl_pid := FileCtrlPid,
+ filesync_repeat_interval := FSyncInt,
+ last_log_ts := LastLogTS1}) ->
+ State1 =
+ if not is_atom(Type), is_integer(FSyncInt) ->
+ %% only do filesync if something has been
+ %% written since last time we checked
+ if LastLogTS1 == LastLogTS0 ->
+ ok;
+ true ->
+ file_ctrl_filesync_async(FileCtrlPid)
+ end,
+ {ok,TRef} =
+ timer:apply_after(FSyncInt, gen_server,cast,
+ [self(),{repeated_filesync,LastLogTS1}]),
+ State#{rep_sync_tref => TRef};
+ true ->
+ State
+ end,
+ {noreply,State1}.
+
+handle_info({'EXIT',Pid,Why}, State = #{id := Name, type := FileInfo}) ->
+ case maps:get(file_ctrl_pid, State, undefined) of
+ Pid ->
+ %% file error, terminate handler
+ logger_h_common:handler_exit(Name,
+ {error,{write_failed,FileInfo,Why}});
+ _Other ->
+ %% ignore EXIT
+ ok
+ end,
+ {noreply, State};
+
+handle_info(_Info, State) ->
+ {noreply, State}.
+
+terminate(Reason, State = #{id:=Name, file_ctrl_pid:=FWPid,
+ type:=_FileInfo}) ->
+ _ = logger_h_common:cancel_timer(maps:get(rep_sync_tref, State,
+ undefined)),
+ case is_process_alive(FWPid) of
+ true ->
+ unlink(FWPid),
+ _ = file_ctrl_stop(FWPid),
+ MRef = erlang:monitor(process, FWPid),
+ receive
+ {'DOWN',MRef,_,_,_} ->
+ ok
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ exit(FWPid, kill)
+ end;
+ false ->
+ ok
+ end,
+ logger_h_common:stop_or_restart(Name, Reason, State).
+
+code_change(_OldVsn, State, _Extra) ->
+ {ok, State}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+
+%%%-----------------------------------------------------------------
+%%%
+get_init_state() ->
+ #{toggle_sync_qlen => ?TOGGLE_SYNC_QLEN,
+ drop_new_reqs_qlen => ?DROP_NEW_REQS_QLEN,
+ flush_reqs_qlen => ?FLUSH_REQS_QLEN,
+ enable_burst_limit => ?ENABLE_BURST_LIMIT,
+ burst_limit_size => ?BURST_LIMIT_SIZE,
+ burst_window_time => ?BURST_WINDOW_TIME,
+ enable_kill_overloaded => ?ENABLE_KILL_OVERLOADED,
+ handler_overloaded_qlen => ?HANDLER_OVERLOADED_QLEN,
+ handler_overloaded_mem => ?HANDLER_OVERLOADED_MEM,
+ handler_restart_after => ?HANDLER_RESTART_AFTER,
+ file_ctrl_sync_int => ?CONTROLLER_SYNC_INTERVAL,
+ filesync_ok_qlen => ?FILESYNC_OK_QLEN,
+ filesync_repeat_interval => ?FILESYNC_REPEAT_INTERVAL}.
+
+%%%-----------------------------------------------------------------
+%%% Add a standard handler to the logger.
+%%% This starts a dedicated handler process which should always
+%%% exist if the handler is registered with logger (and should not
+%%% exist if the handler is not registered).
+%%%
+%%% Handler-specific config should be provided with a sub map associated
+%%% with a key named the same as this module, e.g.:
+%%%
+%%% Config = #{logger_std_h => #{toggle_sync_qlen => 50}}
+%%%
+%%% The standard handler process is linked to logger_sup, which is
+%%% part of the kernel application's supervision tree.
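+%%%
+%%% Example (sketch; handler name and values are illustrative):
+%%%
+%%%   logger:add_handler(my_std_h, logger_std_h,
+%%%                      #{logger_std_h => #{type => {file,"./log/my.log"},
+%%%                                          toggle_sync_qlen => 100}}).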
+start(Name, Config, HandlerState) ->
+ LoggerStdH =
+ #{id => Name,
+ start => {?MODULE, start_link, [Name,Config,HandlerState]},
+ restart => temporary,
+ shutdown => 2000,
+ type => worker,
+ modules => [?MODULE]},
+ case supervisor:start_child(logger_sup, LoggerStdH) of
+ {ok,_Pid} ->
+ ok;
+ Error ->
+ Error
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Stop and remove the handler.
+stop(Name) ->
+ case whereis(Name) of
+ undefined ->
+ ok;
+ _ ->
+ %% We don't want to do supervisor:terminate_child here
+ %% since we need to distinguish this explicit stop from a
+ %% system termination in order to avoid circular attempts
+ %% at removing the handler (implying deadlocks and
+ %% timeouts).
+ _ = gen_server:call(Name,stop),
+ _ = supervisor:delete_child(logger_sup, Name),
+ ok
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Logging and overload control.
+-define(update_file_ctrl_sync(C, Interval),
+ if C == 0 -> Interval;
+ true -> C-1 end).
+
+%% check for overload on every request (and set Mode to async,
+%% sync or drop accordingly), but never flush the whole mailbox
+%% before LogWindowSize requests have been handled
+do_log(Bin, CallOrCast, State = #{id:=Name}) ->
+ T1 = ?timestamp(),
+
+ %% check if the handler is getting overloaded, or if it's
+ %% recovering from overload (the check must be done for each
+ %% request to react quickly to large bursts of requests and
+ %% to ensure that the handler can never end up in drop mode
+ %% with an empty mailbox, which would stop operation)
+ {Mode1,QLen,Mem,State1} = logger_h_common:check_load(State),
+
+ %% kill the handler if it can't keep up with the load
+ logger_h_common:kill_if_choked(Name, QLen, Mem, State),
+
+ if Mode1 == flush ->
+ flush(Name, QLen, T1, State1);
+ true ->
+ write(Name, Mode1, T1, Bin, CallOrCast, State1)
+ end.
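+
+%% Mode summary (sketch; the actual mode logic lives in
+%% logger_h_common): check_load/1 may move the handler between
+%% 'async', 'sync' and 'drop' mode, and a 'flush' result triggers
+%% the mailbox flush performed by flush/4 below.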
+
+%% this clause is called by do_log/3 after an overload check
+%% has been performed, where QLen > FlushQLen
+flush(Name, _QLen0, T1, State=#{last_log_ts := _T0}) ->
+ %% flush messages in the mailbox (a limited number in
+ %% order to not cause long delays)
+ _NewFlushed = logger_h_common:flush_log_requests(?FLUSH_MAX_N),
+
+ %% because of the receive loop when flushing messages, the
+ %% handler will be scheduled out often and the mailbox could
+ %% grow very large, so we'd better check the queue again here
+ {_,_QLen1} = process_info(self(), message_queue_len),
+ ?observe(Name,{max_qlen,_QLen1}),
+
+ %% Add 1 for the current log request
+ ?observe(Name,{flushed,_NewFlushed+1}),
+
+ State1 = ?update_max_time(?diff_time(T1,_T0),State),
+ {dropped,?update_other(flushed,FLUSHED,_NewFlushed,
+ State1#{mode => ?set_mode(Name,async),
+ last_qlen => 0,
+ last_log_ts => T1})}.
+
+%% this clause is called to write to file
+write(Name, Mode, T1, Bin, _CallOrCast,
+ State = #{file_ctrl_pid := FileCtrlPid,
+ file_ctrl_sync := FileCtrlSync,
+ last_qlen := LastQLen,
+ last_log_ts := T0,
+ file_ctrl_sync_int := FileCtrlSyncInt}) ->
+ %% check if we need to limit the number of writes
+ %% during a burst of log requests
+ {DoWrite,BurstWinT,BurstMsgCount} = logger_h_common:limit_burst(State),
+
+ %% only send a synchronous request to the file controller process
+ %% every FileCtrlSyncInt'th write, to give the handler time between
+ %% file writes so it can keep up with incoming messages
+ {Result,LastQLen1} =
+ if DoWrite, FileCtrlSync == 0 ->
+ ?observe(Name,{_CallOrCast,1}),
+ file_write_sync(FileCtrlPid, Bin, false),
+ {ok,element(2, process_info(self(), message_queue_len))};
+ DoWrite ->
+ ?observe(Name,{_CallOrCast,1}),
+ file_write_async(FileCtrlPid, Bin),
+ {ok,LastQLen};
+ not DoWrite ->
+ ?observe(Name,{flushed,1}),
+ {dropped,LastQLen}
+ end,
+
+ %% Check if the time since the previous log request is long enough -
+ %% and the queue length small enough - to assume the mailbox has
+ %% been emptied, and if so, do a filesync operation and reset mode to
+ %% async. Note that this is the best we can do to detect an idle
+ %% handler without setting a timer after each log call/cast. If the
+ %% time between two consecutive log requests is short and no new
+ %% request comes in after the last one, the idle state won't be detected!
+ Time = ?diff_time(T1,T0),
+ {Mode1,BurstMsgCount1} =
+ if (LastQLen1 < ?FILESYNC_OK_QLEN) andalso
+ (Time > ?IDLE_DETECT_TIME_USEC) ->
+ %% do filesync if necessary
+ case maps:get(type, State) of
+ Std when is_atom(Std) ->
+ ok;
+ _File ->
+ file_ctrl_filesync_async(FileCtrlPid)
+ end,
+ {?change_mode(Name, Mode, async),0};
+ true ->
+ {Mode,BurstMsgCount}
+ end,
+ State1 =
+ ?update_calls_or_casts(_CallOrCast,1,State),
+ State2 =
+ ?update_max_time(Time,
+ State1#{mode => Mode1,
+ last_qlen := LastQLen1,
+ last_log_ts => T1,
+ burst_win_ts => BurstWinT,
+ burst_msg_count => BurstMsgCount1,
+ file_ctrl_sync =>
+ ?update_file_ctrl_sync(FileCtrlSync,
+ FileCtrlSyncInt)}),
+ {Result,State2}.
+
+open_log_file(HandlerName, FileInfo) ->
+ case file_ctrl_start(HandlerName, FileInfo) of
+ OK = {ok,_FileCtrlPid} -> OK;
+ Error -> Error
+ end.
+
+do_open_log_file({file,File}) ->
+ do_open_log_file({file,File,[raw,append,delayed_write]});
+
+do_open_log_file({file,File,[]}) ->
+ do_open_log_file({file,File,[raw,append,delayed_write]});
+
+do_open_log_file({file,File,Modes}) ->
+ try
+ case filelib:ensure_dir(File) of
+ ok ->
+ file:open(File, Modes);
+ Error ->
+ Error
+ end
+ catch
+ _:Reason -> {error,Reason}
+ end.
+
+close_log_file(Std) when Std == standard_io; Std == standard_error ->
+ ok;
+close_log_file(Fd) ->
+ _ = file:datasync(Fd),
+ _ = file:close(Fd).
+
+%%%-----------------------------------------------------------------
+%%% File control process
+
+file_ctrl_start(HandlerName, FileInfo) ->
+ Starter = self(),
+ FileCtrlPid =
+ spawn_link(fun() ->
+ file_ctrl_init(HandlerName, FileInfo, Starter)
+ end),
+ receive
+ {FileCtrlPid,ok} ->
+ {ok,FileCtrlPid};
+ {FileCtrlPid,Error} ->
+ Error
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ {error,file_ctrl_process_not_started}
+ end.
+
+file_ctrl_stop(Pid) ->
+ Pid ! stop.
+
+file_write_async(Pid, Bin) ->
+ Pid ! {log,Bin},
+ ok.
+
+file_write_sync(Pid, Bin, FileSync) ->
+ case file_ctrl_call(Pid, {log,self(),Bin,FileSync}) of
+ {error,Reason} ->
+ {error,{write_failed,Bin,Reason}};
+ Result ->
+ Result
+ end.
+
+file_ctrl_filesync_async(Pid) ->
+ Pid ! filesync,
+ ok.
+
+file_ctrl_filesync_sync(Pid) ->
+ file_ctrl_call(Pid, {filesync,self()}).
+
+file_ctrl_call(Pid, Msg) ->
+ MRef = monitor(process, Pid),
+ Pid ! {Msg,MRef},
+ receive
+ {MRef,Result} ->
+ demonitor(MRef, [flush]),
+ Result;
+ {'DOWN',MRef,_Type,_Object,Reason} ->
+ {error,Reason}
+ after
+ ?DEFAULT_CALL_TIMEOUT ->
+ {error,{no_response,Pid}}
+ end.
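+
+%% Call protocol sketch: the caller monitors the file controller
+%% process, sends {Msg,MRef} and waits for {MRef,Result}. A 'DOWN'
+%% message is converted to {error,Reason}, and no reply within
+%% ?DEFAULT_CALL_TIMEOUT gives {error,{no_response,Pid}}.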
+
+file_ctrl_init(HandlerName, FileInfo, Starter) when is_tuple(FileInfo) ->
+ process_flag(message_queue_data, off_heap),
+ FileName = element(2, FileInfo),
+ case do_open_log_file(FileInfo) of
+ {ok,Fd} ->
+ Starter ! {self(),ok},
+ file_ctrl_loop(Fd, file, FileName, false, ok, ok, HandlerName);
+ {error,Reason} ->
+ Starter ! {self(),{error,{open_failed,FileName,Reason}}}
+ end;
+file_ctrl_init(HandlerName, StdDev, Starter) ->
+ Starter ! {self(),ok},
+ file_ctrl_loop(StdDev, standard_io, StdDev, false, ok, ok, HandlerName).
+
+file_ctrl_loop(Fd, Type, DevName, Synced,
+ PrevWriteResult, PrevSyncResult, HandlerName) ->
+ receive
+ %% asynchronous request
+ {log,Bin} ->
+ Result = if Type == file ->
+ write_to_dev(Fd, Bin, DevName,
+ PrevWriteResult, HandlerName);
+ true ->
+ io:put_chars(Fd, Bin)
+ end,
+ file_ctrl_loop(Fd, Type, DevName, false,
+ Result, PrevSyncResult, HandlerName);
+
+ %% synchronous request
+ {{log,From,Bin,FileSync},MRef} ->
+ if Type == file ->
+ %% check that file hasn't been deleted
+ CheckFile =
+ fun() -> {ok,_} = file:read_file_info(DevName) end,
+ spawn_link(CheckFile),
+ WResult = write_to_dev(Fd, Bin, DevName,
+ PrevWriteResult, HandlerName),
+ {Synced1,SResult} =
+ if not FileSync ->
+ {false,PrevSyncResult};
+ true ->
+ case sync_dev(Fd, DevName,
+ PrevSyncResult, HandlerName) of
+ ok -> {true,ok};
+ Error -> {false,Error}
+ end
+ end,
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, Synced1,
+ WResult, SResult, HandlerName);
+ true ->
+ _ = io:put_chars(Fd, Bin),
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, false,
+ ok, PrevSyncResult, HandlerName)
+ end;
+
+ filesync when not Synced ->
+ Result = sync_dev(Fd, DevName, PrevSyncResult, HandlerName),
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, Result, HandlerName);
+
+ filesync ->
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, PrevSyncResult, HandlerName);
+
+ {{filesync,From},MRef} ->
+ Result = if not Synced ->
+ sync_dev(Fd, DevName, PrevSyncResult, HandlerName);
+ true ->
+ ok
+ end,
+ From ! {MRef,ok},
+ file_ctrl_loop(Fd, Type, DevName, true,
+ PrevWriteResult, Result, HandlerName);
+
+ stop ->
+ _ = close_log_file(Fd),
+ stopped
+ end.
+
+write_to_dev(Fd, Bin, FileName, PrevWriteResult, HandlerName) ->
+ case ?file_write(Fd, Bin) of
+ ok ->
+ ok;
+ PrevWriteResult ->
+ %% don't report same error twice
+ PrevWriteResult;
+ Error ->
+ logger_h_common:error_notify({HandlerName,write,FileName,Error}),
+ Error
+ end.
+
+sync_dev(Fd, DevName, PrevSyncResult, HandlerName) ->
+ case ?file_datasync(Fd) of
+ ok ->
+ ok;
+ PrevSyncResult ->
+ %% don't report same error twice
+ PrevSyncResult;
+ Error ->
+ logger_h_common:error_notify({HandlerName,filesync,DevName,Error}),
+ Error
+ end.
+
diff --git a/lib/kernel/src/logger_sup.erl b/lib/kernel/src/logger_sup.erl
new file mode 100644
index 0000000000..4e4de94d5c
--- /dev/null
+++ b/lib/kernel/src/logger_sup.erl
@@ -0,0 +1,53 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2017. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_sup).
+
+-behaviour(supervisor).
+
+%% API
+-export([start_link/0]).
+
+%% Supervisor callbacks
+-export([init/1]).
+
+-define(SERVER, ?MODULE).
+
+%%%===================================================================
+%%% API functions
+%%%===================================================================
+
+start_link() ->
+ supervisor:start_link({local, ?SERVER}, ?MODULE, []).
+
+%%%===================================================================
+%%% Supervisor callbacks
+%%%===================================================================
+
+init([]) ->
+
+ SupFlags = #{strategy => one_for_one,
+ intensity => 1,
+ period => 5},
+
+ {ok, {SupFlags, []}}.
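+
+%% Note: no children are declared statically. Handler modules add
+%% their processes at runtime, roughly as logger_std_h:start/3 does:
+%%
+%%   supervisor:start_child(logger_sup,
+%%                          #{id => HandlerName,
+%%                            start => {Module, start_link, Args},
+%%                            restart => temporary,
+%%                            type => worker}).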
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
diff --git a/lib/kernel/test/Makefile b/lib/kernel/test/Makefile
index 03b6355056..8599a3d814 100644
--- a/lib/kernel/test/Makefile
+++ b/lib/kernel/test/Makefile
@@ -70,6 +70,15 @@ MODULES= \
interactive_shell_SUITE \
init_SUITE \
kernel_config_SUITE \
+ logger_SUITE \
+ logger_bench_SUITE \
+ logger_disk_log_h_SUITE \
+ logger_env_var_SUITE \
+ logger_filters_SUITE \
+ logger_formatter_SUITE \
+ logger_legacy_SUITE \
+ logger_simple_SUITE \
+ logger_std_h_SUITE \
os_SUITE \
pg2_SUITE \
seq_trace_SUITE \
@@ -102,7 +111,7 @@ TARGET_FILES= $(MODULES:%=$(EBIN)/%.$(EMULATOR))
INSTALL_PROGS= $(TARGET_FILES)
EMAKEFILE=Emakefile
-COVERFILE=kernel.cover
+COVERFILE=kernel.cover logger.cover
# ----------------------------------------------------
# Release directory specification
@@ -149,7 +158,8 @@ release_tests_spec: make_emakefile
$(INSTALL_DIR) "$(RELSYSDIR)"
$(INSTALL_DATA) $(ERL_FILES) "$(RELSYSDIR)"
$(INSTALL_DATA) $(APP_FILES) "$(RELSYSDIR)"
- $(INSTALL_DATA) kernel.spec kernel_smoke.spec kernel_bench.spec \
+ $(INSTALL_DATA) \
+ kernel.spec kernel_smoke.spec kernel_bench.spec logger.spec \
$(EMAKEFILE) $(COVERFILE) "$(RELSYSDIR)"
chmod -R u+w "$(RELSYSDIR)"
@tar cf - *_SUITE_data | (cd "$(RELSYSDIR)"; tar xf -)
diff --git a/lib/kernel/test/application_SUITE.erl b/lib/kernel/test/application_SUITE.erl
index 866043cfb4..c00fb44c46 100644
--- a/lib/kernel/test/application_SUITE.erl
+++ b/lib/kernel/test/application_SUITE.erl
@@ -1568,7 +1568,8 @@ loop5606(Pid) ->
%% Tests get_env/* functions.
get_env(Conf) when is_list(Conf) ->
- {ok, _} = application:get_env(kernel, error_logger),
+ ok = application:set_env(kernel, new_var, new_val),
+ {ok, new_val} = application:get_env(kernel, new_var),
undefined = application:get_env(undefined_app, a),
undefined = application:get_env(kernel, error_logger_xyz),
default = application:get_env(kernel, error_logger_xyz, default),
diff --git a/lib/kernel/test/error_logger_SUITE.erl b/lib/kernel/test/error_logger_SUITE.erl
index 2d26a7246c..6c4526d0cf 100644
--- a/lib/kernel/test/error_logger_SUITE.erl
+++ b/lib/kernel/test/error_logger_SUITE.erl
@@ -32,7 +32,8 @@
init_per_group/2,end_per_group/2,
off_heap/1,
error_report/1, info_report/1, error/1, info/1,
- emulator/1, tty/1, logfile/1, add/1, delete/1]).
+ emulator/1, via_logger_process/1, other_node/1,
+ tty/1, logfile/1, add/1, delete/1]).
-export([generate_error/2]).
@@ -46,16 +47,19 @@ suite() ->
{timetrap,{minutes,1}}].
all() ->
- [off_heap, error_report, info_report, error, info, emulator, tty,
- logfile, add, delete].
+ [off_heap, error_report, info_report, error, info, emulator,
+ via_logger_process, other_node, tty, logfile, add, delete].
groups() ->
[].
init_per_suite(Config) ->
+ logger:add_handler(error_logger,error_logger,
+ #{level=>info,filter_default=>log}),
Config.
end_per_suite(_Config) ->
+ logger:remove_handler(error_logger),
ok.
init_per_group(_GroupName, Config) ->
@@ -226,6 +230,40 @@ generate_error(Error, Stack) ->
erlang:raise(error, Error, Stack).
%%-----------------------------------------------------------------
+
+via_logger_process(Config) ->
+ case os:type() of
+ {win32,_} ->
+ {skip,"Skip on windows - cant change file mode"};
+ _ ->
+ error_logger:add_report_handler(?MODULE, self()),
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ Msg = "File operation error: eacces. Target: " ++
+ Dir ++ ". Function: list_dir. ",
+ ok = file:make_dir(Dir),
+ ok = file:change_mode(Dir,8#0222),
+ error = erl_prim_loader:list_dir(Dir),
+ ok = file:change_mode(Dir,8#0664),
+ _ = file:del_dir(Dir),
+ reported(error_report, std_error, Msg),
+ my_yes = error_logger:delete_report_handler(?MODULE),
+ ok
+ end.
+
+%%-----------------------------------------------------------------
+
+other_node(_Config) ->
+ error_logger:add_report_handler(?MODULE, self()),
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ ok = rpc:call(Node,logger,add_handler,[error_logger,error_logger,
+ #{level=>info,filter_default=>log}]),
+ rpc:call(Node,error_logger,error_report,[hi_from_remote]),
+ reported(error_report,std_error,hi_from_remote),
+ test_server:stop_node(Node),
+ ok.
+
+
+%%-----------------------------------------------------------------
%% We do not enable or disable tty error logging here. We do not
%% want to interact with the test run.
%%-----------------------------------------------------------------
@@ -279,7 +317,7 @@ reported(Tag, Type, Report) ->
test_server:messages_get(),
ok
after 1000 ->
- ct:fail(no_report_received)
+ ct:fail({no_report_received,test_server:messages_get()})
end.
%%-----------------------------------------------------------------
diff --git a/lib/kernel/test/init_SUITE.erl b/lib/kernel/test/init_SUITE.erl
index c8415b34e5..6a006cdc01 100644
--- a/lib/kernel/test/init_SUITE.erl
+++ b/lib/kernel/test/init_SUITE.erl
@@ -299,7 +299,7 @@ many_restarts() ->
many_restarts(Config) when is_list(Config) ->
{ok, Node} = loose_node:start(init_test, "", ?DEFAULT_TIMEOUT_SEC),
- loop_restart(50,Node,rpc:call(Node,erlang,whereis,[error_logger])),
+ loop_restart(50,Node,rpc:call(Node,erlang,whereis,[logger])),
loose_node:stop(Node),
ok.
@@ -316,13 +316,13 @@ loop_restart(N,Node,EHPid) ->
ct:fail(not_stopping)
end,
ok = wait_for(30, Node, EHPid),
- loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[error_logger])).
+ loop_restart(N-1,Node,rpc:call(Node,erlang,whereis,[logger])).
wait_for(0,Node,_) ->
loose_node:stop(Node),
error;
wait_for(N,Node,EHPid) ->
- case rpc:call(Node, erlang, whereis, [error_logger]) of
+ case rpc:call(Node, erlang, whereis, [logger]) of
Pid when is_pid(Pid), Pid =/= EHPid ->
%% erlang:display(ok),
ok;
diff --git a/lib/kernel/test/kernel.spec b/lib/kernel/test/kernel.spec
index 62afc9f97b..86d2155828 100644
--- a/lib/kernel/test/kernel.spec
+++ b/lib/kernel/test/kernel.spec
@@ -2,3 +2,4 @@
{config, "../test_server/ts.unix.config"}.
{suites,"../kernel_test", all}.
+{skip_suites,"../kernel_test",[logger_bench_SUITE],"Not ready"}.
diff --git a/lib/kernel/test/logger.cover b/lib/kernel/test/logger.cover
new file mode 100644
index 0000000000..b30bcfe920
--- /dev/null
+++ b/lib/kernel/test/logger.cover
@@ -0,0 +1,14 @@
+%% -*- erlang -*-
+{incl_mods,[error_logger,
+ logger,
+ logger_backend,
+ logger_config,
+ logger_disk_log_h,
+ logger_h_common,
+ logger_filters,
+ logger_formatter,
+ logger_server,
+ logger_simple,
+ logger_std_h,
+ logger_sup]}.
+
diff --git a/lib/kernel/test/logger.spec b/lib/kernel/test/logger.spec
new file mode 100644
index 0000000000..cd76a754a4
--- /dev/null
+++ b/lib/kernel/test/logger.spec
@@ -0,0 +1,11 @@
+%% -*-erlang-*-
+{suites,"../kernel_test", [error_logger_SUITE,
+ error_logger_warn_SUITE,
+ logger_SUITE,
+ logger_disk_log_h_SUITE,
+ logger_env_var_SUITE,
+ logger_filters_SUITE,
+ logger_formatter_SUITE,
+ logger_legacy_SUITE,
+ logger_simple_SUITE,
+ logger_std_h_SUITE]}.
diff --git a/lib/kernel/test/logger_SUITE.erl b/lib/kernel/test/logger_SUITE.erl
new file mode 100644
index 0000000000..0edce3e34c
--- /dev/null
+++ b/lib/kernel/test/logger_SUITE.erl
@@ -0,0 +1,828 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+-define(MY_LOC(N),#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ file=>?FILE, line=>?LINE-N}).
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ case logger:get_handler_config(logger_std_h) of
+ {ok,StdH} ->
+ ok = logger:remove_handler(logger_std_h),
+ [{logger_std_h,StdH}|Config];
+ _ ->
+ Config
+ end.
+
+end_per_suite(Config) ->
+ case ?config(logger_std_h,Config) of
+ {HMod,HConfig} ->
+ ok = logger:add_handler(logger_std_h,HMod,HConfig);
+ _ ->
+ ok
+ end.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ {ok,LC} = logger:get_logger_config(),
+ [{logger_config,LC}|Config].
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ add_remove_handler,
+ multiple_handlers,
+ add_remove_filter,
+ change_config,
+ set_formatter,
+ log_all_levels_api,
+ macros,
+ set_level,
+ set_level_module,
+ cache_level_module,
+ format_report,
+ filter_failed,
+ handler_failed,
+ config_sanity_check,
+ log_failed,
+ emulator,
+ via_logger_process,
+ other_node,
+ compare_levels,
+ process_metadata].
+
+start_stop(_Config) ->
+ S = whereis(logger),
+ true = is_pid(S),
+ ok.
+
+add_remove_handler(_Config) ->
+ register(callback_receiver,self()),
+ {ok,#{handlers:=Hs0}} = logger:get_logger_config(),
+ {error,{not_found,h1}} = logger:get_handler_config(h1),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ [add] = test_server:messages_get(),
+ {ok,#{handlers:=Hs}} = logger:get_logger_config(),
+ [h1|Hs0] = Hs,
+ {ok,{?MODULE,#{level:=info,filters:=[],filter_default:=log}}} = % defaults
+ logger:get_handler_config(h1),
+ ok = logger:set_handler_config(h1,filter_default,stop),
+ [changing_config] = test_server:messages_get(),
+ ?LOG_INFO("hello",[]),
+ ok = check_no_log(),
+ ok = logger:set_handler_config(h1,filter_default,log),
+ [changing_config] = test_server:messages_get(),
+ {ok,{?MODULE,#{filter_default:=log}}} = logger:get_handler_config(h1),
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(info,"hello",[],?MY_LOC(1)),
+ ok = logger:remove_handler(h1),
+ [remove] = test_server:messages_get(),
+ {ok,#{handlers:=Hs0}} = logger:get_logger_config(),
+ {error,{not_found,h1}} = logger:get_handler_config(h1),
+ {error,{not_found,h1}} = logger:remove_handler(h1),
+ logger:info("hello",[]),
+ ok = check_no_log(),
+ ok.
+
+add_remove_handler(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+multiple_handlers(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ ok = logger:add_handler(h2,?MODULE,#{level=>error,filter_default=>log}),
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(info,"hello",[],?MY_LOC(1)),
+ ok = check_no_log(),
+ ok.
+
+multiple_handlers(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+add_remove_filter(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ LF = {fun(Log,_) -> Log#{level=>error} end, []},
+ ok = logger:add_logger_filter(lf,LF),
+ {error,{already_exist,lf}} = logger:add_logger_filter(lf,LF),
+ {error,{already_exist,lf}} = logger:add_logger_filter(lf,{fun(Log,_) ->
+ Log
+ end, []}),
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_no_log(),
+
+ ok = logger:add_handler(h2,?MODULE,#{level=>info,filter_default=>log}),
+ HF = {fun(#{level:=error}=Log,_) ->
+ Log#{level=>mylevel};
+ (_,_) ->
+ ignore
+ end,
+ []},
+ ok = logger:add_handler_filter(h1,hf,HF),
+ {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,HF),
+ {error,{already_exist,hf}} = logger:add_handler_filter(h1,hf,{fun(Log,_) ->
+ Log
+ end, []}),
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(mylevel,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+
+ ok = logger:remove_logger_filter(lf),
+ {error,{not_found,lf}} = logger:remove_logger_filter(lf),
+
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(info,"hello",[],?MY_LOC(1)),
+ ok = check_logged(info,"hello",[],?MY_LOC(2)),
+
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(mylevel,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+
+ ok = logger:remove_handler_filter(h1,hf),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+ ?LOG_INFO("hello",[]),
+ ok = check_logged(info,"hello",[],?MY_LOC(1)),
+ ok = check_logged(info,"hello",[],?MY_LOC(2)),
+
+ ?LOG_ERROR("hello",[]),
+ ok = check_logged(error,"hello",[],?MY_LOC(1)),
+ ok = check_logged(error,"hello",[],?MY_LOC(2)),
+ ok.
+
+add_remove_filter(cleanup,_Config) ->
+ logger:remove_logger_filter(lf),
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+change_config(_Config) ->
+ %% Overwrite handler config - check that defaults are added
+ ok = logger:add_handler(h1,?MODULE,#{level=>debug,custom=>custom}),
+ {ok,{?MODULE,#{level:=debug,filter_default:=log,custom:=custom}}} =
+ logger:get_handler_config(h1),
+ register(callback_receiver,self()),
+ ok = logger:set_handler_config(h1,#{filter_default=>stop}),
+ [changing_config] = test_server:messages_get(),
+ {ok,{?MODULE,#{level:=info,filter_default:=stop}=C2}} =
+ logger:get_handler_config(h1),
+ false = maps:is_key(custom,C2),
+ {error,fail} = logger:set_handler_config(h1,#{fail=>true}),
+ {error,{attempting_syncronous_call_to_self,_}} =
+ logger:set_handler_config(
+ h1,#{call=>fun() -> logger:set_module_level(?MODULE,debug) end}),
+ {ok,{?MODULE,C2}} = logger:get_handler_config(h1),
+
+ %% Change one key only
+ {error,fail} = logger:set_handler_config(h1,fail,true),
+ ok = logger:set_handler_config(h1,custom,custom),
+ [changing_config] = test_server:messages_get(),
+ {ok,{?MODULE,#{custom:=custom}=C3}} = logger:get_handler_config(h1),
+ C2 = maps:remove(custom,C3),
+
+ %% Overwrite logger config - check that defaults are added
+ {ok,LConfig} = logger:get_logger_config(),
+ ok = logger:set_logger_config(#{filter_default=>stop}),
+ {ok,#{level:=info,filters:=[],handlers:=[],filter_default:=stop}=LC1} =
+ logger:get_logger_config(),
+ 4 = maps:size(LC1),
+
+ %% Change one key only
+ ok = logger:set_logger_config(handlers,[h1]),
+ {ok,#{level:=info,filters:=[],handlers:=[h1],filter_default:=stop}} =
+ logger:get_logger_config(),
+
+ %% Cleanup
+ ok = logger:set_logger_config(LConfig),
+ [] = test_server:messages_get(),
+
+ ok.
+
+change_config(cleanup,Config) ->
+ logger:remove_handler(h1),
+ LC = ?config(logger_config,Config),
+ logger:set_logger_config(LC),
+ ok.
+
+set_formatter(_Config) ->
+ {error,{not_found,h1}}=logger:set_handler_config(h1,formatter,{?MODULE,[]}),
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ ok = logger:set_handler_config(h1,formatter,{?MODULE,[]}),
+ logger:info("hello",[]),
+ receive
+ {_Log,#{formatter:={?MODULE,[]}}} ->
+ ok
+ after 500 ->
+ ct:fail({timeout,no_log,process_info(self(),messages)})
+ end,
+ ok.
+
+set_formatter(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+log_all_levels_api(_Config) ->
+ ok = logger:set_logger_config(level,debug),
+ ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}),
+ test_api(emergency),
+ test_api(alert),
+ test_api(critical),
+ test_api(error),
+ test_api(warning),
+ test_api(notice),
+ test_api(info),
+ test_api(debug),
+ test_log_function(emergency),
+ ok.
+
+log_all_levels_api(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_logger_config(level,info),
+ ok.
+
+macros(_Config) ->
+ ok = logger:set_module_level(?MODULE,debug),
+ ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}),
+ test_macros(emergency),
+ ok.
+
+macros(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:reset_module_level(?MODULE),
+ ok.
+
+set_level(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>debug,filter_default=>log}),
+ logger:debug(?map_rep),
+ ok = check_no_log(),
+ logger:info(M1=?map_rep),
+ ok = check_logged(info,M1,#{}),
+ ok = logger:set_logger_config(level,debug),
+ logger:debug(M2=?map_rep),
+ ok = check_logged(debug,M2,#{}),
+ ok.
+
+set_level(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:set_logger_config(level,info),
+ ok.
+
+set_level_module(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ {error,{invalid_level,bad}} = logger:set_module_level(?MODULE,bad),
+ {error,{not_a_module,{bad}}} = logger:set_module_level({bad},warning),
+ ok = logger:set_module_level(?MODULE,warning),
+ logger:info(?map_rep,?MY_LOC(0)),
+ ok = check_no_log(),
+ logger:warning(M1=?map_rep,?MY_LOC(0)),
+ ok = check_logged(warning,M1,?MY_LOC(1)),
+ ok = logger:set_module_level(?MODULE,info),
+ logger:info(M2=?map_rep,?MY_LOC(0)),
+ ok = check_logged(info,M2,?MY_LOC(1)),
+
+ {error,{not_a_module,{bad}}} = logger:reset_module_level({bad}),
+ ok = logger:reset_module_level(?MODULE),
+
+ ok.
+
+set_level_module(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:reset_module_level(?MODULE),
+ ok.
+
+cache_level_module(_Config) ->
+ ok = logger:reset_module_level(?MODULE),
+ [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config?
+ ?LOG_INFO(?map_rep),
+ %% Caching is done asynchronously, so wait a bit for the update
+ timer:sleep(100),
+ [_] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config?
+ ok = logger:reset_module_level(?MODULE),
+ [] = ets:lookup(logger,?MODULE), %dirty - add API in logger_config?
+ ok.
+
+cache_level_module(cleanup,_Config) ->
+ logger:reset_module_level(?MODULE),
+ ok.
+
+format_report(_Config) ->
+ {"~ts",["string"]} = logger:format_report("string"),
+ {"~tp",[term]} = logger:format_report(term),
+ {"~tp",[[]]} = logger:format_report([]),
+ {" ~tp: ~tp",[key,value]} = logger:format_report([{key,value}]),
+ KeyVals = [{key1,value1},{key2,"value2"},{key3,[]}],
+ KeyValRes =
+ {" ~tp: ~tp\n ~tp: ~ts\n ~tp: ~tp",
+ [key1,value1,key2,"value2",key3,[]]} =
+ logger:format_report(KeyVals),
+ KeyValRes = logger:format_report(maps:from_list(KeyVals)),
+ KeyValRes = logger:format_otp_report(#{label=>{?MODULE,test},report=>KeyVals}),
+ {" ~tp: ~tp\n ~tp: ~tp",
+ [label,{?MODULE,test},report,KeyVals]} =
+ logger:format_report(#{label=>{?MODULE,test},report=>KeyVals}),
+
+ {" ~tp: ~tp\n ~tp",[key1,value1,term]} =
+ logger:format_report([{key1,value1},term]),
+
+ {" ~tp: ~tp\n ~tp",[key1,value1,[]]} =
+ logger:format_report([{key1,value1},[]]),
+
+ {"~tp",[[]]} = logger:format_report([[],[],[]]),
+
+ ok.
+
+filter_failed(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+
+ %% Logger filters
+ {error,{invalid_filter,_}} =
+ logger:add_logger_filter(lf,{fun(_) -> ok end,args}),
+ ok = logger:add_logger_filter(lf,{fun(_,_) -> a=b end,args}),
+ {ok,#{filters:=[_]}} = logger:get_logger_config(),
+ ok = logger:info(M1=?map_rep),
+ ok = check_logged(info,M1,#{}),
+ {error,{not_found,lf}} = logger:remove_logger_filter(lf),
+
+ ok = logger:add_logger_filter(lf,{fun(_,_) -> faulty_return end,args}),
+ {ok,#{filters:=[_]}} = logger:get_logger_config(),
+ ok = logger:info(M2=?map_rep),
+ ok = check_logged(info,M2,#{}),
+ {error,{not_found,lf}} = logger:remove_logger_filter(lf),
+
+ %% Handler filters
+ {error,{not_found,h0}} =
+ logger:add_handler_filter(h0,hf,{fun(_,_) -> ignore end,args}),
+ {error,{not_found,h0}} = logger:remove_handler_filter(h0,hf),
+ {error,{invalid_filter,_}} =
+ logger:add_handler_filter(h1,hf,{fun(_) -> ok end,args}),
+ ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> a=b end,args}),
+ {ok,{?MODULE,#{filters:=[_]}}} = logger:get_handler_config(h1),
+ ok = logger:info(M3=?map_rep),
+ ok = check_logged(info,M3,#{}),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+
+ ok = logger:add_handler_filter(h1,hf,{fun(_,_) -> faulty_return end,args}),
+ {ok,{?MODULE,#{filters:=[_]}}} = logger:get_handler_config(h1),
+ ok = logger:info(M4=?map_rep),
+ ok = check_logged(info,M4,#{}),
+ {error,{not_found,hf}} = logger:remove_handler_filter(h1,hf),
+
+ ok.
+
+filter_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+handler_failed(_Config) ->
+ {error,{invalid_id,1}} = logger:add_handler(1,?MODULE,#{}),
+ {error,{invalid_module,"nomodule"}} = logger:add_handler(h1,"nomodule",#{}),
+ {error,{invalid_handler_config,bad}} = logger:add_handler(h1,?MODULE,bad),
+ {error,{invalid_filters,false}} =
+ logger:add_handler(h1,?MODULE,#{filters=>false}),
+ {error,{invalid_filter_default,true}} =
+ logger:add_handler(h1,?MODULE,#{filter_default=>true}),
+ {error,{invalid_formatter,[]}} =
+ logger:add_handler(h1,?MODULE,#{formatter=>[]}),
+ ok = logger:add_handler(h1,nomodule,#{filter_default=>log}),
+ logger:info(?map_rep),
+ check_no_log(),
+ #{logger:=#{handlers:=Ids1},
+ handlers:=H1} = logger:i(),
+ false = lists:member(h1,Ids1),
+ false = lists:keymember(h1,1,H1),
+ {error,{not_found,h1}} = logger:remove_handler(h1),
+
+ ok = logger:add_handler(h2,?MODULE,#{filter_default=>log,crash=>true}),
+ {error,{already_exist,h2}} = logger:add_handler(h2,othermodule,#{}),
+
+ logger:info(?map_rep),
+ check_no_log(),
+ #{logger:=#{handlers:=Ids2},
+ handlers:=H2} = logger:i(),
+ false = lists:member(h2,Ids2),
+ false = lists:keymember(h2,1,H2),
+ {error,{not_found,h2}} = logger:remove_handler(h2),
+
+ ok.
+
+handler_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ logger:remove_handler(h2),
+ ok.
+
+config_sanity_check(_Config) ->
+ %% Logger config
+ {error,{invalid_filter_default,bad}} =
+ logger:set_logger_config(filter_default,bad),
+ {error,{invalid_level,bad}} = logger:set_logger_config(level,bad),
+ {error,{invalid_handlers,bad}} = logger:set_logger_config(handlers,bad),
+ {error,{invalid_id,{bad,bad}}} =
+ logger:set_logger_config(handlers,[{bad,bad}]),
+ {error,{invalid_id,"bad"}} = logger:set_logger_config(handlers,["bad"]),
+ {error,{invalid_filters,bad}} = logger:set_logger_config(filters,bad),
+ {error,{invalid_filter,bad}} = logger:set_logger_config(filters,[bad]),
+ {error,{invalid_filter,{_,_}}} =
+ logger:set_logger_config(filters,[{id,bad}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_logger_config(filters,[{id,{bad,args}}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_logger_config(filters,[{id,{fun() -> ok end,args}}]),
+ {error,{invalid_logger_config,{bad,bad}}} =
+ logger:set_logger_config(bad,bad),
+
+ %% Handler config
+ {error,{not_found,h1}} = logger:set_handler_config(h1,a,b),
+ ok = logger:add_handler(h1,?MODULE,#{}),
+ {error,{invalid_filter_default,bad}} =
+ logger:set_handler_config(h1,filter_default,bad),
+ {error,{invalid_level,bad}} = logger:set_handler_config(h1,level,bad),
+ {error,{invalid_filters,bad}} = logger:set_handler_config(h1,filters,bad),
+ {error,{invalid_filter,bad}} = logger:set_handler_config(h1,filters,[bad]),
+ {error,{invalid_filter,{_,_}}} =
+ logger:set_handler_config(h1,filters,[{id,bad}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_handler_config(h1,filters,[{id,{bad,args}}]),
+ {error,{invalid_filter,{_,{_,_}}}} =
+ logger:set_handler_config(h1,filters,[{id,{fun() -> ok end,args}}]),
+ {error,{invalid_formatter,bad}} =
+ logger:set_handler_config(h1,formatter,bad),
+ {error,{invalid_module,{bad}}} =
+ logger:set_handler_config(h1,formatter,{{bad},cfg}),
+ {error,{invalid_formatter_config,bad}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,bad}),
+ {error,{invalid_formatter_config,{bad,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,#{bad=>bad}}),
+ {error,{invalid_formatter_config,{template,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>bad}}),
+ {error,{invalid_formatter_template,[1]}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>[1]}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{template=>[]}}),
+ {error,{invalid_formatter_config,{single_line,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{single_line=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{single_line=>true}}),
+ {error,{invalid_formatter_config,{legacy_header,bad}}} =
+ logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{legacy_header=>bad}}),
+ ok = logger:set_handler_config(h1,formatter,{logger_formatter,
+ #{legacy_header=>true}}),
+ ok = logger:set_handler_config(h1,custom,custom),
+ ok.
+
+config_sanity_check(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+log_failed(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ {error,function_clause} = ?TRY(logger:log(bad,?map_rep)),
+ {error,function_clause} = ?TRY(logger:log(info,?map_rep,bad)),
+ {error,function_clause} = ?TRY(logger:log(info,fun() -> ?map_rep end,bad)),
+ {error,function_clause} = ?TRY(logger:log(info,fun() -> ?map_rep end,bad,#{})),
+ {error,function_clause} = ?TRY(logger:log(info,bad,bad,bad)),
+ {error,function_clause} = ?TRY(logger:log(info,bad,bad,#{})),
+ check_no_log(),
+ ok = logger:log(info,M1=?str,#{}),
+ check_logged(info,M1,#{}),
+ ok = logger:log(info,M2=?map_rep,#{}),
+ check_logged(info,M2,#{}),
+ ok = logger:log(info,M3=?keyval_rep,#{}),
+ check_logged(info,M3,#{}),
+
+ %% Should we check report input more thoroughly?
+ ok = logger:log(info,M4=?keyval_rep++[other,stuff,in,list],#{}),
+ check_logged(info,M4,#{}),
+
+ %% This might break a handler since it is assumed to be a format
+ %% string and args, so it depends on how the handler protects itself
+ %% against something like io_lib:format("ok","ok")
+ ok = logger:log(info,"ok","ok",#{}),
+ check_logged(info,"ok","ok",#{}),
+
+ ok.
+
+log_failed(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+emulator(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log,
+ tc_proc=>self()}),
+ Msg = "Error in process ~p on node ~p with exit value:~n~p~n",
+ Error = {badmatch,4},
+ Stack = [{module, function, 2, []}],
+ Pid = spawn(?MODULE, generate_error, [Error, Stack]),
+ check_logged(error, Msg, [Pid, node(), {Error, Stack}],
+ #{gl=>group_leader(),
+ error_logger=>#{tag=>error,emulator=>true}}),
+ ok.
+
+emulator(cleanup, _Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+generate_error(Error, Stack) ->
+ erlang:raise(error, Error, Stack).
+
+via_logger_process(Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log,
+ tc_proc=>self()}),
+
+ %% Explicitly send a message to the logger process
+ %% This is used by code_server, erl_prim_loader, init, prim_file, ...
+ Msg = ?str,
+ logger ! {log,error,Msg,[],#{}},
+ check_logged(error, Msg, [], #{}),
+
+ case os:type() of
+ {win32,_} ->
+ %% Skip this part on Windows - can't change file mode
+ ok;
+ _ ->
+ %% This should trigger the same thing from erl_prim_loader
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ ok = file:make_dir(Dir),
+ ok = file:change_mode(Dir,8#0222),
+ error = erl_prim_loader:list_dir(Dir),
+ check_logged(error,
+ #{report=>"File operation error: eacces. Target: " ++
+ Dir ++". Function: list_dir. "},
+ #{pid=>self(),
+ gl=>group_leader(),
+ error_logger=>#{tag=>error_report,
+ type=>std_error}}),
+ ok
+ end.
+
+via_logger_process(cleanup, Config) ->
+ Dir = filename:join(?config(priv_dir,Config),"dummydir"),
+ _ = file:change_mode(Dir,8#0664),
+ _ = file:del_dir(Dir),
+ logger:remove_handler(h1),
+ ok.
+
+other_node(_Config) ->
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log,
+ tc_proc=>self()}),
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ rpc:call(Node,logger,error,[Msg=?str,#{}]),
+ check_logged(error,Msg,#{}),
+ ok.
+
+other_node(cleanup,_Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes],
+ logger:remove_handler(h1),
+ ok.
+
+compare_levels(_Config) ->
+ Levels = [emergency,alert,critical,error,warning,notice,info,debug],
+ ok = compare(Levels),
+ {error,badarg} = ?TRY(logger:compare_levels(bad,bad)),
+ {error,badarg} = ?TRY(logger:compare_levels({bad},info)),
+ {error,badarg} = ?TRY(logger:compare_levels(info,"bad")),
+ ok.
+
+compare([L|Rest]) ->
+ eq = logger:compare_levels(L,L),
+ [gt = logger:compare_levels(L,L1) || L1 <- Rest],
+ [lt = logger:compare_levels(L1,L) || L1 <- Rest],
+ compare(Rest);
+compare([]) ->
+ ok.
+
+process_metadata(_Config) ->
+ undefined = logger:get_process_metadata(),
+ {error,badarg} = ?TRY(logger:set_process_metadata(bad)),
+ ok = logger:add_handler(h1,?MODULE,#{level=>info,filter_default=>log}),
+ Time = erlang:monotonic_time(microsecond),
+ ProcMeta = #{time=>Time,line=>0,custom=>proc},
+ ok = logger:set_process_metadata(ProcMeta),
+ S1 = ?str,
+ ?LOG_INFO(S1,#{custom=>macro}),
+ check_logged(info,S1,#{time=>Time,line=>0,custom=>macro}),
+
+ Time2 = erlang:monotonic_time(microsecond),
+ S2 = ?str,
+ ?LOG_INFO(S2,#{time=>Time2,line=>1,custom=>macro}),
+ check_logged(info,S2,#{time=>Time2,line=>1,custom=>macro}),
+
+ logger:info(S3=?str,#{custom=>func}),
+ check_logged(info,S3,#{time=>Time,line=>0,custom=>func}),
+
+ ProcMeta = logger:get_process_metadata(),
+ ok = logger:unset_process_metadata(),
+ undefined = logger:get_process_metadata(),
+
+ ok.
+
+process_metadata(cleanup,_Config) ->
+ logger:remove_handler(h1),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+check_logged(Level,Format,Args,Meta) ->
+ do_check_logged(Level,{Format,Args},Meta).
+
+check_logged(Level,Msg,Meta) when ?IS_REPORT(Msg) ->
+ do_check_logged(Level,{report,Msg},Meta);
+check_logged(Level,Msg,Meta) when ?IS_STRING(Msg) ->
+ do_check_logged(Level,{string,Msg},Meta).
+
+do_check_logged(Level,Msg0,Meta0) ->
+ receive
+ {#{level:=Level,msg:=Msg,meta:=Meta},_} ->
+ check_msg(Msg0,Msg),
+ check_maps(Meta0,Meta,meta)
+ after 500 ->
+ ct:fail({timeout,no_log,process_info(self(),messages)})
+ end.
+
+check_no_log() ->
+ receive
+ X -> ct:fail({got_unexpected_log,X})
+ after 500 ->
+ ok
+ end.
+
+check_msg(Msg,Msg) ->
+ ok;
+check_msg({report,Expected},{report,Got}) when is_map(Expected), is_map(Got) ->
+ check_maps(Expected,Got,msg);
+check_msg(Expected,Got) ->
+ ct:fail({unexpected,msg,Expected,Got}).
+
+check_maps(Expected,Got,What) ->
+ case maps:merge(Got,Expected) of
+ Got ->
+ ok;
+ _ ->
+ ct:fail({unexpected,What,Expected,Got})
+ end.
+
+%% Handler
+adding_handler(_Id,Config) ->
+ maybe_send(add),
+ {ok,Config}.
+removing_handler(_Id) ->
+ maybe_send(remove),
+ ok.
+changing_config(_Id,_Old,#{call:=Fun}) ->
+ Fun();
+changing_config(_Id,_Old,#{fail:=true}) ->
+ {error,fail};
+changing_config(_Id,_Old,Config) ->
+ maybe_send(changing_config),
+ {ok,Config}.
+
+maybe_send(Msg) ->
+ case whereis(callback_receiver) of
+ undefined -> ok;
+ Pid -> Pid ! Msg
+ end.
+
+log(_Log,#{crash:=true}) ->
+ a=b;
+log(Log,Config) ->
+ TcProc = maps:get(tc_proc,Config,self()),
+ TcProc ! {Log,Config},
+ ok.
+
+test_api(Level) ->
+ logger:Level(#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},#{}),
+ logger:Level(#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},#{my=>meta}),
+ logger:Level("~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],#{}),
+ logger:Level("~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ logger:Level(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,x,
+ #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}),
+ logger:Level(fun(x) -> #{Level=>fun_to_r,meta=>true} end,x,
+ #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}),
+ logger:Level(fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,#{}),
+ logger:Level(F1=fun(x) -> {fun_to_bad} end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ logger:Level(F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_log_function(Level) ->
+ logger:log(Level,#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},#{}),
+ logger:log(Level,#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},#{my=>meta}),
+ logger:log(Level,"~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],#{}),
+ logger:log(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ logger:log(Level,fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],#{my=>meta}),
+ logger:log(Level,fun(x) -> #{Level=>fun_to_r,meta=>true} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},#{my=>meta}),
+ logger:log(Level,fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,#{}),
+ logger:log(Level,F1=fun(x) -> {fun_to_bad} end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ logger:log(Level,F2=fun(x) -> erlang:error(fun_that_crashes) end,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+test_macros(emergency=Level) ->
+ ?LOG_EMERGENCY(#{Level=>rep}),
+ ok = check_logged(Level,#{Level=>rep},?MY_LOC(1)),
+ ?LOG_EMERGENCY(#{Level=>rep},#{my=>meta}),
+ ok = check_logged(Level,#{Level=>rep},(?MY_LOC(1))#{my=>meta}),
+ ?LOG_EMERGENCY("~w: ~w",[Level,fa]),
+ ok = check_logged(Level,"~w: ~w",[Level,fa],?MY_LOC(1)),
+ ?LOG_EMERGENCY("~w: ~w ~w",[Level,fa,meta],#{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fa,meta],(?MY_LOC(1))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> {"~w: ~w ~w",[Level,fun_to_fa,meta]} end,
+ x, #{my=>meta}),
+ ok = check_logged(Level,"~w: ~w ~w",[Level,fun_to_fa,meta],
+ (?MY_LOC(3))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> #{Level=>fun_to_r,meta=>true} end, x, #{my=>meta}),
+ ok = check_logged(Level,#{Level=>fun_to_r,meta=>true},
+ (?MY_LOC(2))#{my=>meta}),
+ ?LOG_EMERGENCY(fun(x) -> <<"fun_to_s">> end,x,#{}),
+ ok = check_logged(Level,<<"fun_to_s">>,?MY_LOC(1)),
+ F1=fun(x) -> {fun_to_bad} end,
+ ?LOG_EMERGENCY(F1,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN ERROR: ~tp; Returned: ~tp",
+ [{F1,x},{fun_to_bad}],#{}),
+ F2=fun(x) -> erlang:error(fun_that_crashes) end,
+ ?LOG_EMERGENCY(F2,x,#{}),
+ ok = check_logged(Level,"LAZY_FUN CRASH: ~tp; Reason: ~tp",
+ [{F2,x},{error,fun_that_crashes}],#{}),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R -> {C,R} end.
diff --git a/lib/kernel/test/logger_bench_SUITE.erl b/lib/kernel/test/logger_bench_SUITE.erl
new file mode 100644
index 0000000000..d47122ea9d
--- /dev/null
+++ b/lib/kernel/test/logger_bench_SUITE.erl
@@ -0,0 +1,500 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_bench_SUITE).
+
+-compile(export_all).
+
+%%%-----------------------------------------------------------------
+%%% To include lager tests, add paths to lager and goldrush
+%%% (goldrush is a dependency inside the lager repo)
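+%%% For example (hypothetical paths, adjust to your checkout):
+%%%   erl -pa /path/to/lager/ebin -pa /path/to/goldrush/ebin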
+%%%
+%%% To print data to .csv files, add the following to a config file:
+%%% {print_csv,[{console_handler,[{path,"/some/dir/"}]}]}.
+%%%
+%%%-----------------------------------------------------------------
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("common_test/include/ct_event.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-define(msg,lists:flatten(string:pad("Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE),
+ 80,trailing,$*))).
+-define(meta,#{mfa=>{?MODULE,?FUNCTION_NAME,?FUNCTION_ARITY},
+ pid=>self()}).
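+%% Illustratively, ?msg expands to a string such as "Log from log:164" padded
+%% with '*' up to 80 characters, and ?meta carries the caller's mfa and pid so
+%% the handlers and formatters under test see realistic metadata.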
+
+-define(NO_COMPARE,[profile]).
+
+-define(TIMES,100000).
+
+suite() ->
+ [{timetrap,{seconds,120}}].
+
+init_per_suite(Config) ->
+ DataDir = ?config(data_dir,Config),
+ have_lager() andalso make(DataDir),
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(Group, Config) ->
+ H = remove_all_handlers(),
+ do_init_per_group(Group),
+ [{handlers,H}|Config].
+
+do_init_per_group(minimal_handler) ->
+ ok = logger:add_handler(?MODULE,?MODULE,#{level=>error,filter_default=>log});
+do_init_per_group(console_handler) ->
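+    %% The overload thresholds are set just above ?TIMES so the handler is
+    %% never pushed into sync, drop or flush mode during a benchmark run.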
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS,
+ logger_std_h=>#{type=>standard_io,
+ toggle_sync_qlen => ?TIMES+1,
+ drop_new_reqs_qlen => ?TIMES+2,
+ flush_reqs_qlen => ?TIMES+3,
+ enable_burst_limit => false}}),
+ have_lager() andalso lager_helper:start(),
+ ok.
+
+end_per_group(Group, Config) ->
+ case ?config(saved_config,Config) of
+ {_,[{bench,Bench}]} ->
+ print_compare_chart(Group,Bench);
+ _ ->
+ ok
+ end,
+ add_all_handlers(?config(handlers,Config)),
+ do_end_per_group(Group).
+
+do_end_per_group(minimal_handler) ->
+ ok = logger:remove_handler(?MODULE);
+do_end_per_group(console_handler) ->
+ ok = logger:remove_handler(?MODULE),
+ have_lager() andalso lager_helper:stop(),
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ wait_for_handlers(),
+ ok.
+
+wait_for_handlers() ->
+ wait_for_handler(?MODULE),
+ wait_for_handler(lager_event).
+
+wait_for_handler(Handler) ->
+ case whereis(Handler) of
+ undefined ->
+ io:format("~p: noproc1",[Handler]),
+ ok;
+ Pid ->
+ case process_info(Pid,message_queue_len) of
+ {_,0} ->
+ io:format("~p: queue=~p",[Handler,0]),
+ ok;
+ {_,Q} ->
+ io:format("~p: queue=~p",[Handler,Q]),
+ timer:sleep(2000),
+ wait_for_handler(Handler);
+ undefined ->
+ io:format("~p: noproc2",[Handler]),
+ ok
+ end
+ end.
+
+groups() ->
+ [{minimal_handler,[],[log,
+ log_drop,
+ log_drop_by_handler,
+ macro,
+ macro_drop,
+ macro_drop_by_handler,
+ error_logger,
+ error_logger_drop,
+ error_logger_drop_by_handler
+ ]},
+ {console_handler,[],[%profile,
+ log,
+ log_drop,
+ log_drop_by_handler,
+ %% log_handler_complete,
+ macro,
+ macro_drop,
+ macro_drop_by_handler,
+ %% macro_handler_complete,
+ error_logger,
+ error_logger_drop,
+ error_logger_drop_by_handler%% ,
+ %% error_logger_handler_complete
+ ] ++ lager_cases()}
+ ].
+
+lager_cases() ->
+ case have_lager() of
+ true ->
+ [lager_log,
+ lager_log_drop,
+ lager_log_drop_by_handler,
+ %% lager_log_handler_complete,
+ lager_parsetrans,
+ lager_parsetrans_drop,
+ lager_parsetrans_drop_by_handler%% ,
+ %% lager_parsetrans_handler_complete
+ ];
+ false ->
+ []
+ end.
+
+
+all() ->
+ [{group,minimal_handler},
+ {group,console_handler}
+ ].
+
+log(Config) ->
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [error,?msg], Times).
+
+log_drop(Config) ->
+ Times = ?TIMES*100,
+ ok = logger:set_logger_config(level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times).
+
+log_drop(cleanup,_Config) ->
+ ok = logger:set_logger_config(level,info).
+
+log_drop_by_handler(Config) ->
+ Times = ?TIMES,
+ %% just ensure correct levels
+ ok = logger:set_logger_config(level,info),
+ ok = logger:set_handler_config(?MODULE,level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times).
+
+log_handler_complete(Config) ->
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?MODULE,?DEFAULT_FORMAT_CONFIG}),
+ handler_complete(Config, ?FUNCTION_NAME, fun do_log_func/2, [error,?msg]).
+
+log_handler_complete(cleanup,_Config) ->
+ ok=logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}).
+
+macro(Config) ->
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2,[error,?msg], Times).
+
+macro_drop(Config) ->
+ Times = ?TIMES*100,
+ ok = logger:set_logger_config(level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2,[info,?msg], Times).
+
+macro_drop(cleanup,_Config) ->
+ ok = logger:set_logger_config(level,info).
+
+macro_drop_by_handler(Config) ->
+ Times = ?TIMES,
+ %% just ensure correct levels
+ ok = logger:set_logger_config(level,info),
+ ok = logger:set_handler_config(?MODULE,level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_macro/2, [info,?msg], Times).
+
+macro_handler_complete(Config) ->
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?MODULE,?DEFAULT_FORMAT_CONFIG}),
+ handler_complete(Config, ?FUNCTION_NAME, fun do_log_macro/2, [error,?msg]).
+
+macro_handler_complete(cleanup,_Config) ->
+ ok=logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}).
+
+error_logger(Config) ->
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun do_error_logger/2, [error,?msg], Times).
+
+error_logger_drop(Config) ->
+ Times = ?TIMES*100,
+ ok = logger:set_logger_config(level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_error_logger/2, [info,?msg], Times).
+
+error_logger_drop(cleanup,_Config) ->
+ ok = logger:set_logger_config(level,info).
+
+error_logger_drop_by_handler(Config) ->
+ Times = ?TIMES,
+ %% just ensure correct levels
+ ok = logger:set_logger_config(level,info),
+ ok = logger:set_handler_config(?MODULE,level,error),
+ run_benchmark(Config,?FUNCTION_NAME,fun do_log_func/2, [info,?msg], Times).
+
+error_logger_handler_complete(Config) ->
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?MODULE,?DEFAULT_FORMAT_CONFIG}),
+ handler_complete(Config, ?FUNCTION_NAME, fun do_error_logger/2, [error,?msg]).
+
+error_logger_handler_complete(cleanup,_Config) ->
+ ok=logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}).
+
+lager_log(Config) ->
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [error,?msg], Times).
+
+lager_log_drop(Config) ->
+ Times = ?TIMES*100,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [info,?msg], Times).
+
+lager_log_drop_by_handler(Config) ->
+ %% This concept does not exist, so doing same as lager_log_drop/1
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_func/2, [info,?msg], Times).
+
+lager_log_handler_complete(Config) ->
+ handler_complete(Config, ?FUNCTION_NAME, fun lager_helper:do_func/2, [error,?msg]).
+
+lager_parsetrans(Config) ->
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [error,?msg], Times).
+
+lager_parsetrans_drop(Config) ->
+ Times = ?TIMES*100,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [info,?msg], Times).
+
+lager_parsetrans_drop_by_handler(Config) ->
+ %% This concept does not exist, so doing same as lager_parsetrans_drop/1
+ Times = ?TIMES,
+ run_benchmark(Config,?FUNCTION_NAME,fun lager_helper:do_parsetrans/2, [info,?msg], Times).
+
+lager_parsetrans_handler_complete(Config) ->
+ handler_complete(Config, ?FUNCTION_NAME, fun lager_helper:do_parsetrans/2, [error,?msg]).
+
+
+profile(Config) ->
+ Times = ?TIMES,
+ %% fprof:apply(fun repeated_apply/3,[fun lager_helper:do_func/2,[error,?msg],Times]),
+ fprof:apply(fun repeated_apply/3,[fun do_log_func/2,[error,?msg],Times]),
+ ok = fprof:profile(),
+ ok = fprof:analyse(dest,"../fprof.analyse"),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+%% Handler
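+%% The suite acts both as a minimal handler (log/2 below is a no-op) and as a
+%% formatter: format/2 sends 'done' back to the test case pid found in the
+%% event metadata, which lets handler_complete/4 time the whole pipeline from
+%% the log call until the handler process has formatted the event.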
+log(_Log,_Config) ->
+ ok.
+
+format(Log=#{meta:=#{pid:=Pid}},Config) when is_pid(Pid) ->
+ String = ?DEFAULT_FORMATTER:format(Log,Config),
+ Pid ! done,
+ String;
+format(Log=#{meta:=#{pid:=PidStr}},Config) when is_list(PidStr) ->
+ String = ?DEFAULT_FORMATTER:format(Log,Config),
+ list_to_pid(PidStr) ! done,
+ String.
+
+handler_complete(Config, TC, Fun, Args) ->
+ Times = ?TIMES,
+ Start = os:perf_counter(microsecond),
+ repeated_apply(Fun, Args, Times),
+ MSecs = wait_for_done(Start,Times),
+ calc_and_report(Config,TC,MSecs,Times).
+
+wait_for_done(Start,0) ->
+ os:perf_counter(microsecond) - Start;
+wait_for_done(Start,N) ->
+ receive
+ done ->
+ wait_for_done(Start,N-1)
+ after 20000 ->
+            ct:fail("missing " ++ integer_to_list(N) ++ " replies")
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Benchmark stuff
+run_benchmark(Config,Tag,Fun,Args,Times) ->
+ _ = erlang:apply(Fun, Args), % apply once to ensure level is cached
+ MSecs = measure_repeated_op(Fun, Args, Times),
+ %% fprof:profile(),
+ %% fprof:analyse(dest,"../"++atom_to_list(Tag)++".prof"),
+ calc_and_report(Config,Tag,MSecs,Times).
+
+measure_repeated_op(Fun, Args, Times) ->
+ Start = os:perf_counter(microsecond),
+ %% fprof:apply(fun repeated_apply/3, [Fun, Args, Times]),
+ repeated_apply(Fun, Args, Times),
+ os:perf_counter(microsecond) - Start.
+
+repeated_apply(_F, _Args, Times) when Times =< 0 ->
+ ok;
+repeated_apply(F, Args, Times) ->
+ erlang:apply(F, Args),
+ repeated_apply(F, Args, Times - 1).
+
+calc_and_report(Config,Tag,MSecs,Times) ->
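+    %% IOPS = log operations per second, e.g. (illustrative numbers) 100000
+    %% calls taking 250000 us give trunc(100000 * (1000000/250000)) = 400000.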
+ IOPS = trunc(Times * (1000000 / MSecs)),
+ ct_event:notify(#event{ name = benchmark_data, data = [{value,IOPS}] }),
+ ct:print("~p:~n~p IOPS, ~p us", [Tag, IOPS, MSecs]),
+ ct:comment("~p IOPS, ~p us", [IOPS, MSecs]),
+ Bench = case ?config(saved_config,Config) of
+ {_,[{bench,B}]} -> B;
+ undefined -> []
+ end,
+ {save_config,[{bench,[{Tag,IOPS,MSecs}|Bench]}]}.
+
+remove_all_handlers() ->
+ #{handlers:=Hs} = logger:i(),
+ [logger:remove_handler(Id) || {Id,_,_} <- Hs],
+ Hs.
+
+add_all_handlers(Hs) ->
+ [logger:add_handler(Id,Mod,Config) || {Id,Mod,Config} <- Hs],
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Call logger in different ways
+do_log_func(Level,Msg) ->
+ logger:Level(Msg,[],?meta).
+
+do_log_macro(error,Msg) ->
+ ?LOG_ERROR(Msg,[]);
+do_log_macro(info,Msg) ->
+ ?LOG_INFO(Msg,[]);
+do_log_macro(debug,Msg) ->
+ ?LOG_DEBUG(Msg,[]).
+
+do_error_logger(error,Msg) ->
+ error_logger:error_msg(Msg,[]);
+do_error_logger(info,Msg) ->
+ error_logger:info_msg(Msg,[]).
+
+%%%-----------------------------------------------------------------
+%%%
+print_compare_chart(Group,Bench) ->
+ io:format("~-20s~12s~12s~12s~12s",
+ ["Microseconds:","Log","Drop","HDrop","Complete"]),
+ io:format(user,"~-20s~12s~12s~12s~12s~n",
+ ["Microseconds:","Log","Drop","HDrop","Complete"]),
+ {Log,Drop,HDrop,Comp} = sort_bench(Bench,[],[],[],[]),
+ print_compare_chart(Log,Drop,HDrop,Comp),
+ io:format(user,"~n",[]),
+ maybe_print_csv_files(Group,
+ [{log,Log},{drop,Drop},{hdrop,HDrop},{comp,Comp}]).
+
+print_compare_chart([{What,LIOPS,LMSecs}|Log],
+ [{What,DIOPS,DMSecs}|Drop],
+ [{What,HIOPS,HMSecs}|HDrop],
+ [{What,CIOPS,CMSecs}|Comp]) ->
+ io:format("~-20w~12w~12w~12w~12w",[What,LMSecs,DMSecs,HMSecs,CMSecs]),
+ io:format(user,"~-20w~12w~12w~12w~12w~n",[What,LMSecs,DMSecs,HMSecs,CMSecs]),
+ print_compare_chart(Log,Drop,HDrop,Comp);
+print_compare_chart([{What,LIOPS,LMSecs}|Log],
+ [{What,DIOPS,DMSecs}|Drop],
+ [{What,HIOPS,HMSecs}|HDrop],
+ []=Comp) ->
+ io:format("~-20w~12w~12w~12w",[What,LMSecs,DMSecs,HMSecs]),
+ io:format(user,"~-20w~12w~12w~12w~n",[What,LMSecs,DMSecs,HMSecs]),
+ print_compare_chart(Log,Drop,HDrop,Comp);
+print_compare_chart([],[],[],[]) ->
+ ok;
+print_compare_chart(Log,Drop,HDrop,Comp) ->
+ ct:fail({Log,Drop,HDrop,Comp}).
+
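+%% Sort the benchmark results into {What,Type} buckets by testcase name, e.g.
+%% (illustrative) macro -> {macro,log}, macro_drop -> {macro,drop} and
+%% lager_log_drop_by_handler -> {lager_log,drop_by_handler}.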
+sort_bench([{TC,IOPS,MSecs}|Bench],Log,Drop,HDrop,Comp) ->
+ case lists:member(TC,?NO_COMPARE) of
+ true ->
+ sort_bench(Bench,Log,Drop,HDrop,Comp);
+ false ->
+ TCStr = atom_to_list(TC),
+ {What,Type} =
+ case re:run(TCStr,"(.*)_(drop.*)",
+ [{capture,all_but_first,list}]) of
+ {match,[WhatStr,TypeStr]} ->
+ {list_to_atom(WhatStr),list_to_atom(TypeStr)};
+ nomatch ->
+ case re:run(TCStr,"(.*)_(handler_complete.*)",
+ [{capture,all_but_first,list}]) of
+ {match,[WhatStr,TypeStr]} ->
+ {list_to_atom(WhatStr),list_to_atom(TypeStr)};
+ nomatch ->
+ {TC,log}
+ end
+ end,
+ case Type of
+ log ->
+ sort_bench(Bench,[{What,IOPS,MSecs}|Log],Drop,HDrop,Comp);
+ drop ->
+ sort_bench(Bench,Log,[{What,IOPS,MSecs}|Drop],HDrop,Comp);
+ drop_by_handler ->
+ sort_bench(Bench,Log,Drop,[{What,IOPS,MSecs}|HDrop],Comp);
+ handler_complete ->
+ sort_bench(Bench,Log,Drop,HDrop,[{What,IOPS,MSecs}|Comp])
+ end
+ end;
+sort_bench([],Log,Drop,HDrop,Comp) ->
+ {lists:keysort(1,Log),
+ lists:keysort(1,Drop),
+ lists:keysort(1,HDrop),
+ lists:keysort(1,Comp)}.
+
+maybe_print_csv_files(Group,Data) ->
+ case ct:get_config({print_csv,Group}) of
+ undefined ->
+ ok;
+ Cfg ->
+ Path = proplists:get_value(path,Cfg,".."),
+ Files = [begin
+ File = filename:join(Path,F)++".csv",
+ case filelib:is_regular(File) of
+ true ->
+ {ok,Fd} = file:open(File,[append]),
+ Fd;
+ false ->
+ {ok,Fd} = file:open(File,[write]),
+ ok = file:write(Fd,
+ "error_logger,lager_log,"
+ "lager_parsetrans,logger_log,"
+ "logger_macro\n"),
+ Fd
+ end
+ end || {F,_} <- Data],
+ [print_csv_file(F,D) || {F,D} <- lists:zip(Files,Data)],
+ [file:close(Fd) || Fd <- Files],
+ ok
+ end.
+
+print_csv_file(Fd,{_,Data}) ->
+ AllIOPS = [integer_to_list(IOPS) || {_,IOPS,_} <- Data],
+ ok = file:write(Fd,lists:join(",",AllIOPS)++"\n").
+
+have_lager() ->
+ code:ensure_loaded(lager) == {module,lager}.
+
+make(Dir) ->
+ {ok,Cwd} = file:get_cwd(),
+ ok = file:set_cwd(Dir),
+ up_to_date = make:all([load]),
+ ok = file:set_cwd(Cwd),
+ code:add_path(Dir).
diff --git a/lib/kernel/test/logger_bench_SUITE_data/Emakefile b/lib/kernel/test/logger_bench_SUITE_data/Emakefile
new file mode 100644
index 0000000000..85c82bdaab
--- /dev/null
+++ b/lib/kernel/test/logger_bench_SUITE_data/Emakefile
@@ -0,0 +1 @@
+{['lager_helper'],[{outdir,"."},debug_info,{i,"/home/uabshan/Work/git/otp/lib/kernel/src"},{i,"/home/uabshan/Work/git/otp/lib/kernel/include"}]}.
diff --git a/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl b/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl
new file mode 100644
index 0000000000..296ced4276
--- /dev/null
+++ b/lib/kernel/test/logger_bench_SUITE_data/lager_helper.erl
@@ -0,0 +1,73 @@
+-module(lager_helper).
+
+-compile(export_all).
+-compile({parse_transform,lager_transform}).
+
+-include_lib("kernel/src/logger_internal.hrl").
+
+start() ->
+ application:load(lager),
+ application:set_env(lager, error_logger_redirect, false),
+ application:set_env(lager, async_threshold, 100010),
+ application:set_env(lager, async_threshold_window, 100),
+ application:set_env(lager,handlers,[{?MODULE,[{level,error}]}]),
+ lager:start().
+
+stop() ->
+ application:stop(lager).
+
+do_func(Level,Msg) ->
+ lager:log(Level,[{pid,self()}],Msg,[]).
+
+do_parsetrans(error,Msg) ->
+ lager:error(Msg,[]);
+do_parsetrans(info,Msg) ->
+ lager:info(Msg,[]).
+
+%%%-----------------------------------------------------------------
+%%% Dummy handler for lager
+-record(state, {level :: {'mask', integer()},
+ formatter :: atom(),
+ format_config :: any()}).
+init(Opts) ->
+ Level = proplists:get_value(level,Opts,info),
+ Formatter = proplists:get_value(formatter,Opts,logger_bench_SUITE),
+ FormatConfig = proplists:get_value(format_config,Opts,?DEFAULT_FORMAT_CONFIG),
+ {ok,#state{level=lager_util:config_to_mask(Level),
+ formatter=Formatter,
+ format_config=FormatConfig}}.
+
+handle_call(get_loglevel, #state{level=Level} = State) ->
+ {ok, Level, State};
+handle_call({set_loglevel, Level}, State) ->
+ try lager_util:config_to_mask(Level) of
+ Levels ->
+ {ok, ok, State#state{level=Levels}}
+ catch
+ _:_ ->
+ {ok, {error, bad_log_level}, State}
+ end;
+handle_call(_Request, State) ->
+ {ok, ok, State}.
+
+handle_event({log, Message},
+ #state{level=L,formatter=Formatter,format_config=FormatConfig} = State) ->
+ case lager_util:is_loggable(Message, L, ?MODULE) of
+ true ->
+ Metadata =
+ case maps:from_list(lager_msg:metadata(Message)) of
+ Meta = #{pid:=Pid} when is_pid(Pid) ->
+ Meta;
+ Meta = #{pid:=PidStr} when is_list(PidStr) ->
+ Meta
+ end,
+ Log = #{level=>lager_msg:severity(Message),
+ msg=>{report,lager_msg:message(Message)},
+ meta=>Metadata},
+ io:put_chars(user, Formatter:format(Log,FormatConfig)),
+ {ok, State};
+ false ->
+ {ok, State}
+ end;
+handle_event(_Event, State) ->
+ {ok, State}.
diff --git a/lib/kernel/test/logger_disk_log_h_SUITE.erl b/lib/kernel/test/logger_disk_log_h_SUITE.erl
new file mode 100644
index 0000000000..c7c6137380
--- /dev/null
+++ b/lib/kernel/test/logger_disk_log_h_SUITE.erl
@@ -0,0 +1,1417 @@
+-module(logger_disk_log_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-define(check_no_log, [] = test_server:messages_get()).
+
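+%% Wait for a {log,Expected} message (sent by the suite's format/2 callback or
+%% by the internal_log/2 test hook) and require the mailbox to be empty
+%% afterwards.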
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(bin(Msg), list_to_binary(Msg++"\n")).
+-define(log_no(File,N), lists:concat([File,".",N])).
+-define(domain,#{domain=>[?MODULE]}).
+
+-define(SYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500;
+ true -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end).
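+%% Time to allow for the handler's repeated filesync to have happened:
+%% ?FILESYNC_REPEAT_INTERVAL plus a 500 ms margin, or 5500 ms if the interval
+%% is defined as an atom (e.g. no_repeat).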
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ timer:start(), % to avoid progress report
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(TestHooksCase, Config) when
+ TestHooksCase == write_failure;
+ TestHooksCase == sync_failure ->
+ if ?TEST_HOOKS_TAB == undefined ->
+ {skip,"Define the TEST_HOOKS macro to run this test"};
+ true ->
+ ct:print("********** ~w **********", [TestHooksCase]),
+ Config
+ end;
+init_per_testcase(TestCase, Config) ->
+ ct:print("********** ~w **********", [TestCase]),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop_handler,
+ create_log,
+ open_existing_log,
+ disk_log_opts,
+ default_formatter,
+ logging,
+ errors,
+ formatter_fail,
+ config_fail,
+ bad_input,
+ info_and_reset,
+ reconfig,
+ disk_log_sync,
+ disk_log_full,
+ disk_log_wrap,
+ disk_log_events,
+ write_failure,
+ sync_failure,
+ op_switch_to_sync,
+ op_switch_to_drop,
+ op_switch_to_flush,
+ limit_burst_disabled,
+ limit_burst_enabled_one,
+ limit_burst_enabled_period,
+ kill_disabled,
+ qlen_kill_new,
+ %% qlen_kill_std,
+ mem_kill_new,
+ %% mem_kill_std,
+ restart_after,
+ handler_requests_under_load
+ ].
+
+start_stop_handler(_Config) ->
+ ok = logger:add_handler(?MODULE, logger_disk_log_h, #{}),
+ {error,{already_exist,?MODULE}} =
+ logger:add_handler(?MODULE, logger_disk_log_h, #{}),
+ true = is_pid(whereis(?MODULE)),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(?MODULE).
+start_stop_handler(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+create_log(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_A"])),
+ LogFile1 = filename:join(PrivDir, Name1),
+ ok = start_and_add(Name1, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:info("hello", ?domain),
+ logger_disk_log_h:disk_log_sync(Name1),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"hello\n">>}, 5000),
+
+ %% test second handler
+ Name2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_B"])),
+ DLName = lists:concat([?FUNCTION_NAME,"_B_log"]),
+ LogFile2 = filename:join(PrivDir, DLName),
+ ok = start_and_add(Name2, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile2}),
+ logger:info("dummy", ?domain),
+ logger_disk_log_h:disk_log_sync(Name2),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile2,1)]),
+ try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
+
+ remove_and_stop(Name1),
+ remove_and_stop(Name2),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"hello\ndummy\n">>}, 1),
+ try_read_file(?log_no(LogFile2,1), {ok,<<"dummy\n">>}, 5000),
+ ok.
+
+open_existing_log(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ HName = ?FUNCTION_NAME,
+ DLName = lists:concat([?FUNCTION_NAME,"_log"]),
+ LogFile1 = filename:join(PrivDir, DLName),
+ ok = start_and_add(HName, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:info("one", ?domain),
+ logger_disk_log_h:disk_log_sync(HName),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile1,1)]),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\n">>}, 5000),
+ logger:info("two", ?domain),
+ ok = remove_and_stop(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\n">>}, 5000),
+
+ logger:info("two and a half", ?domain),
+
+ ok = start_and_add(HName, #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}},
+ #{file=>LogFile1}),
+ logger:info("three", ?domain),
+ logger_disk_log_h:disk_log_sync(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000),
+ remove_and_stop(HName),
+ try_read_file(?log_no(LogFile1,1), {ok,<<"one\ntwo\nthree\n">>}, 5000).
+
+disk_log_opts(Config) ->
+ Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+ PrivDir = ?config(priv_dir,Config),
+ WName = list_to_atom(lists:concat([?FUNCTION_NAME,"_W"])),
+ WFile = lists:concat([?FUNCTION_NAME,"_W_log"]),
+ Size = length("12345"),
+ ConfigW = #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter => {?MODULE,no_nl}},
+ WFileFull = filename:join(PrivDir, WFile),
+ DLOptsW = #{file => WFileFull,
+ type => wrap,
+ max_no_bytes => Size,
+ max_no_files => 2},
+ ok = start_and_add(WName, ConfigW, DLOptsW),
+ WInfo1 = disk_log:info(WName),
+ ct:log("Fullname = ~s", [WFileFull]),
+ {WFileFull,wrap,{Size,2},1} = {Get(file,WInfo1),Get(type,WInfo1),
+ Get(size,WInfo1),Get(current_file,WInfo1)},
+ logger:info("123", ?domain),
+ logger_disk_log_h:disk_log_sync(WName),
+ timer:sleep(500),
+ 1 = Get(current_file, disk_log:info(WName)),
+
+ logger:info("45", ?domain),
+ logger_disk_log_h:disk_log_sync(WName),
+ timer:sleep(500),
+ 1 = Get(current_file, disk_log:info(WName)),
+
+ logger:info("6", ?domain),
+ logger_disk_log_h:disk_log_sync(WName),
+ timer:sleep(500),
+ 2 = Get(current_file, disk_log:info(WName)),
+
+ logger:info("7890", ?domain),
+ logger_disk_log_h:disk_log_sync(WName),
+ timer:sleep(500),
+ 2 = Get(current_file, disk_log:info(WName)),
+
+ HName1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H1"])),
+ HFile1 = lists:concat([?FUNCTION_NAME,"_H1_log"]),
+ ConfigH = #{filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter => {?MODULE,no_nl}},
+ HFile1Full = filename:join(PrivDir, HFile1),
+ DLOptsH1 = #{file => HFile1Full,
+ type => halt},
+ ok = start_and_add(HName1, ConfigH, DLOptsH1),
+ HInfo1 = disk_log:info(HName1),
+ ct:log("Fullname = ~s", [HFile1Full]),
+ {HFile1Full,halt,infinity} = {Get(file,HInfo1),Get(type,HInfo1),
+ Get(size,HInfo1)},
+ logger:info("12345", ?domain),
+ logger_disk_log_h:disk_log_sync(HName1),
+ timer:sleep(500),
+ 1 = Get(no_written_items, disk_log:info(HName1)),
+
+ HName2 = list_to_atom(lists:concat([?FUNCTION_NAME,"_H2"])),
+ HFile2 = lists:concat([?FUNCTION_NAME,"_H2_log"]),
+ HFile2Full = filename:join(PrivDir, HFile2),
+ DLOptsH2 = DLOptsH1#{file => HFile2Full,
+ max_no_bytes => 1000},
+ ok = start_and_add(HName2, ConfigH, DLOptsH2),
+ HInfo3 = disk_log:info(HName2),
+ ct:log("Fullname = ~s", [HFile2Full]),
+ {HFile2Full,halt,1000} = {Get(file,HInfo3),Get(type,HInfo3),
+ Get(size,HInfo3)},
+
+ remove_and_stop(WName),
+ remove_and_stop(HName1),
+ remove_and_stop(HName2),
+ ok.
+
+default_formatter(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ LogFile = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)),
+ HConfig = #{disk_log_opts => #{file=>LogFile},
+ filter_default=>log},
+ ct:pal("Log: ~p", [LogFile]),
+ ok = logger:add_handler(?MODULE, logger_disk_log_h, HConfig),
+ ok = logger:set_handler_config(?MODULE,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ LogName = lists:concat([LogFile, ".1"]),
+ logger:info("dummy"),
+ wait_until_written(LogName),
+ {ok,Bin} = file:read_file(LogName),
+ match = re:run(Bin, "=INFO REPORT====.*\ndummy", [{capture,none}]),
+ ok.
+default_formatter(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+logging(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ %% test new handler
+ Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ LogFile = filename:join(PrivDir, Name),
+ ok = start_and_add(Name, #{filter_default=>log,
+ formatter=>{?MODULE,self()}},
+ #{file => LogFile}),
+ MsgFormatter = fun(Term) -> {io_lib:format("Term:~p",[Term]),[]} end,
+ logger:info([{x,y}], #{report_cb => MsgFormatter}),
+ logger:info([{x,y}], #{}),
+ ct:pal("Checking contents of ~p", [?log_no(LogFile,1)]),
+ try_read_file(?log_no(LogFile,1), {ok,<<"Term:[{x,y}]\n x: y\n">>}, 5000).
+
+logging(cleanup, _Config) ->
+ Name = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ remove_and_stop(Name).
+
+errors(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ LogFile1 = filename:join(PrivDir,Name1),
+ HConfig = #{disk_log_opts=>#{file=>LogFile1},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}},
+ ok = logger:add_handler(Name1, logger_disk_log_h, HConfig),
+ {error,{already_exist,Name1}} =
+ logger:add_handler(Name1, logger_disk_log_h, #{}),
+
+ %%! TODO:
+ %%! Check how bad log_opts are handled!
+
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(Name1,
+ disk_log_opts,
+ #{file=>LogFile1,
+ type=>halt}),
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(Name1,id,new),
+
+ ok = logger:remove_handler(Name1),
+ {error,{not_found,Name1}} = logger:remove_handler(Name1),
+ ok.
+
+errors(cleanup, _Config) ->
+ Name1 = list_to_atom(lists:concat([?FUNCTION_NAME,"_1"])),
+ _ = logger:remove_handler(Name1).
+
+formatter_fail(Config) ->
+ PrivDir = ?config(priv_dir,Config),
+ Name = ?FUNCTION_NAME,
+ LogFile = filename:join(PrivDir,Name),
+ ct:pal("Log = ~p", [LogFile]),
+ HConfig = #{disk_log_opts => #{file=>LogFile},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])},
+ %% no formatter!
+ logger:add_handler(Name, logger_disk_log_h, HConfig),
+ Pid = whereis(Name),
+ true = is_pid(Pid),
+ {ok,#{handlers:=H}} = logger:get_logger_config(),
+ true = lists:member(Name,H),
+
+ %% Formatter is added automatically
+ {ok,{_,#{formatter:={logger_formatter,_}}}} =
+ logger:get_handler_config(Name),
+ logger:info(M1=?msg,?domain),
+ Got1 = try_match_file(?log_no(LogFile,1),"=INFO REPORT====.*\n"++M1,5000),
+
+ ok = logger:set_handler_config(Name,formatter,{nonexistingmodule,#{}}),
+ logger:info(M2=?msg,?domain),
+ Got2 = try_match_file(?log_no(LogFile,1),
+ Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2,
+ 5000),
+
+ ok = logger:set_handler_config(Name,formatter,{?MODULE,crash}),
+ logger:info(M3=?msg,?domain),
+ Got3 = try_match_file(?log_no(LogFile,1),
+ Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3,
+ 5000),
+
+ ok = logger:set_handler_config(Name,formatter,{?MODULE,bad_return}),
+ logger:info(?msg,?domain),
+ try_match_file(?log_no(LogFile,1),
+ Got3++"FORMATTER ERROR: bad_return_value",
+ 5000),
+
+ %% Check that handler is still alive and was never dead
+ Pid = whereis(Name),
+ {ok,#{handlers:=H}} = logger:get_logger_config(),
+ ok.
+
+formatter_fail(cleanup,_Config) ->
+ _ = logger:remove_handler(?FUNCTION_NAME),
+ ok.
+
+config_fail(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_disk_log_h,{bad,bad}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{logger_disk_log_h => #{bad => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_levels,{42,42,_}}}} =
+ logger:add_handler(?MODULE,logger_disk_log_h,
+ #{logger_disk_log_h => #{toggle_sync_qlen=>42,
+ drop_new_reqs_qlen=>42}}),
+
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ %% can't change the disk log options for a log already in use
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,disk_log_opts,
+ #{max_no_files=>2}),
+ %% can't change name of an existing handler
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,id,bad),
+ %% incorrect values of OP params
+ {error,{invalid_levels,_}} =
+ logger:set_handler_config(?MODULE,logger_disk_log_h,
+ #{toggle_sync_qlen=>100,
+ flush_reqs_qlen=>99}),
+ %% invalid name of config parameter
+ {error,{invalid_config,logger_disk_log_h,{filesync_rep_int,2000}}} =
+ logger:set_handler_config(?MODULE, logger_disk_log_h,
+ #{filesync_rep_int => 2000}),
+ ok.
+config_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+bad_input(_Config) ->
+ {error,{badarg,{disk_log_sync,["BadType"]}}} =
+ logger_disk_log_h:disk_log_sync("BadType"),
+ {error,{badarg,{info,["BadType"]}}} = logger_disk_log_h:info("BadType"),
+ {error,{badarg,{reset,["BadType"]}}} = logger_disk_log_h:reset("BadType").
+
+info_and_reset(_Config) ->
+ ok = logger:add_handler(?MODULE,logger_disk_log_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ #{id := ?MODULE} = logger_disk_log_h:info(?MODULE),
+ ok = logger_disk_log_h:reset(?MODULE).
+info_and_reset(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+reconfig(Config) ->
+ Dir = ?config(priv_dir,Config),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{id := ?MODULE,
+ toggle_sync_qlen := ?TOGGLE_SYNC_QLEN,
+ drop_new_reqs_qlen := ?DROP_NEW_REQS_QLEN,
+ flush_reqs_qlen := ?FLUSH_REQS_QLEN,
+ enable_burst_limit := ?ENABLE_BURST_LIMIT,
+ burst_limit_size := ?BURST_LIMIT_SIZE,
+ burst_window_time := ?BURST_WINDOW_TIME,
+ enable_kill_overloaded := ?ENABLE_KILL_OVERLOADED,
+ handler_overloaded_qlen := ?HANDLER_OVERLOADED_QLEN,
+ handler_overloaded_mem := ?HANDLER_OVERLOADED_MEM,
+ handler_restart_after := ?HANDLER_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL,
+ log_opts := #{type := ?DISK_LOG_TYPE,
+ max_no_files := ?DISK_LOG_MAX_NO_FILES,
+ max_no_bytes := ?DISK_LOG_MAX_NO_BYTES,
+ file := _DiskLogFile}} =
+ logger_disk_log_h:info(?MODULE),
+
+ ok = logger:set_handler_config(?MODULE, logger_disk_log_h,
+ #{toggle_sync_qlen => 1,
+ drop_new_reqs_qlen => 2,
+ flush_reqs_qlen => 3,
+ enable_burst_limit => false,
+ burst_limit_size => 10,
+ burst_window_time => 10,
+ enable_kill_overloaded => true,
+ handler_overloaded_qlen => 100000,
+ handler_overloaded_mem => 10000000,
+ handler_restart_after => never,
+ filesync_repeat_interval => no_repeat}),
+ #{id := ?MODULE,
+ toggle_sync_qlen := 1,
+ drop_new_reqs_qlen := 2,
+ flush_reqs_qlen := 3,
+ enable_burst_limit := false,
+ burst_limit_size := 10,
+ burst_window_time := 10,
+ enable_kill_overloaded := true,
+ handler_overloaded_qlen := 100000,
+ handler_overloaded_mem := 10000000,
+ handler_restart_after := never,
+ filesync_repeat_interval := no_repeat} =
+ logger_disk_log_h:info(?MODULE),
+
+ ok = logger:remove_handler(?MODULE),
+
+ File = filename:join(Dir, "logfile"),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ disk_log_opts=>
+ #{type => halt,
+ max_no_files => 1,
+ max_no_bytes => 1024,
+ file => File}}),
+ #{log_opts := #{type := halt,
+ max_no_files := 1,
+ max_no_bytes := 1024,
+ file := File}} =
+ logger_disk_log_h:info(?MODULE),
+ ok.
+
+reconfig(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+disk_log_sync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ Log = lists:concat([File,".1"]),
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{disk_log_opts => #{file => File},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,nl}}),
+
+ start_tracer([{?MODULE,format,2},
+ {disk_log,blog,2},
+ {disk_log,sync,1}],
+ [{formatter,"first"},
+ {disk_log,blog},
+ {disk_log,sync}]),
+
+ logger:info("first", ?domain),
+ %% wait for automatic disk_log_sync
+ check_tracer(?FILESYNC_REPEAT_INTERVAL*2),
+
+ start_tracer([{?MODULE,format,2},
+ {disk_log,blog,2},
+ {disk_log,sync,1}],
+ [{formatter,"second"},
+ {formatter,"third"},
+ {disk_log,blog},
+ {disk_log,blog},
+ {disk_log,sync}]),
+ %% two log requests in fast succession will make the handler skip
+ %% an automatic disk log sync
+ logger:info("second", ?domain),
+ logger:info("third", ?domain),
+ %% do explicit disk_log_sync
+ logger_disk_log_h:disk_log_sync(?MODULE),
+ check_tracer(100),
+
+ %% check that if there's no repeated disk_log_sync active,
+    %% a disk_log_sync is still performed when the handler goes idle
+ logger:set_handler_config(?MODULE, logger_disk_log_h,
+ #{filesync_repeat_interval => no_repeat}),
+ no_repeat = maps:get(filesync_repeat_interval,
+ logger_disk_log_h:info(?MODULE)),
+
+ start_tracer([{?MODULE,format,2},
+ {disk_log,blog,2},
+ {disk_log,sync,1}],
+ [{formatter,"fourth"},
+ {disk_log,blog},
+ {formatter,"fifth"},
+ {disk_log,blog},
+ {disk_log,sync}]),
+
+ logger:info("fourth", ?domain),
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ logger:info("fifth", ?domain),
+ %% wait for automatic disk_log_sync
+ check_tracer(?IDLE_DETECT_TIME_MSEC*2),
+
+ try_read_file(Log, {ok,<<"first\nsecond\nthird\nfourth\nfifth\n">>}, 1000),
+
+ %% switch repeated disk_log_sync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ OneSync = {logger_disk_log_h,handle_cast,repeated_disk_log_sync},
+ %% receive 1 initial repeated_disk_log_sync, then 1 per sec
+ start_tracer([{logger_disk_log_h,handle_cast,2}],
+ [OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+
+ logger:set_handler_config(?MODULE, logger_disk_log_h,
+ #{filesync_repeat_interval => SyncInt}),
+ SyncInt = maps:get(filesync_repeat_interval,
+ logger_disk_log_h:info(?MODULE)),
+ timer:sleep(WaitT),
+ logger:set_handler_config(?MODULE, logger_disk_log_h,
+ #{filesync_repeat_interval => no_repeat}),
+ check_tracer(100),
+ ok.
+disk_log_sync(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+disk_log_wrap(Config) ->
+ Get = fun(Key, PL) -> proplists:get_value(Key, PL) end,
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ ct:pal("Log = ~p", [File]),
+ MaxFiles = 3,
+ MaxBytes = 5,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ disk_log_opts=>
+ #{type => wrap,
+ max_no_files => MaxFiles,
+ max_no_bytes => MaxBytes,
+ file => File}}),
+ Info = disk_log:info(?MODULE),
+ {File,wrap,{MaxBytes,MaxFiles},1} =
+ {Get(file,Info),Get(type,Info),Get(size,Info),Get(current_file,Info)},
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(?MODULE), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,MaxBytes)],
+ ct:pal("String = ~p (~w)", [Text, erts_debug:size(Text)]),
+ %% fill first file
+ lists:foreach(fun(N) ->
+ Log = lists:concat([File,".",N]),
+ logger:info(Text, ?domain),
+ wait_until_written(Log),
+ ct:pal("N = ~w",
+ [N = Get(current_file,
+ disk_log:info(?MODULE))])
+ end, lists:seq(1,MaxFiles)),
+
+ %% wait for trace messages
+ timer:sleep(1000),
+ dbg:stop_clear(),
+ Received = lists:flatmap(fun({trace,_M,handle_info,
+ [{disk_log,_Node,_Name,What},_]}) ->
+ [{trace,What}];
+ ({log,_}) ->
+ []
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ Received = [{trace,{wrap,0}} || _ <- lists:seq(1,MaxFiles-1)],
+ ok.
+
+disk_log_wrap(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+disk_log_full(Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ ct:pal("Log = ~p", [File]),
+ MaxBytes = 50,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()},
+ disk_log_opts=>
+ #{type => halt,
+ max_no_files => 1,
+ max_no_bytes => MaxBytes,
+ file => File}}),
+
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(?MODULE), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+ NoOfChars = 5,
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,NoOfChars)],
+ [logger:info(Text, ?domain) || _ <- lists:seq(1,trunc(MaxBytes/NoOfChars)+1)],
+
+ %% wait for trace messages
+ timer:sleep(2000),
+ dbg:stop_clear(),
+ Received = lists:flatmap(fun({trace,_M,handle_info,
+ [{disk_log,_Node,_Name,What},_]}) ->
+ [{trace,What}];
+ ({log,_}) ->
+ []
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ [{trace,full},
+ {trace,{error_status,{error,{full,_}}}}] = Received,
+ ok.
+disk_log_full(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+disk_log_events(Config) ->
+ Node = node(),
+ Log = ?MODULE,
+ ok = logger:add_handler(?MODULE,
+ logger_disk_log_h,
+ #{filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ %% Events copied from disk_log API
+ Events =
+ [{disk_log, Node, Log, {wrap, 0}},
+ {disk_log, Node, Log, {truncated, 0}},
+ {disk_log, Node, Log, {read_only, 42}},
+ {disk_log, Node, Log, {blocked_log, 42}},
+ {disk_log, Node, Log, {format_external, 42}},
+ {disk_log, Node, Log, full},
+ {disk_log, Node, Log, {error_status, ok}}],
+
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(?MODULE), [c]),
+ {ok,_} = dbg:tp(logger_disk_log_h, handle_info, 2, []),
+
+ [whereis(?MODULE) ! E || E <- Events],
+ %% wait for trace messages
+ timer:sleep(2000),
+ dbg:stop_clear(),
+ Received = lists:map(fun({trace,_M,handle_info,
+ [Got,_]}) -> Got
+ end, test_server:messages_get()),
+ ct:pal("Trace =~n~p", [Received]),
+ NoOfEvents = length(Events),
+ NoOfEvents = length(Received),
+    lists:foreach(fun(Event) ->
+                          true = lists:member(Event, Received)
+                  end, Events),
+ ok.
+disk_log_events(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+write_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = filename:join(Dir, ?FUNCTION_NAME),
+ Log = lists:concat([File,".1"]),
+ ct:pal("Log = ~p", [Log]),
+
+ Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
+ ct:pal("LogOpts = ~p", [LogOpts = maps:get(log_opts, HState)]),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ rpc:call(Node, logger_disk_log_h, disk_log_sync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\n">>}, ?SYNC_REP_INT),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,{error,no_such_log}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,log,LogOpts,{error,no_such_log}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,
+ {error,{full,?STANDARD_HANDLER}}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,log,LogOpts,
+ {error,{full,?STANDARD_HANDLER}}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_blog,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ rpc:call(Node, logger_disk_log_h, disk_log_sync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?SYNC_REP_INT),
+ ok.
+write_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+
+sync_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ FileName = lists:concat([?MODULE,"_",?FUNCTION_NAME]),
+ File = filename:join(Dir, FileName),
+ Log = lists:concat([File,".1"]),
+
+ Node = start_h_on_new_node(Config, ?FUNCTION_NAME, File),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
+ HState = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
+ LogOpts = maps:get(log_opts, HState),
+
+ SyncInt = 500,
+ ok = rpc:call(Node, logger, set_handler_config,
+ [?STANDARD_HANDLER, logger_disk_log_h,
+ #{filesync_repeat_interval => SyncInt}]),
+ Info = rpc:call(Node, logger_disk_log_h, info, [?STANDARD_HANDLER]),
+ SyncInt = maps:get(filesync_repeat_interval, Info),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,{error,no_such_log}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,sync,LogOpts,{error,no_such_log}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result,
+ [disk_log_sync,{error,{blocked_log,?STANDARD_HANDLER}}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,sync,LogOpts,
+ {error,{blocked_log,?STANDARD_HANDLER}}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [disk_log_sync,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ ?check_no_log,
+ ok.
+sync_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+start_h_on_new_node(_Config, Func, File) ->
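+    %% Start a peer node whose standard handler logs to a disk_log from boot,
+    %% i.e. with (illustrative) arguments such as
+    %%   -kernel logger_dest '{disk_log,"<priv_dir>/write_failure"}' -pa <ebin>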
+ Pa = filename:dirname(code:which(?MODULE)),
+ Dest =
+ case os:type() of
+ {win32,_} ->
+ lists:concat([" {disk_log,\\\"",File,"\\\"}"]);
+ _ ->
+ lists:concat([" \'{disk_log,\"",File,"\"}\'"])
+ end,
+ Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]),
+ NodeName = lists:concat([?MODULE,"_",Func]),
+ ct:pal("Starting ~s with ~tp", [NodeName,Args]),
+ {ok,Node} = test_server:start_node(NodeName, peer, [{args, Args}]),
+ Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]),
+ true = is_pid(Pid),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,nl}]),
+ Node.
+
+log_on_remote_node(Node,Msg) ->
+ _ = spawn_link(Node,
+ fun() -> erlang:group_leader(whereis(user),self()),
+ logger:info(Msg)
+ end),
+ ok.
+
+%% functions for test hook macros to be called by rpc
+set_internal_log(Mod, Func) ->
+ ?set_internal_log({Mod,Func}).
+set_result(Op, Result) ->
+ ?set_result(Op, Result).
+set_defaults() ->
+ ?set_defaults().
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+ [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+ Tester ! {log,{Type,Term}},
+ logger:internal_log(Type, Term),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
+
+op_switch_to_sync(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 3,
+ drop_new_reqs_qlen => 501,
+ flush_reqs_qlen => 2000,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ NumOfReqs = count_lines(Log),
+ ok = file:delete(Log).
+op_switch_to_sync(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 3,
+ flush_reqs_qlen => 600,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [NumOfReqs-Logged,NumOfReqs]),
+ true = (Logged < NumOfReqs),
+ ok = file:delete(Log).
+op_switch_to_drop(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_flush() ->
+ [{timetrap,{seconds,60}}].
+op_switch_to_flush(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% it's important that both async and sync requests have been queued
+ %% when the flush happens (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 99,
+ flush_reqs_qlen => 100,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 1000,
+ Procs = 500,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]),
+ true = (Logged < (NumOfReqs*Procs)),
+ ok = file:delete(Log).
+op_switch_to_flush(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+
+limit_burst_disabled(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => false,
+ burst_limit_size => 10,
+ burst_window_time => 2000,
+ drop_new_reqs_qlen => 200,
+ flush_reqs_qlen => 300}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ NumOfReqs = Logged.
+limit_burst_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_one(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => true,
+ burst_limit_size => ReqLimit,
+ burst_window_time => 2000,
+ drop_new_reqs_qlen => 200,
+ flush_reqs_qlen => 300}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ ReqLimit = Logged.
+limit_burst_enabled_one(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_period(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ BurstTWin = 1000,
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{enable_burst_limit => true,
+ burst_limit_size => ReqLimit,
+ burst_window_time => BurstTWin,
+ drop_new_reqs_qlen => 20000,
+ flush_reqs_qlen => 20001}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+
+ Windows = 3,
+ Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ ok = file:delete(Log),
+ true = (Logged > (ReqLimit*Windows)) andalso
+ (Logged < (ReqLimit*(Windows+2))).
+limit_burst_enabled_period(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+kill_disabled(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>false,
+ handler_overloaded_qlen=>10,
+ handler_overloaded_mem=>100}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ true = is_pid(whereis(?MODULE)),
+ ok.
+kill_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+qlen_kill_new(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(?MODULE),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = 2000,
+ NewHConfig =
+ HConfig#{logger_disk_log_h =>
+ DLHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_overloaded_mem=>Mem0+50000,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 2,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ timer:sleep(RestartAfter + 1000),
+ true = is_pid(whereis(?MODULE)),
+ ok
+ after
+ 5000 ->
+ Info = logger_disk_log_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+qlen_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+mem_kill_new(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(?MODULE),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = 2000,
+ NewHConfig =
+ HConfig#{logger_disk_log_h =>
+ DLHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>50000,
+ handler_overloaded_mem=>Mem0+500,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 2,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ timer:sleep(RestartAfter * 2),
+ true = is_pid(whereis(?MODULE)),
+ ok
+ after
+ 5000 ->
+ Info = logger_disk_log_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+mem_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+restart_after(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig1 =
+ HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_restart_after=>never}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ MRef1 = erlang:monitor(process, whereis(?MODULE)),
+ %% kill handler
+ send_burst({n,100}, {spawn,2,0}, {chars,79}, info),
+ receive
+ {'DOWN', MRef1, _, _, _Info1} ->
+ timer:sleep(?HANDLER_RESTART_AFTER + 1000),
+ undefined = whereis(?MODULE),
+ ok
+ after
+ 5000 ->
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+
+ {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ RestartAfter = 2000,
+ NewHConfig2 =
+ HConfig#{logger_disk_log_h=>DLHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ Pid0 = whereis(?MODULE),
+ MRef2 = erlang:monitor(process, Pid0),
+ %% kill handler
+ send_burst({n,100}, {spawn,2,0}, {chars,79}, info),
+ receive
+ {'DOWN', MRef2, _, _, _Info2} ->
+ timer:sleep(RestartAfter + 1000),
+ Pid1 = whereis(?MODULE),
+ true = is_pid(Pid1),
+ false = (Pid1 == Pid0),
+ ok
+ after
+ 5000 ->
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+ ok.
+restart_after(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% send handler requests (filesync, info, reset, change_config)
+%% during high load to verify that sync, dropping and flushing are
+%% handled correctly.
+handler_requests_under_load() ->
+ [{timetrap,{seconds,60}}].
+handler_requests_under_load(Config) ->
+ {Log,HConfig,DLHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_disk_log_h => DLHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 1000,
+ flush_reqs_qlen => 2000,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{disk_log_sync,[]},
+ {info,[]},
+ {reset,[]},
+ {change_config,[]}])
+ end),
+ Procs = 100,
+ Sent = Procs * send_burst({n,5000}, {spawn,Procs,10}, {chars,79}, info),
+ Pid ! {self(),finish},
+ ReqResult = receive {Pid,Result} -> Result end,
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ FindError = fun(Res) ->
+ [E || E <- Res,
+ is_tuple(E) andalso (element(1,E) == error)]
+ end,
+ Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
+ ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
+ ok = file:delete(Log).
+handler_requests_under_load(cleanup, Config) ->
+ ok = stop_handler(?MODULE).
+
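+%% Cycle through the handler requests, issuing one every TO milliseconds and
+%% accumulating each result under its request, until the test case sends
+%% 'finish', at which point all collected results are sent back to it.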
+send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
+ receive
+ {From,finish} ->
+ From ! {self(),Reqs}
+ after
+ TO ->
+ Result =
+ case Req of
+ change_config ->
+ logger:set_handler_config(HName, logger_disk_log_h,
+ #{enable_kill_overloaded =>
+ false});
+ Func ->
+ logger_disk_log_h:Func(HName)
+ end,
+ send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}])
+ end.
+
+%%%-----------------------------------------------------------------
+%%%
+start_handler(Name, FuncName, Config) ->
+ Dir = ?config(priv_dir,Config),
+ File = filename:join(Dir, FuncName),
+ ct:pal("Logging to ~tp", [File]),
+ ok = logger:add_handler(Name,
+ logger_disk_log_h,
+ #{disk_log_opts=>#{file => File,
+ max_no_files => 1,
+ max_no_bytes => 100000000},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,{_,HConfig = #{logger_disk_log_h := DLHConfig}}} =
+ logger:get_handler_config(Name),
+ {lists:concat([File,".1"]),HConfig,DLHConfig}.
+
+stop_handler(Name) ->
+ ok = logger:remove_handler(Name),
+ ct:pal("Handler ~p stopped!", [Name]).
+
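+%% Send a burst of log events with a payload of Sz random printable
+%% characters. {n,N} sends N events, sequentially or from Ps spawned
+%% processes ({spawn,Ps,TO}); {t,T} sends sequentially for T milliseconds.
+%% Returns the number of events sent by a single sending process.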
+send_burst(NorT, Type, {chars,Sz}, Class) ->
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)],
+ case NorT of
+ {n,N} ->
+ %% process_flag(priority, high),
+ send_n_burst(N, Type, Text, Class),
+ %% process_flag(priority, normal),
+ N;
+ {t,T} ->
+ ct:pal("Sending messages sequentially for ~w ms", [T]),
+ T0 = erlang:monotonic_time(millisecond),
+ send_t_burst(T0, T, Text, Class, 0)
+ end.
+
+send_n_burst(0, _, _Text, _Class) ->
+ ok;
+send_n_burst(N, seq, Text, Class) ->
+ ok = logger:Class(Text, ?domain),
+ send_n_burst(N-1, seq, Text, Class);
+send_n_burst(N, {spawn,Ps,TO}, Text, Class) ->
+ ct:pal("~w processes each sending ~w messages", [Ps,N]),
+ PerProc = fun() ->
+ send_n_burst(N, seq, Text, Class)
+ end,
+ MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end,
+ monitor(process,spawn_link(PerProc)) end ||
+ _ <- lists:seq(1,Ps)],
+ lists:foreach(fun(MRef) ->
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ end, MRefs),
+ ct:pal("Message burst sent", []),
+ ok.
+
+send_t_burst(T0, T, Text, Class, N) ->
+ T1 = erlang:monotonic_time(millisecond),
+ if (T1-T0) > T ->
+ N;
+ true ->
+ ok = logger:Class(Text, ?domain),
+ send_t_burst(T0, T, Text, Class, N+1)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Formatter callback
+%%% Using this to send the formatted string back to the test case
+%%% process - so it can check for logged events.
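+%%% The Config argument below is the second element of the handler's
+%%% formatter tuple, e.g. {?MODULE,op} as set up in start_handler/3, or
+%%% {?MODULE,self()} when a test case wants every formatted string sent
+%%% back to it as a {log,String} message.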
+format(_,bad_return) ->
+ bad_return;
+format(_,crash) ->
+ erlang:error(formatter_crashed);
+format(#{msg:={report,R},meta:=#{report_cb:=Fun}}=Log,Config) ->
+ format(Log#{msg=>Fun(R)},Config);
+format(#{msg:={string,String0}},no_nl) ->
+ String = unicode:characters_to_list(String0),
+ String;
+format(#{msg:={string,String0}},nl) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={string,String0}},op) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={report,#{label:={supervisor,progress}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={gen_server,terminate}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={proc_lib,crash}}}},op) ->
+ "";
+format(#{msg:={F,A}},Pid) when is_list(F), is_list(A) ->
+ String = lists:flatten(io_lib:format(F,A)),
+ Pid ! {log,String},
+ String++"\n";
+format(#{msg:={string,String0}},Pid) ->
+ String = unicode:characters_to_list(String0),
+ Pid ! {log,String},
+ String++"\n";
+format(Msg,Tag) ->
+ Error = {unexpected_format,Msg,Tag},
+ erlang:display(Error),
+ exit(Error).
+
+remove(Handler, LogName) ->
+ logger_disk_log_h:remove(Handler, LogName),
+ HState = #{log_names := Logs} = logger_disk_log_h:info(),
+ false = maps:is_key(LogName, HState),
+ false = lists:member(LogName, Logs),
+ false = logger_config:exist(logger, LogName),
+ {error,no_such_log} = disk_log:info(LogName),
+ ok.
+
+start_and_add(Name, Config, LogOpts) ->
+ ct:pal("Adding handler ~w with: ~p",
+ [Name,Config#{disk_log_opts=>LogOpts}]),
+ ok = logger:add_handler(Name, logger_disk_log_h,
+ Config#{disk_log_opts=>LogOpts}),
+ Pid = whereis(Name),
+ true = is_pid(Pid),
+ Name = proplists:get_value(name, disk_log:info(Name)),
+ ok.
+
+remove_and_stop(Handler) ->
+ ok = logger:remove_handler(Handler),
+ timer:sleep(500),
+ undefined = whereis(Handler),
+ ok.
+
+try_read_file(FileName, Expected, Time) ->
+ try_read_file(FileName, Expected, Time, undefined).
+
+try_read_file(FileName, Expected, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ Expected ->
+ ok;
+ Error = {error,_Reason} ->
+ erlang:error(Error);
+ SomethingElse ->
+ ct:pal("try_read_file read unexpected: ~p~n", [SomethingElse]),
+ timer:sleep(500),
+ try_read_file(FileName, Expected, Time-500, SomethingElse)
+ end;
+
+try_read_file(_, _, _, Incorrect) ->
+ ct:pal("try_read_file got incorrect pattern: ~p~n", [Incorrect]),
+ erlang:error({error,not_matching_pattern,Incorrect}).
+
+try_match_file(FileName, Pattern, Time) ->
+ try_match_file(FileName, Pattern, Time, <<>>).
+
+try_match_file(FileName, Pattern, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ {ok, Bin} ->
+ case re:run(Bin,Pattern,[{capture,none}]) of
+ match ->
+ unicode:characters_to_list(Bin);
+ _ ->
+ timer:sleep(100),
+ try_match_file(FileName, Pattern, Time-100, Bin)
+ end;
+ Error ->
+ erlang:error(Error)
+ end;
+try_match_file(_,Pattern,_,Incorrect) ->
+ ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n",
+ [Pattern,Incorrect]),
+ erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+
+count_lines(File) ->
+ wait_until_written(File),
+ count_lines1(File).
+
+wait_until_written(File) ->
+ wait_until_written(File, -1).
+
+wait_until_written(File, Sz) ->
+ timer:sleep(2000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+            %% Size unchanged since the last check - wait a bit more
+            %% and require it to still be stable before returning.
+            timer:sleep(1000),
+            case file:read_file_info(File) of
+                {ok,#file_info{size = Sz}} ->
+                    ok;
+ {ok,#file_info{size = Sz2}} ->
+ wait_until_written(File, Sz2)
+ end;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end.
+
+count_lines1(File) ->
+ Counter = fun Cnt(Dev,LC) ->
+ case file:read_line(Dev) of
+ eof -> LC;
+ _ -> Cnt(Dev,LC+1)
+ end
+ end,
+    {ok,Dev} = file:open(File, [read]),
+ Lines = Counter(Dev, 0),
+ file:close(Dev),
+ Lines.
+
+start_tracer(Trace,Expected) ->
+ Pid = self(),
+ dbg:tracer(process,{fun tracer/2,{Pid,Expected}}),
+ dbg:p(whereis(?MODULE),[c]),
+ dbg:p(Pid,[c]),
+ tpl(Trace),
+ ok.
+
+tpl([{M,F,A}|Trace]) ->
+ {ok,Match} = dbg:tpl(M,F,A,[]),
+ case lists:keyfind(matched,1,Match) of
+ {_,_,1} ->
+ ok;
+ _ ->
+ dbg:stop_clear(),
+ throw({skip,"Can't trace "++atom_to_list(M)++":"++
+ atom_to_list(F)++"/"++integer_to_list(A)})
+ end,
+ tpl(Trace);
+tpl([]) ->
+ ok.
+
+tracer({trace,_,call,{?MODULE,format,[#{msg:={string,Msg}}|_]}}, {Pid,[{formatter,Msg}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{formatter,Msg});
+tracer({trace,_,call,{logger_disk_log_h,handle_cast,[{Op,_}|_]}}, {Pid,[{Mod,Func,Op}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func,Op});
+tracer({trace,_,call,{Mod,Func,_}}, {Pid,[{Mod,Func}|Expected]}) ->
+ maybe_tracer_done(Pid,Expected,{Mod,Func});
+tracer({trace,_,call,Call}, {Pid,Expected}) ->
+ Pid ! {tracer_got_unexpected,Call,Expected},
+ {Pid,Expected}.
+
+maybe_tracer_done(Pid,[],Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ Pid ! tracer_done;
+maybe_tracer_done(Pid,Expected,Got) ->
+ ct:log("Tracer got: ~p~n",[Got]),
+ {Pid,Expected}.
+
+check_tracer(T) ->
+ receive
+ tracer_done ->
+ dbg:stop_clear(),
+ ok;
+ {tracer_got_unexpected,Got,Expected} ->
+ dbg:stop_clear(),
+ ct:fail({tracer_got_unexpected,Got,Expected})
+ after T ->
+ ct:fail({timeout,tracer})
+ end.
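+
+%% Typical use of the tracer helpers above (a sketch - the real trace
+%% specs and expected calls are supplied by the calling test cases):
+%%
+%%   start_tracer([{logger_disk_log_h,handle_cast,2}],
+%%                [{logger_disk_log_h,handle_cast,SomeOp}]),
+%%   %% ... provoke the logging under test ...
+%%   check_tracer(5000).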
diff --git a/lib/kernel/test/logger_env_var_SUITE.erl b/lib/kernel/test/logger_env_var_SUITE.erl
new file mode 100644
index 0000000000..c2d3364701
--- /dev/null
+++ b/lib/kernel/test/logger_env_var_SUITE.erl
@@ -0,0 +1,451 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_env_var_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-define(all_vars,[{kernel,logger_dest},
+ {kernel,logger_level},
+ {kernel,logger_log_progress},
+ {kernel,logger_sasl_compatible},
+ {kernel,error_logger}]).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Env = [{App,Key,application:get_env(App,Key)} || {App,Key} <- ?all_vars],
+ Removed = cleanup(),
+ [{env,Env},{logger,Removed}|Config].
+
+end_per_suite(Config) ->
+    [application:set_env(App,Key,Val) ||
+        {App,Key,{ok,Val}} <- ?config(env,Config)],
+ Hs = ?config(logger,Config),
+ [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs],
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ cleanup(),
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [default,
+ default_sasl_compatible,
+ dest_tty,
+ dest_tty_sasl_compatible,
+ dest_false,
+ dest_false_progress,
+ dest_false_sasl_compatible,
+ dest_silent,
+ dest_silent_sasl_compatible,
+ dest_file_old,
+ dest_file,
+ dest_disk_log,
+ %% disk_log_vars, % or test this in logger_disk_log_SUITE?
+ sasl_compatible_false,
+ sasl_compatible_false_no_progress,
+ sasl_compatible,
+ bad_dest%% ,
+ %% bad_level,
+ %% bad_sasl_compatibility,
+ %% bad_progress
+ ].
+
+default(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ undefined,
+ undefined, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
+ true = is_pid(whereis(logger_std_h)),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ true = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(logger_simple,1,Hs),
+ false = lists:keymember(sasl_h,1,Hs),
+ false = is_pid(whereis(sasl_h)),
+ ok.
+
+default_sasl_compatible(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ undefined,
+ undefined, % dest
+ undefined, % level
+ true, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
+ true = is_pid(whereis(logger_std_h)),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ false = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(logger_simple,1,Hs),
+ true = lists:keymember(sasl_h,1,Hs),
+ true = is_pid(whereis(sasl_h)),
+ ok.
+
+dest_tty(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ tty, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
+ true = is_pid(whereis(logger_std_h)),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ true = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(logger_simple,1,Hs),
+ false = lists:keymember(sasl_h,1,Hs),
+ false = is_pid(whereis(sasl_h)),
+ ok.
+
+dest_tty_sasl_compatible(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ tty, % dest
+ undefined, % level
+ true, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ {logger_std_h,logger_std_h,StdC} = lists:keyfind(logger_std_h,1,Hs),
+ true = is_pid(whereis(logger_std_h)),
+ info = maps:get(level,StdC),
+ StdFilters = maps:get(filters,StdC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} =
+ lists:keyfind(domain,1,StdFilters),
+ false = lists:keymember(stop_progress,1,StdFilters),
+ false = lists:keymember(logger_simple,1,Hs),
+ true = lists:keymember(sasl_h,1,Hs),
+ true = is_pid(whereis(sasl_h)),
+ ok.
+
+dest_false(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ false, % dest
+ notice, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ false = lists:keymember(logger_std_h,1,Hs),
+ {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs),
+ notice = maps:get(level,SimpleC),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,SimpleFilters),
+ true = lists:keymember(stop_progress,1,SimpleFilters),
+ false = lists:keymember(sasl_h,1,Hs),
+ ok.
+
+dest_false_progress(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ false, % dest
+ notice, % level
+ undefined, % sasl comp (default=false)
+ true), % progress (default=false)
+ false = lists:keymember(logger_std_h,1,Hs),
+ {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs),
+ notice = maps:get(level,SimpleC),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp,sasl]}}} =
+ lists:keyfind(domain,1,SimpleFilters),
+ false = lists:keymember(stop_progress,1,SimpleFilters),
+ false = lists:keymember(sasl_h,1,Hs),
+ ok.
+
+dest_false_sasl_compatible(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ false, % dest
+ notice, % level
+ true, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ false = lists:keymember(logger_std_h,1,Hs),
+ {logger_simple,logger_simple,SimpleC} = lists:keyfind(logger_simple,1,Hs),
+ notice = maps:get(level,SimpleC),
+ SimpleFilters = maps:get(filters,SimpleC),
+ {domain,{_,{log,prefix_of,[beam,erlang,otp]}}} =
+ lists:keyfind(domain,1,SimpleFilters),
+ false = lists:keymember(stop_progress,1,SimpleFilters),
+ true = lists:keymember(sasl_h,1,Hs),
+ true = is_pid(whereis(sasl_h)),
+ ok.
+
+dest_silent(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ silent, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ false = lists:keymember(logger_std_h,1,Hs),
+ false = lists:keymember(logger_simple,1,Hs),
+ false = lists:keymember(sasl_h,1,Hs),
+ ok.
+
+dest_silent_sasl_compatible(Config) ->
+ {ok,{_Log,Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ silent, % dest
+ undefined, % level
+ true, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ false = lists:keymember(logger_std_h,1,Hs),
+ false = lists:keymember(logger_simple,1,Hs),
+ true = lists:keymember(sasl_h,1,Hs),
+ true = is_pid(whereis(sasl_h)),
+ ok.
+
+
+dest_file_old(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ error_logger,
+ file, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ check_log(Log,
+ file, % dest
+ 0), % progress in std logger
+ ok.
+
+
+dest_file(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ file, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ check_log(Log,
+ file, % dest
+ 0), % progress in std logger
+ ok.
+
+
+dest_disk_log(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ disk_log, % dest
+ undefined, % level
+ undefined, % sasl comp (default=false)
+ undefined), % progress (default=false)
+ check_log(Log,
+ disk_log, % dest
+ 0), % progress in std logger
+ ok.
+
+
+sasl_compatible_false(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ file, % dest
+ undefined, % level
+ false, % sasl comp
+ true), % progress
+ check_log(Log,
+ file, % dest
+ 4), % progress in std logger
+ ok.
+
+sasl_compatible_false_no_progress(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ file, % dest
+ undefined, % level
+ false, % sasl comp
+ false), % progress
+ check_log(Log,
+ file, % dest
+ 0), % progress in std logger
+ ok.
+
+sasl_compatible(Config) ->
+ {ok,{Log,_Hs}} = setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ file, % dest
+ undefined, % level
+ true, % sasl comp
+ undefined), % progress
+ check_log(Log,
+ file, % dest
+ 0), % progress in std logger
+ ok.
+
+bad_dest(Config) ->
+ {error,{bad_config,{kernel,{logger_dest,baddest}}}} =
+ setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ baddest,
+ undefined,
+ undefined,
+ undefined).
+
+bad_level(Config) ->
+ error =
+ setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ tty,
+ badlevel,
+ undefined,
+ undefined).
+
+bad_sasl_compatibility(Config) ->
+ error =
+ setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ tty,
+ info,
+ badcomp,
+ undefined).
+
+bad_progress(Config) ->
+ error =
+ setup(Config,?FUNCTION_NAME,
+ logger_dest,
+ tty,
+ info,
+ undefined,
+ badprogress).
+
+%%%-----------------------------------------------------------------
+%%% Internal
+setup(Config,Func,DestVar,Dest,Level,SaslComp,Progress) ->
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log,
+ logger_simple=>#{buffer=>true}}),
+ Dir = ?config(priv_dir,Config),
+ File = lists:concat([?MODULE,"_",Func,".log"]),
+ Log = filename:join(Dir,File),
+ case Dest of
+ undefined ->
+ ok;
+ F when F==file; F==disk_log ->
+ application:set_env(kernel,DestVar,{Dest,Log});
+ _ ->
+ application:set_env(kernel,DestVar,Dest)
+ end,
+ case Level of
+ undefined ->
+ ok;
+ _ ->
+ application:set_env(kernel,logger_level,Level)
+ end,
+ case SaslComp of
+ undefined ->
+ ok;
+ _ ->
+ application:set_env(kernel,logger_sasl_compatible,SaslComp)
+ end,
+ case Progress of
+ undefined ->
+ ok;
+ _ ->
+ application:set_env(kernel,logger_log_progress,Progress)
+ end,
+ case logger:setup_standard_handler() of
+ ok ->
+ application:start(sasl),
+ StdH = case Dest of
+ NoH when NoH==false; NoH==silent -> false;
+ _ -> true
+ end,
+ StdH = is_pid(whereis(?STANDARD_HANDLER)),
+ SaslH = if SaslComp -> true;
+ true -> false
+ end,
+ SaslH = is_pid(whereis(sasl_h)),
+ {ok,{Log,maps:get(handlers,logger:i())}};
+ Error ->
+ Error
+ end.
+
+check_log(Log,Dest,NumProgress) ->
+ ok = logger:alert("dummy1"),
+ ok = logger:debug("dummy1"),
+
+    %% Check that there are progress reports (supervisor and
+    %% application_controller) and an alert report (from the call
+    %% above) in the log. There should not be any info or debug
+    %% reports yet.
+ {ok,Bin1} = sync_and_read(Dest,Log),
+ ct:log("Log content:~n~s",[Bin1]),
+ match(Bin1,<<"PROGRESS REPORT">>,NumProgress),
+ match(Bin1,<<"ALERT REPORT">>,1),
+ match(Bin1,<<"INFO REPORT">>,0),
+ match(Bin1,<<"DEBUG REPORT">>,0),
+
+ %% Then stop sasl and see that the info report from
+ %% application_controller is there
+ ok = application:stop(sasl),
+ {ok,Bin2} = sync_and_read(Dest,Log),
+ ct:log("Log content:~n~s",[Bin2]),
+ match(Bin2,<<"INFO REPORT">>,1),
+ match(Bin1,<<"DEBUG REPORT">>,0),
+ ok.
+
+match(Bin,Pattern,0) ->
+ nomatch = re:run(Bin,Pattern,[{capture,none}]);
+match(Bin,Pattern,N) ->
+ {match,M} = re:run(Bin,Pattern,[{capture,all},global]),
+ N = length(M).
+
+sync_and_read(disk_log,Log) ->
+ logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER),
+ file:read_file(Log ++ ".1");
+sync_and_read(file,Log) ->
+ logger_std_h:filesync(?STANDARD_HANDLER),
+ file:read_file(Log).
+
+cleanup() ->
+ application:stop(sasl),
+ [application:unset_env(App,Key) || {App,Key} <- ?all_vars],
+ #{handlers:=Hs0} = logger:i(),
+ Hs = lists:keydelete(cth_log_redirect,1,Hs0),
+ [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs],
+ Hs.
diff --git a/lib/kernel/test/logger_filters_SUITE.erl b/lib/kernel/test/logger_filters_SUITE.erl
new file mode 100644
index 0000000000..21f14bbc02
--- /dev/null
+++ b/lib/kernel/test/logger_filters_SUITE.erl
@@ -0,0 +1,214 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_filters_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+
+-define(ndlog,
+ #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{}}).
+-define(dlog(Domain),
+ #{level=>info,msg=>{"Line: ~p",[?LINE]},meta=>#{domain=>Domain}}).
+-define(llog(Level),
+ #{level=>Level,msg=>{"Line: ~p",[?LINE]},meta=>#{}}).
+-define(plog,
+ #{level=>info,
+ msg=>{report,#{label=>{?MODULE,progress}}},
+ meta=>#{line=>?LINE}}).
+-define(rlog(Node),
+ #{level=>info,
+ msg=>{"Line: ~p",[?LINE]},
+ meta=>#{gl=>rpc:call(Node,erlang,whereis,[user])}}).
+
+-define(TRY(X), my_try(fun() -> X end)).
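+%% ?TRY(Expr) evaluates Expr in a fun and, if it raises, returns the
+%% {Class,Reason} tuple instead - e.g. a badarg error shows up as
+%% {error,badarg} in the assertions below.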
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [domain,
+ level,
+ progress,
+ remote_gl].
+
+domain(_Config) ->
+ L1 = logger_filters:domain(L1=?dlog([]),{log,prefix_of,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,prefix_of,[]}),
+ L2 = logger_filters:domain(L2=?dlog([]),{log,starts_with,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,starts_with,[]}),
+ L3 = logger_filters:domain(L3=?dlog([]),{log,equals,[]}),
+ stop = logger_filters:domain(?dlog([]),{stop,equals,[]}),
+ ignore = logger_filters:domain(?dlog([]),{log,no_domain,[]}),
+ ignore = logger_filters:domain(?dlog([]),{stop,no_domain,[]}),
+
+ L4 = logger_filters:domain(L4=?dlog([a]),{log,prefix_of,[a,b]}),
+ stop = logger_filters:domain(?dlog([a]),{stop,prefix_of,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,starts_with,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,starts_with,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,equals,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,equals,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{log,no_domain,[a,b]}),
+ ignore = logger_filters:domain(?dlog([a]),{stop,no_domain,[a,b]}),
+
+ ignore = logger_filters:domain(?dlog([a,b]),{log,prefix_of,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,prefix_of,[a]}),
+ L5 = logger_filters:domain(L5=?dlog([a,b]),{log,starts_with,[a]}),
+ stop = logger_filters:domain(?dlog([a,b]),{stop,starts_with,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,equals,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,equals,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{log,no_domain,[a]}),
+ ignore = logger_filters:domain(?dlog([a,b]),{stop,no_domain,[a]}),
+
+ ignore = logger_filters:domain(?ndlog,{log,prefix_of,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,prefix_of,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,starts_with,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,starts_with,[a]}),
+ ignore = logger_filters:domain(?ndlog,{log,equals,[a]}),
+ ignore = logger_filters:domain(?ndlog,{stop,equals,[a]}),
+ L6 = logger_filters:domain(L6=?ndlog,{log,no_domain,[a]}),
+ stop = logger_filters:domain(?ndlog,{stop,no_domain,[a]}),
+
+ L7 = logger_filters:domain(L7=?dlog([a,b,c,d]),{log,prefix_of,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,prefix_of,[a,b,c,d]}),
+ L8 = logger_filters:domain(L8=?dlog([a,b,c,d]),{log,starts_with,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,starts_with,[a,b,c,d]}),
+ L9 = logger_filters:domain(L9=?dlog([a,b,c,d]),{log,equals,[a,b,c,d]}),
+ stop = logger_filters:domain(?dlog([a,b,c,d]),{stop,equals,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{log,no_domain,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog([a,b,c,d]),{stop,no_domain,[a,b,c,d]}),
+
+ %% A domain field in meta which is not a list is allowed by the
+ %% filter, but it will never match.
+ ignore = logger_filters:domain(?dlog(dummy),{log,prefix_of,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,prefix_of,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,starts_with,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,starts_with,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,equals,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,equals,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{log,no_domain,[a,b,c,d]}),
+ ignore = logger_filters:domain(?dlog(dummy),{stop,no_domain,[a,b,c,d]}),
+
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,bad)),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{bad,prefix_of,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,bad,[]})),
+ {error,badarg} = ?TRY(logger_filters:domain(?ndlog,{log,prefix_of,bad})),
+
+ ok.
+
+level(_Config) ->
+ ignore = logger_filters:level(?llog(info),{log,lt,info}),
+ ignore = logger_filters:level(?llog(info),{stop,lt,info}),
+ ignore = logger_filters:level(?llog(info),{log,gt,info}),
+ ignore = logger_filters:level(?llog(info),{stop,gt,info}),
+ L1 = logger_filters:level(L1=?llog(info),{log,lteq,info}),
+ stop = logger_filters:level(?llog(info),{stop,lteq,info}),
+ L2 = logger_filters:level(L2=?llog(info),{log,gteq,info}),
+ stop = logger_filters:level(?llog(info),{stop,gteq,info}),
+ L3 = logger_filters:level(L3=?llog(info),{log,eq,info}),
+ stop = logger_filters:level(?llog(info),{stop,eq,info}),
+ ignore = logger_filters:level(?llog(info),{log,neq,info}),
+ ignore = logger_filters:level(?llog(info),{stop,neq,info}),
+
+ ignore = logger_filters:level(?llog(error),{log,lt,info}),
+ ignore = logger_filters:level(?llog(error),{stop,lt,info}),
+ L4 = logger_filters:level(L4=?llog(error),{log,gt,info}),
+ stop = logger_filters:level(?llog(error),{stop,gt,info}),
+ ignore = logger_filters:level(?llog(error),{log,lteq,info}),
+ ignore = logger_filters:level(?llog(error),{stop,lteq,info}),
+ L5 = logger_filters:level(L5=?llog(error),{log,gteq,info}),
+ stop = logger_filters:level(?llog(error),{stop,gteq,info}),
+ ignore = logger_filters:level(?llog(error),{log,eq,info}),
+ ignore = logger_filters:level(?llog(error),{stop,eq,info}),
+ L6 = logger_filters:level(L6=?llog(error),{log,neq,info}),
+ stop = logger_filters:level(?llog(error),{stop,neq,info}),
+
+ L7 = logger_filters:level(L7=?llog(info),{log,lt,error}),
+ stop = logger_filters:level(?llog(info),{stop,lt,error}),
+ ignore = logger_filters:level(?llog(info),{log,gt,error}),
+ ignore = logger_filters:level(?llog(info),{stop,gt,error}),
+ L8 = logger_filters:level(L8=?llog(info),{log,lteq,error}),
+ stop = logger_filters:level(?llog(info),{stop,lteq,error}),
+ ignore = logger_filters:level(?llog(info),{log,gteq,error}),
+ ignore = logger_filters:level(?llog(info),{stop,gteq,error}),
+ ignore = logger_filters:level(?llog(info),{log,eq,error}),
+ ignore = logger_filters:level(?llog(info),{stop,eq,error}),
+ L9 = logger_filters:level(L9=?llog(info),{log,neq,error}),
+ stop = logger_filters:level(?llog(info),{stop,neq,error}),
+
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),bad)),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{bad,eq,info})),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,bad,info})),
+ {error,badarg} = ?TRY(logger_filters:level(?llog(info),{log,eq,bad})),
+
+ ok.
+
+progress(_Config) ->
+ L1 = logger_filters:progress(L1=?plog,log),
+ stop = logger_filters:progress(?plog,stop),
+ ignore = logger_filters:progress(?ndlog,log),
+ ignore = logger_filters:progress(?ndlog,stop),
+
+ {error,badarg} = ?TRY(logger_filters:progress(?plog,bad)),
+
+ ok.
+
+remote_gl(_Config) ->
+ {ok,Node} = test_server:start_node(?FUNCTION_NAME,slave,[]),
+ L1 = logger_filters:remote_gl(L1=?rlog(Node),log),
+ stop = logger_filters:remote_gl(?rlog(Node),stop),
+ ignore = logger_filters:remote_gl(?ndlog,log),
+ ignore = logger_filters:remote_gl(?ndlog,stop),
+
+ {error,badarg} = ?TRY(logger_filters:remote_gl(?rlog(Node),bad)),
+ ok.
+
+remote_gl(cleanup,_Config) ->
+ [test_server:stop_node(N) || N<-nodes()].
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R -> {C,R} end.
diff --git a/lib/kernel/test/logger_formatter_SUITE.erl b/lib/kernel/test/logger_formatter_SUITE.erl
new file mode 100644
index 0000000000..ac1abba629
--- /dev/null
+++ b/lib/kernel/test/logger_formatter_SUITE.erl
@@ -0,0 +1,558 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_formatter_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+
+-define(TRY(X), my_try(fun() -> X end)).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ Config.
+
+end_per_suite(_Config) ->
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [default,
+ legacy_header,
+ single_line,
+ template,
+ format_msg,
+ report_cb,
+ max_size,
+ depth,
+ chars_limit,
+ format_mfa,
+ format_time,
+ level_or_msg_in_meta,
+ faulty_log,
+ faulty_config,
+ faulty_msg].
+
+default(_Config) ->
+ String1 = format(info,{"~p",[term]},#{},#{}),
+ ct:log(String1),
+ [_Date,_Time,"info:\nterm\n"] = string:lexemes(String1," "),
+
+ Time = timestamp(),
+ ExpectedTimestamp = default_time_format(Time),
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{}),
+ ct:log(String2),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+ ok.
+
+legacy_header(_Config) ->
+ Time = timestamp(),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>true}),
+ ct:log(String1),
+ "=INFO REPORT==== "++Rest = String1,
+ [Timestamp,"\nterm\n"] = string:lexemes(Rest," ="),
+ [D,M,Y,H,Min,S,Micro] = string:lexemes(Timestamp,"-:."),
+    true = integer(D,31),
+    true = integer(Y,2018,infinity),
+    true = integer(H,23),
+    true = integer(Min,59),
+    true = integer(S,59),
+    true = integer(Micro,999999),
+ true = lists:member(M,["Jan","Feb","Mar","Apr","May","Jun",
+ "Jul","Aug","Sep","Oct","Nov","Dec"]),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>false}),
+ ct:log(String2),
+ ExpectedTimestamp = default_time_format(Time),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{legacy_header=>bad}),
+ ct:log(String3),
+ String3 = String2,
+
+ String4 = format(info,{"~p",[term]},#{time=>Time},
+ #{legacy_header=>true,
+ single_line=>true}), % <---ignored
+ ct:log(String4),
+ String4 = String1,
+
+ String5 = format(info,{"~p",[term]},#{}, % <--- no time
+ #{legacy_header=>true}),
+ ct:log(String5),
+ "=INFO REPORT==== "++_ = String5,
+ ok.
+
+single_line(_Config) ->
+ Time = timestamp(),
+ ExpectedTimestamp = default_time_format(Time),
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>true}),
+ ct:log(String1),
+ " info: term\n" = string:prefix(String1,ExpectedTimestamp),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>false}),
+ ct:log(String2),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp),
+
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{single_line=>bad}),
+ ok.
+
+template(_Config) ->
+ Time = timestamp(),
+
+ Template1 = [msg],
+ String1 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template1}),
+ ct:log(String1),
+ "term" = String1,
+
+ Template2 = [msg,unknown],
+ String2 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template2}),
+ ct:log(String2),
+ "term" = String2,
+
+ Template3 = ["string"],
+ String3 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template3}),
+ ct:log(String3),
+ "string" = String3,
+
+ Template4 = ["string\nnewline"],
+ String4 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template4,
+ single_line=>true}),
+ ct:log(String4),
+ "string\nnewline" = String4,
+
+ Template5 = [],
+ String5 = format(info,{"~p",[term]},#{time=>Time},#{template=>Template5}),
+ ct:log(String5),
+ "" = String5,
+
+ Ref6 = erlang:make_ref(),
+ Meta6 = #{atom=>some_atom,
+ integer=>632,
+ list=>[list,"string",4321,#{},{tuple}],
+ mfa=>{mod,func,0},
+ pid=>self(),
+ ref=>Ref6,
+ string=>"some string",
+ time=>Time,
+ tuple=>{1,atom,"list"},
+ nested=>#{subkey=>subvalue}},
+ Template6 = lists:join(";",maps:keys(maps:remove(nested,Meta6)) ++
+ [{nested,subkey}]),
+ String6 = format(info,{"~p",[term]},Meta6,#{template=>Template6,
+ single_line=>true}),
+ ct:log(String6),
+ SelfStr = pid_to_list(self()),
+ RefStr6 = ref_to_list(Ref6),
+ ListStr = "[list,\"string\",4321,#{},{tuple}]",
+ ExpectedTime6 = default_time_format(Time),
+ ["some_atom",
+ "632",
+ ListStr,
+ "mod:func/0",
+ SelfStr,
+ RefStr6,
+ "some string",
+ ExpectedTime6,
+ "{1,atom,\"list\"}",
+ "subvalue"] = string:lexemes(String6,";"),
+
+ Meta7 = #{time=>Time,
+ nested=>#{key1=>#{subkey1=>value1},
+ key2=>value2}},
+ Template7 = lists:join(";",[nested,
+ {nested,key1},
+ {nested,key1,subkey1},
+ {nested,key2},
+ {nested,key2,subkey2},
+ {nested,key3},
+ {nested,key3,subkey3}]),
+ String7 = format(info,{"~p",[term]},Meta7,#{template=>Template7,
+ single_line=>true}),
+ ct:log(String7),
+ [MultipleKeysStr,
+ "#{subkey1 => value1}",
+ "value1",
+ "value2",
+ "",
+ "",
+ ""] = string:split(String7,";",all),
+ %% Order of keys is not fixed
+ case MultipleKeysStr of
+ "#{key2 => value2,key1 => #{subkey1 => value1}}" -> ok;
+ "#{key1 => #{subkey1 => value1},key2 => value2}" -> ok;
+ _ -> ct:fail({full_nested_map_unexpected,MultipleKeysStr})
+ end,
+ ok.
+
+format_msg(_Config) ->
+ Template = [msg],
+
+ String1 = format(info,{"~p",[term]},#{},#{template=>Template}),
+ ct:log(String1),
+ "term" = String1,
+
+ String2 = format(info,{"list",[term]},#{},#{template=>Template}),
+ ct:log(String2),
+ "FORMAT ERROR: \"list\" - [term]" = String2,
+
+ String3 = format(info,{report,term},#{},#{template=>Template}),
+ ct:log(String3),
+ "term" = String3,
+
+ String4 = format(info,{report,term},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String4),
+ "formatted" = String4,
+
+ String5 = format(info,{report,term},
+ #{report_cb=>fun(_)-> faulty_return end},
+ #{template=>Template}),
+ ct:log(String5),
+ "REPORT_CB ERROR: term; Returned: faulty_return" = String5,
+
+ String6 = format(info,{report,term},
+ #{report_cb=>fun(_)-> erlang:error(fun_crashed) end},
+ #{template=>Template}),
+ ct:log(String6),
+ "REPORT_CB CRASH: term; Reason: {error,fun_crashed}" = String6,
+
+ %% strings are not formatted
+ String7 = format(info,{string,"string"},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String7),
+ "string" = String7,
+
+ String8 = format(info,{string,['not',printable,list]},
+ #{report_cb=>fun(_)-> {"formatted",[]} end},
+ #{template=>Template}),
+ ct:log(String8),
+ "INVALID STRING: ['not',printable,list]" = String8,
+
+ String9 = format(info,{string,"string"},#{},#{template=>Template}),
+ ct:log(String9),
+ "string" = String9,
+
+ ok.
+
+report_cb(_Config) ->
+ Template = [msg],
+ MetaFun = fun(_) -> {"meta_rcb",[]} end,
+ ConfigFun = fun(_) -> {"config_rcb",[]} end,
+ "term" = format(info,{report,term},#{},#{template=>Template}),
+ "meta_rcb" =
+ format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template}),
+ "config_rcb" =
+ format(info,{report,term},#{},#{template=>Template,
+ report_cb=>ConfigFun}),
+ "config_rcb" =
+ format(info,{report,term},#{report_cb=>MetaFun},#{template=>Template,
+ report_cb=>ConfigFun}),
+ ok.
+
+max_size(_Config) ->
+ Template = [msg],
+ "12345678901234567890" =
+ format(info,{"12345678901234567890",[]},#{},#{template=>Template}),
+ application:set_env(kernel,logger_max_size,11),
+ "12345678901234567890" = % min value is 50, so this is not limited
+ format(info,{"12345678901234567890",[]},#{},#{template=>Template}),
+ "12345678901234567890123456789012345678901234567..." = % 50
+ format(info,
+ {"123456789012345678901234567890123456789012345678901234567890",
+ []},
+ #{},
+ #{template=>Template}),
+ application:set_env(kernel,logger_max_size,53),
+ "12345678901234567890123456789012345678901234567890..." = %53
+ format(info,
+ {"123456789012345678901234567890123456789012345678901234567890",
+ []},
+ #{},
+ #{template=>Template}),
+ "123456789012..." =
+ format(info,{"12345678901234567890",[]},#{},#{template=>Template,
+ max_size=>15}),
+ "12345678901234567890" =
+ format(info,{"12345678901234567890",[]},#{},#{template=>Template,
+ max_size=>unlimited}),
+ %% Check that one newline at the end of the line is kept (if it exists)
+ "12345678901...\n" =
+ format(info,{"12345678901234567890\n",[]},#{},#{template=>Template,
+ max_size=>15}),
+ "12345678901...\n" =
+ format(info,{"12345678901234567890",[]},#{},#{template=>[msg,"\n"],
+ max_size=>15}),
+ ok.
+max_size(cleanup,_Config) ->
+ application:unset_env(kernel,logger_max_size),
+ ok.
+
+depth(_Config) ->
+ Template = [msg],
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ application:set_env(kernel,error_logger_format_depth,11),
+ "[1,2,3,4,5,6,7,8,9,0|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ application:set_env(kernel,logger_format_depth,12),
+ "[1,2,3,4,5,6,7,8,9,0,1|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template}),
+ "[1,2,3,4,5,6,7,8,9,0,1,2|...]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template,
+ depth=>13}),
+ "[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]" =
+ format(info,
+ {"~p",[[1,2,3,4,5,6,7,8,9,0,1,2,3,4,5,6,7,8,9,0]]},
+ #{},
+ #{template=>Template,
+ depth=>unlimited}),
+ ok.
+depth(cleanup,_Config) ->
+ application:unset_env(kernel,logger_format_depth),
+ ok.
+
+chars_limit(_Config) ->
+ FA = {"LoL: ~p~nL: ~p~nMap: ~p~n",
+ [lists:duplicate(10,lists:seq(1,100)),
+ lists:seq(1,100),
+ maps:from_list(lists:zip(lists:seq(1,100),
+ lists:duplicate(100,value)))]},
+ Meta = #{time=>"2018-04-26 9:15:40.449879"},
+ Template = [time," - ", msg, "\n"],
+ FC = #{template=>Template,
+ depth=>unlimited,
+ max_size=>unlimited,
+ chars_limit=>unlimited,
+ single_line=>true},
+ CL1 = 80,
+ String1 = format(info,FA,Meta,FC#{chars_limit=>CL1}),
+ L1 = string:length(String1),
+ ct:log("String1: ~p~nLength1: ~p~n",[lists:flatten(String1),L1]),
+ true = L1 > CL1,
+ true = L1 < CL1 + 10,
+
+ String2 = format(info,FA,Meta,FC#{chars_limit=>CL1,depth=>10}),
+ L2 = string:length(String2),
+ ct:log("String2: ~p~nLength2: ~p~n",[lists:flatten(String2),L2]),
+ String2 = String1,
+
+ CL3 = 200,
+ String3 = format(info,FA,Meta,FC#{chars_limit=>CL3}),
+ L3 = string:length(String3),
+ ct:log("String3: ~p~nLength3: ~p~n",[lists:flatten(String3),L3]),
+ true = L3 > CL3,
+ true = L3 < CL3 + 10,
+
+ String4 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10}),
+ L4 = string:length(String4),
+ ct:log("String4: ~p~nLength4: ~p~n",[lists:flatten(String4),L4]),
+ true = L4 > CL3,
+ true = L4 < CL3 + 10,
+
+    %% Test that max_size truncates a string that is already limited
+    %% by depth and chars_limit
+ MS5 = 150,
+ String5 = format(info,FA,Meta,FC#{chars_limit=>CL3,depth=>10,max_size=>MS5}),
+ L5 = string:length(String5),
+ ct:log("String5: ~p~nLength5: ~p~n",[String5,L5]),
+ L5 = MS5,
+ true = lists:prefix(lists:sublist(String5,L5-4),String4),
+
+ ok.
+
+format_mfa(_Config) ->
+ Template = [mfa],
+
+ Meta1 = #{mfa=>{mod,func,0}},
+ String1 = format(info,{"~p",[term]},Meta1,#{template=>Template}),
+ ct:log(String1),
+ "mod:func/0" = String1,
+
+ Meta2 = #{mfa=>{mod,func,[]}},
+ String2 = format(info,{"~p",[term]},Meta2,#{template=>Template}),
+ ct:log(String2),
+ "mod:func/0" = String2,
+
+ Meta3 = #{mfa=>"mod:func/0"},
+ String3 = format(info,{"~p",[term]},Meta3,#{template=>Template}),
+ ct:log(String3),
+ "mod:func/0" = String3,
+
+ Meta4 = #{mfa=>othermfa},
+ String4 = format(info,{"~p",[term]},Meta4,#{template=>Template}),
+ ct:log(String4),
+ "othermfa" = String4,
+
+ ok.
+
+format_time(_Config) ->
+ Time1 = timestamp(),
+ ExpectedTimestamp1 = default_time_format(Time1),
+ String1 = format(info,{"~p",[term]},#{time=>Time1},#{}),
+ ct:log(String1),
+ " info:\nterm\n" = string:prefix(String1,ExpectedTimestamp1),
+
+ Time2 = timestamp(),
+ ExpectedTimestamp2 = default_time_format(Time2,true),
+ String2 = format(info,{"~p",[term]},#{time=>Time2},#{utc=>true}),
+ ct:log(String2),
+ " info:\nterm\n" = string:prefix(String2,ExpectedTimestamp2),
+
+ application:set_env(kernel,logger_utc,true),
+ Time3 = timestamp(),
+ ExpectedTimestamp3 = default_time_format(Time3,true),
+ String3 = format(info,{"~p",[term]},#{time=>Time3},#{}),
+ ct:log(String3),
+ " info:\nterm\n" = string:prefix(String3,ExpectedTimestamp3),
+
+ ok.
+
+format_time(cleanup,_Config) ->
+ application:unset_env(kernel,logger_utc),
+ ok.
+
+level_or_msg_in_meta(_Config) ->
+ %% The template contains atoms to pick out values from meta,
+ %% or level/msg to add these from the log event. What if you have
+ %% a key named 'level' or 'msg' in meta and want to display
+ %% its value?
+    %% For now we simply ignore such keys in Meta and display the
+    %% actual level and msg from the log event.
+
+ Meta = #{level=>mylevel,
+ msg=>"metamsg"},
+ Template = [level,";",msg],
+ String = format(info,{"~p",[term]},Meta,#{template=>Template}),
+ ct:log(String),
+ "info;term" = String, % so mylevel and "metamsg" are ignored
+
+ ok.
+
+faulty_log(_Config) ->
+ %% Unexpected log (should be type logger:log()) - print error
+ {error,
+ function_clause,
+ {logger_formatter,format,[_,_],_}} =
+ ?TRY(logger_formatter:format(unexp_log,#{})),
+ ok.
+
+faulty_config(_Config) ->
+ {error,
+ function_clause,
+ {logger_formatter,format,[_,_],_}} =
+ ?TRY(logger_formatter:format(#{level=>info,
+ msg=>{"~p",[term]},
+ meta=>#{time=>timestamp()}},
+ unexp_config)),
+ ok.
+
+faulty_msg(_Config) ->
+ {error,
+ function_clause,
+ {logger_formatter,_,_,_}} =
+ ?TRY(logger_formatter:format(#{level=>info,
+ msg=>term,
+ meta=>#{time=>timestamp()}},
+ #{})),
+ ok.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+format(Level,Msg,Meta,Config) ->
+ format(#{level=>Level,msg=>Msg,meta=>add_time(Meta)},Config).
+
+format(Log,Config) ->
+ lists:flatten(logger_formatter:format(Log,Config)).
+
+default_time_format(Timestamp) ->
+ default_time_format(Timestamp,false).
+
+default_time_format(Timestamp0,Utc) when is_integer(Timestamp0) ->
+ Timestamp=Timestamp0+erlang:time_offset(microsecond),
+ %% calendar:system_time_to_rfc3339(Time,[{unit,microsecond}]).
+ Micro = Timestamp rem 1000000,
+ Sec = Timestamp div 1000000,
+ UniversalTime = erlang:posixtime_to_universaltime(Sec),
+ {Date,Time} =
+ if Utc -> UniversalTime;
+ true -> erlang:universaltime_to_localtime(UniversalTime)
+ end,
+ default_time_format(Date,Time,Micro).
+
+default_time_format({Y,M,D},{H,Min,S},Micro) ->
+ lists:flatten(
+ io_lib:format("~4w-~2..0w-~2..0w ~2w:~2..0w:~2..0w.~6..0w",
+ [Y,M,D,H,Min,S,Micro])).
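+
+%% Illustration (hypothetical values): default_time_format({2018,4,26},
+%% {9,15,40},449879) yields "2018-04-26  9:15:40.449879" - the hour is
+%% space-padded by ~2w while the other fields are zero-padded.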
+
+integer(Str) ->
+ is_integer(list_to_integer(Str)).
+integer(Str,Max) ->
+ integer(Str,0,Max).
+integer(Str,Min,Max) ->
+ Int = list_to_integer(Str),
+    Int >= Min andalso Int =< Max.
+
+%%%-----------------------------------------------------------------
+%%% Called by macro ?TRY(X)
+my_try(Fun) ->
+ try Fun() catch C:R:S -> {C,R,hd(S)} end.
+
+timestamp() ->
+ erlang:monotonic_time(microsecond).
+
+%% Add a timestamp if the event lacks one (possibly not necessary)
+add_time(#{time:=_}=Meta) ->
+ Meta;
+add_time(Meta) ->
+ Meta#{time=>timestamp()}.
diff --git a/lib/kernel/test/logger_legacy_SUITE.erl b/lib/kernel/test/logger_legacy_SUITE.erl
new file mode 100644
index 0000000000..b59f5f7758
--- /dev/null
+++ b/lib/kernel/test/logger_legacy_SUITE.erl
@@ -0,0 +1,282 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_legacy_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+%%%-----------------------------------------------------------------
+%%% This test suite tests that log events from within OTP can be
+%%% delivered to legacy error_logger event handlers in the same format
+%%% as before 'logger' was introduced.
+%%%
+%%% Before changing the expected format of any of the log events in
+%%% this suite, please make sure that the backwards incompatibility it
+%%% introduces is ok.
+%%% -----------------------------------------------------------------
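+%%%
+%%% For reference, the legacy events relayed by the error_logger event
+%%% handler at the end of this module have the classic shapes (assumed
+%%% from the pre-logger error_logger protocol):
+%%%   {error,        Gleader, {Pid, Format, Data}}
+%%%   {error_report, Gleader, {Pid, Type, Report}}
+%%%   {warning_msg,  Gleader, {Pid, Format, Data}}
+%%%   {info_report,  Gleader, {Pid, Type, Report}}
+%%% handle_event/2 strips the group leader and pid, so the ?check
+%%% macros match on {Tag, Format|Type, Data|Report}.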
+
+-define(check(Expected),
+ receive Expected ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+-define(check_no_flush(Expected),
+ receive Expected ->
+ ok
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {got,test_server:messages_get()}})
+ end).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ logger:add_handler(error_logger,error_logger,
+ #{level=>info,filter_default=>stop}),
+ Config.
+
+end_per_suite(_Config) ->
+ logger:remove_handler(error_logger),
+ ok.
+
+init_per_group(std, Config) ->
+ ok = logger:set_handler_config(
+ error_logger,filters,
+ [{domain,{fun logger_filters:domain/2,
+ {log,prefix_of,[beam,erlang,otp]}}}]),
+ Config;
+init_per_group(sasl, Config) ->
+ ok = logger:set_handler_config(
+ error_logger,filters,
+ [{domain,{fun logger_filters:domain/2,
+ {log,prefix_of,[beam,erlang,otp,sasl]}}}]),
+
+    %% cth_log_redirect checks if sasl is started before displaying
+    %% any sasl reports - start sasl here so the real sasl reports
+    %% show up in the tc log:
+ application:start(sasl),
+ Config;
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(sasl, _Config) ->
+ application:stop(sasl),
+ ok;
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ error_logger:add_report_handler(?MODULE,{event_handler,self()}),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ %% Using gen_event directly here, instead of
+ %% error_logger:delete_report_handler. This is to avoid
+ %% automatically stopping the error_logger process due to removing
+ %% the last handler.
+ gen_event:delete_handler(error_logger,?MODULE,[]),
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [{std,[],[gen_server,
+ gen_event,
+ gen_fsm,
+ gen_statem]},
+ {sasl,[],[sasl_reports,
+ supervisor_handle_info]}].
+
+all() ->
+ [{group,std},
+ {group,sasl}].
+
+gen_server(_Config) ->
+ {ok,Pid} = gen_server:start(?MODULE,gen_server,[]),
+ Msg = fun() -> a=b end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}),
+ ok = gen_server:cast(Pid,Msg),
+ ?check({error,"** Generic server ~tp terminating"++_,
+ [Pid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}).
+
+gen_event(_Config) ->
+ {ok,Pid} = gen_event:start(),
+ ok = gen_event:add_handler(Pid,?MODULE,gen_event),
+ Msg = fun() -> a=b end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~tp"++_,[?MODULE,Msg]}),
+ gen_event:notify(Pid,Msg),
+ ?check({error,"** gen_event handler ~p crashed."++_,
+ [?MODULE,Pid,Msg,gen_event,{{badmatch,b},_}]}).
+
+gen_fsm(_Config) ->
+ {ok,Pid} = gen_fsm:start(?MODULE,gen_fsm,[]),
+ Msg = fun() -> a=b end,
+ Pid ! Msg,
+ ?check({warning_msg,"** Undefined handle_info in ~p"++_,[?MODULE,Msg]}),
+ gen_fsm:send_all_state_event(Pid,Msg),
+ ?check({error,"** State machine ~tp terminating"++_,
+ [Pid,Msg,mystate,gen_fsm,{{badmatch,b},_}]}).
+
+gen_statem(_Config) ->
+ {ok,Pid} = gen_statem:start(?MODULE,gen_statem,[]),
+ Msg = fun() -> a=b end,
+ Pid ! Msg,
+ ?check({error,"** State machine ~tp terminating"++_,
+ [Pid,{info,Msg},{mystate,gen_statem},error,{badmatch,b}|_]}).
+
+sasl_reports(Config) ->
+ App = {application,?MODULE,[{description, ""},
+ {vsn, "1.0"},
+ {modules, [?MODULE]},
+ {registered, []},
+ {applications, []},
+ {mod, {?MODULE, []}}]},
+ AppStr = io_lib:format("~p.",[App]),
+ Dir = ?config(priv_dir,Config),
+ AppFile = filename:join(Dir,?MODULE_STRING++".app"),
+ ok = file:write_file(AppFile,AppStr),
+ true = code:add_patha(Dir),
+ ok = application:start(?MODULE),
+ SupName = sup_name(),
+ Pid = whereis(SupName),
+ [{ch,ChPid,_,_}] = supervisor:which_children(Pid),
+ Node = node(),
+ ?check_no_flush({info_report,progress,[{application,?MODULE},
+ {started_at,Node}]}),
+ ?check({info_report,progress,[{supervisor,{local,SupName}},
+ {started,[{pid,ChPid}|_]}]}),
+ ok = gen_server:cast(ChPid, fun() ->
+ spawn_link(fun() -> receive x->ok end end)
+ end),
+ Msg = fun() -> a=b end,
+ ok = gen_server:cast(ChPid,Msg),
+ ?check_no_flush({error,"** Generic server ~tp terminating"++_,
+ [ChPid,{'$gen_cast',Msg},gen_server,{{badmatch,b},_}]}),
+ ?check_no_flush({error_report,crash_report,
+ [[{initial_call,_},
+ {pid,ChPid},
+ {registered_name,[]},
+ {error_info,{error,{badmatch,b},_}},
+ {ancestors,_},
+ {message_queue_len,_},
+ {messages,_},
+ {links,[Pid,Neighbour]},
+ {dictionary,_},
+ {trap_exit,_},
+ {status,_},
+ {heap_size,_},
+ {stack_size,_},
+ {reductions,_}],
+ [{neighbour,[{pid,Neighbour},
+ {registered_name,_},
+ {initial_call,_},
+ {current_function,_},
+ {ancestors,_},
+ {message_queue_len,_},
+ {links,[ChPid]},
+ {trap_exit,_},
+ {status,_},
+ {heap_size,_},
+ {stack_size,_},
+ {reductions,_},
+ {current_stacktrace,_}]}]]}),
+ ?check_no_flush({error_report,supervisor_report,
+ [{supervisor,{local,SupName}},
+ {errorContext,child_terminated},
+ {reason,{{badmatch,b},_}},
+ {offender,[{pid,ChPid}|_]}]}),
+ ?check({info_report,progress,[{supervisor,{local,SupName}},
+ {started,_}]}),
+
+ ok = application:stop(?MODULE),
+ ?check({info_report,std_info,[{application,?MODULE},
+ {exited,stopped},
+ {type,temporary}]}).
+
+sasl_reports(cleanup,_Config) ->
+ application:stop(?MODULE).
+
+supervisor_handle_info(_Config) ->
+ {ok,Pid} = supervisor:start_link({local,sup_name()},?MODULE,supervisor),
+ ?check({info_report,progress,[{supervisor,_},{started,_}]}),
+ Pid ! msg,
+ ?check({error,"Supervisor received unexpected message: ~tp~n",[msg]}).
+
+supervisor_handle_info(cleanup,_Config) ->
+ Pid = whereis(sup_name()),
+ unlink(Pid),
+ exit(Pid,shutdown).
+
+%%%-----------------------------------------------------------------
+%%% Callbacks for error_logger event handler, gen_server, gen_statem,
+%%% gen_fsm, gen_event, supervisor and application.
+start(_,_) ->
+ supervisor:start_link({local,sup_name()},?MODULE,supervisor).
+
+init(supervisor) ->
+ {ok,{#{},[#{id=>ch,start=>{gen_server,start_link,[?MODULE,gen_server,[]]}}]}};
+init(StateMachine) when StateMachine==gen_statem; StateMachine==gen_fsm ->
+ {ok,mystate,StateMachine};
+init(State) ->
+ {ok,State}.
+
+%% error_logger event handler
+handle_event({Tag,_Gl,{_Pid,Type,Report}},{_,Pid}=State) ->
+ Pid ! {Tag,Type,Report},
+ {ok,State};
+%% other gen_event
+handle_event(Fun,State) when is_function(Fun) ->
+ Fun(),
+    {ok,State}.
+
+%% gen_fsm
+handle_event(Fun,State,Data) when is_function(Fun) ->
+ Fun(),
+ {next_state,State,Data}.
+
+%% gen_statem
+handle_event(info,Fun,State,Data) when is_function(Fun) ->
+ Fun(),
+ {next_state,State,Data}.
+
+%% gen_server
+handle_cast(Fun,State) when is_function(Fun) ->
+ Fun(),
+ {noreply,State}.
+
+%% gen_statem
+callback_mode() ->
+ handle_event_function.
+
+%%%-----------------------------------------------------------------
+%%% Internal
+sup_name() ->
+ list_to_atom(?MODULE_STRING++"_sup").
diff --git a/lib/kernel/test/logger_simple_SUITE.erl b/lib/kernel/test/logger_simple_SUITE.erl
new file mode 100644
index 0000000000..5d8d32492d
--- /dev/null
+++ b/lib/kernel/test/logger_simple_SUITE.erl
@@ -0,0 +1,247 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_simple_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+
+-define(check_no_log,[] = test_server:messages_get()).
+-define(check(Expected),
+ receive {log,Expected} ->
+ [] = test_server:messages_get()
+ after 1000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(str,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(map_rep,#{function=>?FUNCTION_NAME, line=>?LINE}).
+-define(keyval_rep,[{function,?FUNCTION_NAME}, {line,?LINE}]).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ #{handlers:=Hs0} = logger:i(),
+ Hs = lists:keydelete(cth_log_redirect,1,Hs0),
+ [ok = logger:remove_handler(Id) || {Id,_,_} <- Hs],
+ Env = [{App,Key,application:get_env(App,Key)} ||
+ {App,Key} <- [{kernel,logger_dest},
+ {kernel,logger_level}]],
+ [{env,Env},{logger,Hs}|Config].
+
+end_per_suite(Config) ->
+    [application:set_env(App,Key,Val) || {App,Key,{ok,Val}} <- ?config(env,Config)],
+ Hs = ?config(logger,Config),
+ [ok = logger:add_handler(Id,Mod,C) || {Id,Mod,C} <- Hs],
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(_TestCase, Config) ->
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [start_stop,
+ get_buffer,
+ replace_file,
+ replace_disk_log
+ ].
+
+start_stop(_Config) ->
+ undefined = whereis(logger_simple),
+ register(logger_simple,self()),
+ {error,_} = logger:add_handler(logger_simple,
+ logger_simple,
+ #{filter_default=>log}),
+ unregister(logger_simple),
+ ok = logger:add_handler(logger_simple,logger_simple,#{filter_default=>log}),
+ Pid = whereis(logger_simple),
+ true = is_pid(Pid),
+ ok = logger:remove_handler(logger_simple),
+ false = is_pid(whereis(logger_simple)),
+ ok.
+start_stop(cleanup,_Config) ->
+ logger:remove_handler(logger_simple).
+
+get_buffer(_Config) ->
+ %% Start simple without buffer
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log}),
+ logger:emergency(?str),
+ logger:alert(?str,[]),
+ logger:error(?map_rep),
+ logger:info(?keyval_rep),
+ {ok,[]} = logger_simple:get_buffer(), % no buffer
+ ok = logger:remove_handler(logger_simple),
+
+ %% Start with buffer
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log,
+ logger_simple=>#{buffer=>true}}),
+ logger:emergency(M1=?str),
+ logger:alert(M2=?str,[]),
+ logger:error(M3=?map_rep),
+ logger:info(M4=?keyval_rep),
+ logger:info(M41=?keyval_rep++[not_key_val]),
+ error_logger:error_report(some_type,M5=?map_rep),
+ error_logger:warning_report("some_type",M6=?map_rep),
+ logger:critical(M7=?str,[A7=?keyval_rep]),
+ logger:notice(M8=["fake",string,"line:",?LINE]),
+ {ok,Buffered1} = logger_simple:get_buffer(),
+ [#{level:=emergency,msg:={string,M1}},
+ #{level:=alert,msg:={M2,[]}},
+ #{level:=error,msg:={report,M3}},
+ #{level:=info,msg:={report,M4}},
+ #{level:=info,msg:={report,M41}},
+ #{level:=error,msg:={report,#{label:={error_logger,error_report},
+ report:=M5}}},
+ #{level:=warning,msg:={report,#{label:={error_logger,warning_report},
+ report:=M6}}},
+ #{level:=critical,msg:={M7,[A7]}},
+ #{level:=notice,msg:={string,M8}}] = Buffered1,
+
+ %% Keep logging - should not buffer any more
+ logger:emergency(?str),
+ logger:alert(?str,[]),
+ logger:error(?map_rep),
+ logger:info(?keyval_rep),
+ {ok,[]} = logger_simple:get_buffer(),
+ ok = logger:remove_handler(logger_simple),
+
+ %% Fill buffer and drop
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log,
+ logger_simple=>#{buffer=>true}}),
+ logger:emergency(M9=?str),
+ M10=?str,
+ [logger:info(M10) || _ <- lists:seq(1,8)],
+ logger:error(M11=?str),
+ logger:error(?str),
+ logger:error(?str),
+ {ok,Buffered3} = logger_simple:get_buffer(),
+ 11 = length(Buffered3),
+ [#{level:=emergency,msg:={string,M9}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=info,msg:={string,M10}},
+ #{level:=error,msg:={string,M11}},
+ #{level:=info,msg:={"Simple handler buffer full, dropped ~w messages",[2]}}]
+ = Buffered3,
+ ok.
+get_buffer(cleanup,_Config) ->
+ logger:remove_handler(logger_simple).
+
+replace_file(Config) ->
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log,
+ logger_simple=>#{buffer=>true}}),
+ logger:emergency(M1=?str),
+ logger:alert(M2=?str,[]),
+ logger:error(?map_rep),
+ logger:info(?keyval_rep),
+ undefined = whereis(?STANDARD_HANDLER),
+ PrivDir = ?config(priv_dir,Config),
+ File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)++".log"),
+
+ application:set_env(kernel,logger_dest,{file,File}),
+ application:set_env(kernel,logger_level,info),
+
+ ok = logger:setup_standard_handler(),
+ true = is_pid(whereis(?STANDARD_HANDLER)),
+ ok = logger_std_h:filesync(?STANDARD_HANDLER),
+ {ok,Bin} = file:read_file(File),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=INFO REPORT===="++_,
+ _,
+ _] = Lines,
+ ok.
+replace_file(cleanup,_Config) ->
+ logger:remove_handler(?STANDARD_HANDLER),
+ logger:remove_handler(logger_simple).
+
+replace_disk_log(Config) ->
+ ok = logger:add_handler(logger_simple,logger_simple,
+ #{filter_default=>log,
+ logger_simple=>#{buffer=>true}}),
+ logger:emergency(M1=?str),
+ logger:alert(M2=?str,[]),
+ logger:error(?map_rep),
+ logger:info(?keyval_rep),
+ undefined = whereis(?STANDARD_HANDLER),
+ PrivDir = ?config(priv_dir,Config),
+ File = filename:join(PrivDir,atom_to_list(?FUNCTION_NAME)),
+
+ application:set_env(kernel,logger_dest,{disk_log,File}),
+ application:set_env(kernel,logger_level,info),
+
+ ok = logger:setup_standard_handler(),
+ true = is_pid(whereis(?STANDARD_HANDLER)),
+ ok = logger_disk_log_h:disk_log_sync(?STANDARD_HANDLER),
+ {ok,Bin} = file:read_file(File++".1"),
+ Lines = [unicode:characters_to_list(L) ||
+ L <- binary:split(Bin,<<"\n">>,[global,trim])],
+ ["=EMERGENCY REPORT===="++_,
+ M1,
+ "=ALERT REPORT===="++_,
+ M2,
+ "=ERROR REPORT===="++_,
+ _,
+ _,
+ "=INFO REPORT===="++_,
+ _,
+ _|_] = Lines, % the tail might be an info report about opening the disk log
+ ok.
+replace_disk_log(cleanup,_Config) ->
+ logger:remove_handler(?STANDARD_HANDLER),
+ logger:remove_handler(logger_simple).
+
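As an orientation aid (not part of the patch), the handover that replace_file/1 and replace_disk_log/1 above exercise can be sketched in a few lines; the file path and log message below are made up:

%% Minimal sketch, assuming the logger_simple handler and
%% logger:setup_standard_handler/0 introduced by this patch; the path
%% and the message are hypothetical.
ok = logger:add_handler(logger_simple, logger_simple,
                        #{filter_default => log,
                          logger_simple => #{buffer => true}}),
logger:error("logged before any real handler exists"),
ok = application:set_env(kernel, logger_dest, {file, "/tmp/early.log"}),
ok = application:set_env(kernel, logger_level, info),
ok = logger:setup_standard_handler().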
diff --git a/lib/kernel/test/logger_std_h_SUITE.erl b/lib/kernel/test/logger_std_h_SUITE.erl
new file mode 100644
index 0000000000..e940e0a026
--- /dev/null
+++ b/lib/kernel/test/logger_std_h_SUITE.erl
@@ -0,0 +1,1396 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2018. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+-module(logger_std_h_SUITE).
+
+-compile(export_all).
+
+-include_lib("common_test/include/ct.hrl").
+-include_lib("kernel/include/logger.hrl").
+-include_lib("kernel/src/logger_internal.hrl").
+-include_lib("kernel/src/logger_h_common.hrl").
+-include_lib("stdlib/include/ms_transform.hrl").
+-include_lib("kernel/include/file.hrl").
+
+-define(check_no_log, [] = test_server:messages_get()).
+-define(check(Expected),
+ receive
+ {log,Expected} ->
+ [] = test_server:messages_get()
+ after 5000 ->
+ ct:fail({report_not_received,
+ {line,?LINE},
+ {expected,Expected},
+ {got,test_server:messages_get()}})
+ end).
+
+-define(msg,"Log from "++atom_to_list(?FUNCTION_NAME)++
+ ":"++integer_to_list(?LINE)).
+-define(bin(Msg), list_to_binary(Msg++"\n")).
+-define(domain,#{domain=>[?MODULE]}).
+
+-define(FILESYNC_REP_INT, if is_atom(?FILESYNC_REPEAT_INTERVAL) -> 5500;
+ true -> ?FILESYNC_REPEAT_INTERVAL + 500
+ end).
+
+suite() ->
+ [{timetrap,{seconds,30}}].
+
+init_per_suite(Config) ->
+ timer:start(), % to avoid progress report
+ {ok,{?STANDARD_HANDLER,#{formatter:=OrigFormatter}}} =
+ logger:get_handler_config(?STANDARD_HANDLER),
+ [{formatter,OrigFormatter}|Config].
+
+end_per_suite(Config) ->
+ {OrigMod,OrigConf} = proplists:get_value(formatter,Config),
+ logger:set_handler_config(?STANDARD_HANDLER,formatter,{OrigMod,OrigConf}),
+ ok.
+
+init_per_group(_Group, Config) ->
+ Config.
+
+end_per_group(_Group, _Config) ->
+ ok.
+
+init_per_testcase(TestHooksCase, Config) when
+ TestHooksCase == write_failure;
+ TestHooksCase == sync_failure ->
+ if ?TEST_HOOKS_TAB == undefined ->
+ {skip,"Define the TEST_HOOKS macro to run this test"};
+ true ->
+ ct:print("********** ~w **********", [TestHooksCase]),
+ Config
+ end;
+init_per_testcase(TestCase, Config) ->
+ ct:print("********** ~w **********", [TestCase]),
+ Config.
+
+end_per_testcase(Case, Config) ->
+ try apply(?MODULE,Case,[cleanup,Config])
+ catch error:undef -> ok
+ end,
+ ok.
+
+groups() ->
+ [].
+
+all() ->
+ [add_remove_instance_tty,
+ add_remove_instance_standard_io,
+ add_remove_instance_standard_error,
+ add_remove_instance_file1,
+ add_remove_instance_file2,
+ default_formatter,
+ errors,
+ formatter_fail,
+ config_fail,
+ crash_std_h_to_file,
+ crash_std_h_to_disk_log,
+ bad_input,
+ info_and_reset,
+ reconfig,
+ file_opts,
+ filesync,
+ write_failure,
+ sync_failure,
+ op_switch_to_sync_file,
+ op_switch_to_sync_tty,
+ op_switch_to_drop_file,
+ op_switch_to_drop_tty,
+ op_switch_to_flush_file,
+ op_switch_to_flush_tty,
+ limit_burst_disabled,
+ limit_burst_enabled_one,
+ limit_burst_enabled_period,
+ kill_disabled,
+ qlen_kill_new,
+ qlen_kill_std,
+ mem_kill_new,
+ mem_kill_std,
+ restart_after,
+ handler_requests_under_load
+ ].
+
+add_remove_instance_tty(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_std_h,{type,tty}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{type => tty},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ ok.
+
+add_remove_instance_standard_io(_Config) ->
+ add_remove_instance_nofile(standard_io).
+add_remove_instance_standard_io(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_standard_error(_Config) ->
+ add_remove_instance_nofile(standard_error).
+add_remove_instance_standard_error(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file1(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog1.txt"),
+ Type = {file,Log},
+ add_remove_instance_file(Log, Type).
+add_remove_instance_file1(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file2(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,"stdlog2.txt"),
+ Type = {file,Log,[raw,append]},
+ add_remove_instance_file(Log, Type).
+add_remove_instance_file2(cleanup,_Config) ->
+ logger_std_h_remove().
+
+add_remove_instance_file(Log, Type) ->
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{logger_std_h => #{type => Type},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Pid = whereis(?MODULE),
+ true = is_pid(Pid),
+ logger:info(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(?MODULE),
+ logger:info(?msg,?domain),
+ ?check_no_log,
+ try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT),
+ ok.
+
+default_formatter(_Config) ->
+ ok = logger:set_handler_config(?STANDARD_HANDLER,formatter,
+ {?DEFAULT_FORMATTER,?DEFAULT_FORMAT_CONFIG}),
+ ct:capture_start(),
+ logger:info(M1=?msg),
+ timer:sleep(100),
+ ct:capture_stop(),
+ [Msg] = ct:capture_get(),
+ match = re:run(Msg,"=INFO REPORT====.*\n"++M1,[{capture,none}]),
+ ok.
+
+errors(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,?FUNCTION_NAME),
+
+ ok = logger:add_handler(?MODULE,logger_std_h,#{}),
+ {error,{already_exist,?MODULE}} =
+ logger:add_handler(?MODULE,logger_std_h,#{}),
+
+ {error,{not_found,no_such_name}} = logger:remove_handler(no_such_name),
+
+ ok = logger:remove_handler(?MODULE),
+ {error,{not_found,?MODULE}} = logger:remove_handler(?MODULE),
+
+ {error,
+ {handler_not_added,
+ {invalid_config,logger_std_h,{type,faulty_type}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{type => faulty_type}}),
+
+ NoDir = lists:concat(["/",?MODULE,"_dir"]),
+ {error,
+ {handler_not_added,{{open_failed,NoDir,eacces},_}}} =
+ logger:add_handler(myh2,logger_std_h,
+ #{logger_std_h=>#{type=>{file,NoDir}}}),
+
+ {error,
+ {handler_not_added,{{open_failed,Log,_},_}}} =
+ logger:add_handler(myh3,logger_std_h,
+ #{logger_std_h=>#{type=>{file,Log,[bad_file_opt]}}}),
+
+ ok = logger:info(?msg).
+
+errors(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+formatter_fail(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir,?FUNCTION_NAME),
+
+ %% no formatter
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{logger_std_h => #{type => {file,Log}},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE])}),
+ Pid = whereis(?MODULE),
+ true = is_pid(Pid),
+ {ok,#{handlers:=H}} = logger:get_logger_config(),
+ true = lists:member(?MODULE,H),
+
+ %% Formatter is added automatically
+ {ok,{_,#{formatter:={logger_formatter,_}}}} =
+ logger:get_handler_config(?MODULE),
+ logger:info(M1=?msg,?domain),
+ Got1 = try_match_file(Log,"=INFO REPORT====.*\n"++M1,5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{nonexistingmodule,#{}}),
+ logger:info(M2=?msg,?domain),
+ Got2 = try_match_file(Log,
+ Got1++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M2,
+ 5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,crash}),
+ logger:info(M3=?msg,?domain),
+ Got3 = try_match_file(Log,
+ Got2++"=INFO REPORT====.*\nFORMATTER CRASH: .*"++M3,
+ 5000),
+
+ ok = logger:set_handler_config(?MODULE,formatter,{?MODULE,bad_return}),
+ logger:info(?msg,?domain),
+ try_match_file(Log,
+ Got3++"FORMATTER ERROR: bad_return_value",
+ 5000),
+
+ %% Check that handler is still alive and was never dead
+ Pid = whereis(?MODULE),
+ {ok,#{handlers:=H}} = logger:get_logger_config(),
+
+ ok.
+
+formatter_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+config_fail(_Config) ->
+ {error,{handler_not_added,{invalid_config,logger_std_h,{bad,bad}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{bad => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_config,logger_std_h,
+ {restart_type,bad}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{restart_type => bad},
+ filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{handler_not_added,{invalid_levels,{42,42,_}}}} =
+ logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{toggle_sync_qlen=>42,
+ drop_new_reqs_qlen=>42}}),
+
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{filter_default=>log,
+ formatter=>{?MODULE,self()}}),
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,logger_std_h,
+ #{type=>{file,"file"}}),
+ {error,{illegal_config_change,_,_}} =
+ logger:set_handler_config(?MODULE,id,bad),
+ {error,{invalid_levels,_}} =
+ logger:set_handler_config(?MODULE,logger_std_h,
+ #{toggle_sync_qlen=>100,
+ flush_reqs_qlen=>99}),
+ {error,{invalid_config,logger_std_h,{filesync_rep_int,2000}}} =
+ logger:set_handler_config(?MODULE, logger_std_h,
+ #{filesync_rep_int => 2000}),
+ ok.
+
+config_fail(cleanup,_Config) ->
+ logger:remove_handler(?MODULE).
+
+crash_std_h_to_file(Config) ->
+ crash_std_h(Config,?FUNCTION_NAME,logger_dest,file).
+crash_std_h_to_file(cleanup,_Config) ->
+ crash_std_h(cleanup).
+
+crash_std_h_to_disk_log(Config) ->
+ crash_std_h(Config,?FUNCTION_NAME,logger_dest,disk_log).
+crash_std_h_to_disk_log(cleanup,_Config) ->
+ crash_std_h(cleanup).
+
+crash_std_h(Config,Func,Var,Type) ->
+ Dir = ?config(priv_dir,Config),
+ File = lists:concat([?MODULE,"_",Func,".log"]),
+ Log = filename:join(Dir,File),
+ Pa = filename:dirname(code:which(?MODULE)),
+ TypeAndLog =
+ case os:type() of
+ {win32,_} ->
+ lists:concat([" {",Type,",\\\"",Log,"\\\"}"]);
+ _ ->
+ lists:concat([" \'{",Type,",\"",Log,"\"}\'"])
+ end,
+ Args = lists:concat([" -kernel ",Var,TypeAndLog," -pa ",Pa]),
+ Name = lists:concat([?MODULE,"_",Func]),
+ ct:pal("Starting ~p with ~tp", [Name,Args]),
+ %% Start a node which prints kernel logs to the destination specified by Type
+ {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]),
+ Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,self()}]),
+ ok = log_on_remote_node(Node,"dummy1"),
+ ?check("dummy1"),
+ {ok,Bin1} = sync_and_read(Node,Type,Log),
+ <<"dummy1\n">> = binary:part(Bin1,{byte_size(Bin1),-7}),
+
+ %% Kill the logger_std_h process
+ exit(Pid, kill),
+
+ %% Wait a bit, then check that it is gone
+ timer:sleep(2000),
+ undefined = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]),
+
+ %% Check that file is not empty
+ {ok,Bin2} = sync_and_read(Node,Type,Log),
+ <<"dummy1\n">> = binary:part(Bin2,{byte_size(Bin2),-7}),
+ ok.
+
+%% Cannot use rpc:call here, since the code would execute on a
+%% process with group_leader on this (the calling) node, and thus
+%% logger would send the log event to the logger process here instead
+%% of logging it itself.
+log_on_remote_node(Node,Msg) ->
+ _ = spawn_link(Node,
+ fun() -> erlang:group_leader(whereis(user),self()),
+ logger:info(Msg)
+ end),
+ ok.
+
+
+crash_std_h(cleanup) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+sync_and_read(Node,disk_log,Log) ->
+ rpc:call(Node,logger_disk_log_h,disk_log_sync,[?STANDARD_HANDLER]),
+ case file:read_file(Log ++ ".1") of
+ {ok,<<>>} ->
+ timer:sleep(5000),
+ file:read_file(Log ++ ".1");
+ Ok ->
+ Ok
+ end;
+sync_and_read(Node,file,Log) ->
+ rpc:call(Node,logger_std_h,filesync,[?STANDARD_HANDLER]),
+ case file:read_file(Log) of
+ {ok,<<>>} ->
+ timer:sleep(5000),
+ file:read_file(Log);
+ Ok ->
+ Ok
+ end.
+
+bad_input(_Config) ->
+ {error,{badarg,{filesync,["BadType"]}}} = logger_std_h:filesync("BadType"),
+ {error,{badarg,{info,["BadType"]}}} = logger_std_h:info("BadType"),
+ {error,{badarg,{reset,["BadType"]}}} = logger_std_h:reset("BadType").
+
+
+info_and_reset(_Config) ->
+ #{id := ?STANDARD_HANDLER} = logger_std_h:info(?STANDARD_HANDLER),
+ ok = logger_std_h:reset(?STANDARD_HANDLER).
+
+reconfig(_Config) ->
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{logger_std_h => #{type => standard_io},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ #{id := ?MODULE,
+ type := standard_io,
+ file_ctrl_pid := FileCtrlPid,
+ toggle_sync_qlen := ?TOGGLE_SYNC_QLEN,
+ drop_new_reqs_qlen := ?DROP_NEW_REQS_QLEN,
+ flush_reqs_qlen := ?FLUSH_REQS_QLEN,
+ enable_burst_limit := ?ENABLE_BURST_LIMIT,
+ burst_limit_size := ?BURST_LIMIT_SIZE,
+ burst_window_time := ?BURST_WINDOW_TIME,
+ enable_kill_overloaded := ?ENABLE_KILL_OVERLOADED,
+ handler_overloaded_qlen := ?HANDLER_OVERLOADED_QLEN,
+ handler_overloaded_mem := ?HANDLER_OVERLOADED_MEM,
+ handler_restart_after := ?HANDLER_RESTART_AFTER,
+ filesync_repeat_interval := ?FILESYNC_REPEAT_INTERVAL} =
+ logger_std_h:info(?MODULE),
+
+ ok = logger:set_handler_config(?MODULE, logger_std_h,
+ #{toggle_sync_qlen => 1,
+ drop_new_reqs_qlen => 2,
+ flush_reqs_qlen => 3,
+ enable_burst_limit => false,
+ burst_limit_size => 10,
+ burst_window_time => 10,
+ enable_kill_overloaded => true,
+ handler_overloaded_qlen => 100000,
+ handler_overloaded_mem => 10000000,
+ handler_restart_after => never,
+ filesync_repeat_interval => no_repeat}),
+ #{id := ?MODULE,
+ type := standard_io,
+ file_ctrl_pid := FileCtrlPid,
+ toggle_sync_qlen := 1,
+ drop_new_reqs_qlen := 2,
+ flush_reqs_qlen := 3,
+ enable_burst_limit := false,
+ burst_limit_size := 10,
+ burst_window_time := 10,
+ enable_kill_overloaded := true,
+ handler_overloaded_qlen := 100000,
+ handler_overloaded_mem := 10000000,
+ handler_restart_after := never,
+ filesync_repeat_interval := no_repeat} = logger_std_h:info(?MODULE),
+ ok.
+
+reconfig(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+
+file_opts(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ BadFileOpts = [raw],
+ BadType = {file,Log,BadFileOpts},
+ {error,{handler_not_added,{{open_failed,Log,enoent},_}}} =
+ logger:add_handler(?MODULE, logger_std_h,
+ #{logger_std_h => #{type => BadType}}),
+
+ OkFileOpts = [raw,append],
+ OkType = {file,Log,OkFileOpts},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{logger_std_h => #{type => OkType},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+
+ #{type := OkType} = logger_std_h:info(?MODULE),
+ logger:info(M1=?msg,?domain),
+ ?check(M1),
+ B1 = ?bin(M1),
+ try_read_file(Log, {ok,B1}, ?FILESYNC_REP_INT),
+ ok.
+file_opts(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+
+filesync(Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([?FUNCTION_NAME,".log"])),
+ Type = {file,Log},
+ ok = logger:add_handler(?MODULE,
+ logger_std_h,
+ #{logger_std_h => #{type => Type},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Tester = self(),
+ TraceFun = fun({trace,_,call,{Mod,Func,Details}}, Pid) ->
+ Pid ! {trace,Mod,Func,Details},
+ Pid;
+ ({trace,TPid,'receive',Received}, Pid) ->
+ Pid ! {trace,TPid,Received},
+ Pid
+ end,
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ FileCtrlPid = maps:get(file_ctrl_pid, logger_std_h:info(?MODULE)),
+ {ok,_} = dbg:p(FileCtrlPid, [c]),
+ {ok,_} = dbg:tpl(logger_std_h, write_to_dev, 5, []),
+ {ok,_} = dbg:tpl(logger_std_h, sync_dev, 4, []),
+ {ok,_} = dbg:tp(file, datasync, 1, []),
+
+ logger:info("first", ?domain),
+ %% wait for automatic filesync
+ timer:sleep(?FILESYNC_REP_INT),
+ Expected1 = [{log,"first"}, {trace,logger_std_h,write_to_dev},
+ {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
+
+ logger:info("second", ?domain),
+ %% do explicit filesync
+ logger_std_h:filesync(?MODULE),
+ %% a second filesync should be ignored
+ logger_std_h:filesync(?MODULE),
+ Expected2 = [{log,"second"}, {trace,logger_std_h,write_to_dev},
+ {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
+
+ %% check that if there's no repeated filesync active,
+ %% a filesync is still performed when handler goes idle
+ logger:set_handler_config(?MODULE, logger_std_h,
+ #{filesync_repeat_interval => no_repeat}),
+ no_repeat = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ logger:info("third", ?domain),
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ logger:info("fourth", ?domain),
+ %% wait for automatic filesync
+ timer:sleep(?IDLE_DETECT_TIME_MSEC*2),
+ Expected3 = [{log,"third"}, {trace,logger_std_h,write_to_dev},
+ {log,"fourth"}, {trace,logger_std_h,write_to_dev},
+ {trace,logger_std_h,sync_dev}, {trace,file,datasync}],
+
+ dbg:stop_clear(),
+
+ %% verify that filesync has been performed as expected
+ Received1 = lists:map(fun({trace,M,F,_}) -> {trace,M,F};
+ (Other) -> Other
+ end, test_server:messages_get()),
+ ct:pal("Trace #1 =~n~p", [Received1]),
+ Received1 = Expected1 ++ Expected2 ++ Expected3,
+
+ try_read_file(Log, {ok,<<"first\nsecond\nthird\nfourth\n">>}, 1000),
+
+ {ok,_} = dbg:tracer(process, {TraceFun, Tester}),
+ {ok,_} = dbg:p(whereis(?MODULE), [c]),
+ {ok,_} = dbg:tpl(logger_std_h, handle_cast, 2, []),
+
+ %% switch repeated filesync on and verify that the looping works
+ SyncInt = 1000,
+ WaitT = 4500,
+ logger:set_handler_config(?MODULE, logger_std_h,
+ #{filesync_repeat_interval => SyncInt}),
+ SyncInt = maps:get(filesync_repeat_interval, logger_std_h:info(?MODULE)),
+ timer:sleep(WaitT),
+ logger:set_handler_config(?MODULE, logger_std_h,
+ #{filesync_repeat_interval => no_repeat}),
+ dbg:stop_clear(),
+
+ Received2 = lists:map(fun({trace,_M,handle_cast,[{Op,_},_]}) -> {trace,Op};
+ (Other) -> Other
+ end, test_server:messages_get()),
+ ct:pal("Trace #2 =~n~p", [Received2]),
+ OneSync = [{trace,repeated_filesync}],
+ %% receive 1 initial repeated_filesync, then 1 per sec
+ Received2 =
+ lists:flatten([OneSync || _ <- lists:seq(1, 1 + trunc(WaitT/SyncInt))]),
+ ok.
+filesync(cleanup, _Config) ->
+ logger:remove_handler(?MODULE).
+
+write_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ Log = filename:join(Dir, File),
+ Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\n">>}, ?FILESYNC_REP_INT),
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,{error,terminated}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,write,Log,{error,terminated}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,{error,eacces}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,write,Log,{error,eacces}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [file_write,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ rpc:call(Node, logger_std_h, filesync, [?STANDARD_HANDLER]),
+ ?check_no_log,
+ try_read_file(Log, {ok,<<"Logged1\nLogged2\n">>}, ?FILESYNC_REP_INT),
+ ok.
+write_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+sync_failure(Config) ->
+ Dir = ?config(priv_dir, Config),
+ File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ Log = filename:join(Dir, File),
+ Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ false = (undefined == rpc:call(Node, ets, whereis, [?TEST_HOOKS_TAB])),
+ rpc:call(Node, ets, insert, [?TEST_HOOKS_TAB,{tester,self()}]),
+ rpc:call(Node, ?MODULE, set_internal_log, [?MODULE,internal_log]),
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
+
+ SyncInt = 500,
+ ok = rpc:call(Node, logger, set_handler_config,
+ [?STANDARD_HANDLER, logger_std_h,
+ #{filesync_repeat_interval => SyncInt}]),
+ Info = rpc:call(Node, logger_std_h, info, [?STANDARD_HANDLER]),
+ SyncInt = maps:get(filesync_repeat_interval, Info),
+
+ ok = log_on_remote_node(Node, "Logged1"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,terminated}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,terminated}}}),
+
+ ok = log_on_remote_node(Node, "No second error printout"),
+ ?check_no_log,
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,{error,eacces}]),
+ ok = log_on_remote_node(Node, "Cause simple error printout"),
+ ?check({error,{?STANDARD_HANDLER,filesync,Log,{error,eacces}}}),
+
+ rpc:call(Node, ?MODULE, set_result, [file_datasync,ok]),
+ ok = log_on_remote_node(Node, "Logged2"),
+ ?check_no_log,
+ ok.
+sync_failure(cleanup, _Config) ->
+ Nodes = nodes(),
+ [test_server:stop_node(Node) || Node <- Nodes].
+
+start_std_h_on_new_node(_Config, Func, Log) ->
+ Pa = filename:dirname(code:which(?MODULE)),
+ Dest =
+ case os:type() of
+ {win32,_} ->
+ lists:concat([" {file,\\\"",Log,"\\\"}"]);
+ _ ->
+ lists:concat([" \'{file,\"",Log,"\"}\'"])
+ end,
+ Args = lists:concat([" -kernel ",logger_dest,Dest," -pa ",Pa]),
+ Name = lists:concat([?MODULE,"_",Func]),
+ ct:pal("Starting ~s with ~tp", [Name,Args]),
+ {ok,Node} = test_server:start_node(Name, peer, [{args, Args}]),
+ Pid = rpc:call(Node,erlang,whereis,[?STANDARD_HANDLER]),
+ true = is_pid(Pid),
+ ok = rpc:call(Node,logger,set_handler_config,[?STANDARD_HANDLER,formatter,
+ {?MODULE,nl}]),
+ Node.
+
+%% functions for test hook macros to be called by rpc
+set_internal_log(Mod, Func) ->
+ ?set_internal_log({Mod,Func}).
+set_result(Op, Result) ->
+ ?set_result(Op, Result).
+set_defaults() ->
+ ?set_defaults().
+
+%% internal log function that sends the term to the test case process
+internal_log(Type, Term) ->
+ [{tester,Tester}] = ets:lookup(?TEST_HOOKS_TAB, tester),
+ Tester ! {log,{Type,Term}},
+ logger:internal_log(Type, Term),
+ ok.
+
+
+%%%-----------------------------------------------------------------
+%%% Overload protection tests
+
+op_switch_to_sync_file(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3,
+ drop_new_reqs_qlen => 501,
+ flush_reqs_qlen => 2000,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ %% TRecvPid = start_op_trace(),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ NumOfReqs = count_lines(Log),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(async,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(sync,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_switch(async,sync,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(drop,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(flush,Events) end),
+ ok = file:delete(Log),
+ %% stop_op_trace(TRecvPid),
+ ok.
+op_switch_to_sync_file(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_sync_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 3,
+ drop_new_reqs_qlen => 501,
+ flush_reqs_qlen => 2000,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ ok.
+op_switch_to_sync_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop_file(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 3,
+ flush_reqs_qlen => 600,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ %% TRecvPid = start_op_trace(),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages dropped = ~w (~w)",
+ [NumOfReqs-Logged,NumOfReqs]),
+ true = (Logged < NumOfReqs),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(async,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(drop,Events) end),
+ %% false = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_mode(flush,Events) end),
+ %% true = analyse_trace(TRecvPid,
+ %% fun(Events) -> find_switch(async,drop,Events)
+ %% orelse find_switch(sync,drop,Events)
+ %% end),
+ ok = file:delete(Log),
+ %% stop_op_trace(TRecvPid),
+ ok.
+op_switch_to_drop_file(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_drop_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 3,
+ flush_reqs_qlen => 600,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 500,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ ok.
+op_switch_to_drop_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_flush_file() ->
+ [{timetrap,{seconds,60}}].
+op_switch_to_flush_file(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+
+ %% it's important that both async and sync requests have been queued
+ %% when the flush happens (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 99,
+ flush_reqs_qlen => 100,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 10000,
+ Procs = 100,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages flushed/dropped = ~w (~w)",
+ [(NumOfReqs*Procs)-Logged,NumOfReqs*Procs]),
+ true = (Logged < (NumOfReqs*Procs)),
+
+ %% TODO: Verify that the handler has switched to flush mode
+
+ ok = file:delete(Log),
+ ok.
+op_switch_to_flush_file(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+op_switch_to_flush_tty(Config) ->
+ {HConfig,StdHConfig} = start_handler(?MODULE, standard_io, Config),
+
+ %% it's important that both async and sync requests have been queued
+ %% when the flush happens (verify with coverage of flush_log_requests/2)
+
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 99,
+ flush_reqs_qlen => 100,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 10000,
+ Procs = 10,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ ok.
+op_switch_to_flush_tty(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_disabled(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => false,
+ burst_limit_size => 10,
+ burst_window_time => 2000,
+ drop_new_reqs_qlen => 200,
+ flush_reqs_qlen => 300}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ NumOfReqs = Logged.
+limit_burst_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_one(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => true,
+ burst_limit_size => ReqLimit,
+ burst_window_time => 2000,
+ drop_new_reqs_qlen => 200,
+ flush_reqs_qlen => 300}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ ReqLimit = Logged.
+limit_burst_enabled_one(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+limit_burst_enabled_period(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ ReqLimit = 10,
+ BurstTWin = 1000,
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{enable_burst_limit => true,
+ burst_limit_size => ReqLimit,
+ burst_window_time => BurstTWin,
+ drop_new_reqs_qlen => 20000,
+ flush_reqs_qlen => 20001}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+
+ Windows = 3,
+ Sent = send_burst({t,BurstTWin*Windows}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ ok = file:delete(Log),
+ true = (Logged > (ReqLimit*Windows)) andalso
+ (Logged < (ReqLimit*(Windows+2))).
+limit_burst_enabled_period(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+kill_disabled(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>false,
+ handler_overloaded_qlen=>10,
+ handler_overloaded_mem=>100}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ NumOfReqs = 100,
+ send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ Logged = count_lines(Log),
+ ct:pal("Number of messages logged = ~w", [Logged]),
+ ok = file:delete(Log),
+ true = is_pid(whereis(?MODULE)),
+ ok.
+kill_disabled(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+qlen_kill_new(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(?MODULE),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = 2000,
+ NewHConfig =
+ HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_overloaded_mem=>Mem0+50000,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 2,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ timer:sleep(RestartAfter + 1000),
+ true = is_pid(whereis(?MODULE)),
+ ok
+ after
+ 5000 ->
+ Info = logger_std_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+qlen_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% choke the standard handler on remote node to verify the termination
+%% works as expected
+qlen_kill_std(_Config) ->
+ %% Dir = ?config(priv_dir, Config),
+ %% File = lists:concat([?MODULE,"_",?FUNCTION_NAME,".log"]),
+ %% Log = filename:join(Dir, File),
+ %% Node = start_std_h_on_new_node(Config, ?FUNCTION_NAME, Log),
+ %% ok = rpc:call(Node, logger, set_handler_config,
+ %% [?STANDARD_HANDLER, logger_std_h,
+ %% #{enable_kill_overloaded=>true,
+ %% handler_overloaded_qlen=>10,
+ %% handler_overloaded_mem=>100000}]),
+ {skip,"Not done yet"}.
+
+mem_kill_new(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ Pid0 = whereis(?MODULE),
+ {_,Mem0} = process_info(Pid0, memory),
+ RestartAfter = 2000,
+ NewHConfig =
+ HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>50000,
+ handler_overloaded_mem=>Mem0+500,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ MRef = erlang:monitor(process, Pid0),
+ NumOfReqs = 100,
+ Procs = 2,
+ send_burst({n,NumOfReqs}, {spawn,Procs,0}, {chars,79}, info),
+ %% send_burst({n,NumOfReqs}, seq, {chars,79}, info),
+ receive
+ {'DOWN', MRef, _, _, Info} ->
+ case Info of
+ {shutdown,{overloaded,?MODULE,QLen,Mem}} ->
+ ct:pal("Terminated with qlen = ~w, mem = ~w", [QLen,Mem]);
+ killed ->
+ ct:pal("Slow shutdown, handler process was killed!", [])
+ end,
+ timer:sleep(RestartAfter * 2),
+ true = is_pid(whereis(?MODULE)),
+ ok
+ after
+ 5000 ->
+ Info = logger_std_h:info(?MODULE),
+ ct:pal("Handler state = ~p", [Info]),
+ ct:fail("Handler not dead! It should not have survived this!")
+ end.
+mem_kill_new(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% choke the standard handler on remote node to verify the termination
+%% works as expected
+mem_kill_std(_Config) ->
+ {skip,"Not done yet"}.
+
+restart_after(Config) ->
+ {Log,HConfig,StdHConfig} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig1 =
+ HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_restart_after=>never}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig1),
+ MRef1 = erlang:monitor(process, whereis(?MODULE)),
+ %% kill handler
+ send_burst({n,100}, {spawn,2,0}, {chars,79}, info),
+ receive
+ {'DOWN', MRef1, _, _, _Info1} ->
+ timer:sleep(?HANDLER_RESTART_AFTER + 1000),
+ undefined = whereis(?MODULE),
+ ok
+ after
+ 5000 ->
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+
+ {Log,_,_} = start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ RestartAfter = 2000,
+ NewHConfig2 =
+ HConfig#{logger_std_h=>StdHConfig#{enable_kill_overloaded=>true,
+ handler_overloaded_qlen=>10,
+ handler_restart_after=>RestartAfter}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig2),
+ Pid0 = whereis(?MODULE),
+ MRef2 = erlang:monitor(process, Pid0),
+ %% kill handler
+ send_burst({n,100}, {spawn,2,0}, {chars,79}, info),
+ receive
+ {'DOWN', MRef2, _, _, _Info2} ->
+ timer:sleep(RestartAfter + 1000),
+ Pid1 = whereis(?MODULE),
+ true = is_pid(Pid1),
+ false = (Pid1 == Pid0),
+ ok
+ after
+ 5000 ->
+ ct:fail("Handler not dead! It should not have survived this!")
+ end,
+ ok.
+restart_after(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+%% send handler requests (filesync, info, reset, change_config)
+%% during high load to verify that sync, dropping and flushing are
+%% handled correctly.
+handler_requests_under_load() ->
+ [{timetrap,{seconds,60}}].
+handler_requests_under_load(Config) ->
+ {Log,HConfig,StdHConfig} =
+ start_handler(?MODULE, ?FUNCTION_NAME, Config),
+ NewHConfig =
+ HConfig#{logger_std_h => StdHConfig#{toggle_sync_qlen => 2,
+ drop_new_reqs_qlen => 1000,
+ flush_reqs_qlen => 2000,
+ enable_burst_limit => false}},
+ ok = logger:set_handler_config(?MODULE, NewHConfig),
+ Pid = spawn_link(fun() -> send_requests(?MODULE, 1, [{filesync,[]},
+ {info,[]},
+ {reset,[]},
+ {change_config,[]}])
+ end),
+ Sent = send_burst({t,10000}, seq, {chars,79}, info),
+ Pid ! {self(),finish},
+ ReqResult = receive {Pid,Result} -> Result end,
+ Logged = count_lines(Log),
+ ct:pal("Number of messages sent = ~w~nNumber of messages logged = ~w",
+ [Sent,Logged]),
+ FindError = fun(Res) ->
+ [E || E <- Res,
+ is_tuple(E) andalso (element(1,E) == error)]
+ end,
+ Errors = [{Req,FindError(Res)} || {Req,Res} <- ReqResult],
+ NoOfReqs = lists:foldl(fun({_,Res}, N) -> N + length(Res) end, 0, ReqResult),
+ ct:pal("~w requests made. Errors: ~n~p", [NoOfReqs,Errors]),
+ ok = file:delete(Log).
+handler_requests_under_load(cleanup, _Config) ->
+ ok = stop_handler(?MODULE).
+
+send_requests(HName, TO, Reqs = [{Req,Res}|Rs]) ->
+ receive
+ {From,finish} ->
+ From ! {self(),Reqs}
+ after
+ TO ->
+ Result =
+ case Req of
+ change_config ->
+ logger:set_handler_config(HName, logger_std_h,
+ #{enable_kill_overloaded =>
+ false});
+ Func ->
+ logger_std_h:Func(HName)
+ end,
+ send_requests(HName, TO, Rs ++ [{Req,[Result|Res]}])
+ end.
+
+
+%%%-----------------------------------------------------------------
+%%%
+start_handler(Name, TTY, Config) when TTY == standard_io;
+ TTY == standard_error ->
+ ok = logger:add_handler(Name,
+ logger_std_h,
+ #{logger_std_h => #{type => TTY},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,{_,HConfig = #{logger_std_h := StdHConfig}}} =
+ logger:get_handler_config(Name),
+ {HConfig,StdHConfig};
+
+start_handler(Name, FuncName, Config) ->
+ Dir = ?config(priv_dir,Config),
+ Log = filename:join(Dir, lists:concat([FuncName,".log"])),
+ ct:pal("Logging to ~tp", [Log]),
+ Type = {file,Log},
+ ok = logger:add_handler(Name,
+ logger_std_h,
+ #{logger_std_h => #{type => Type},
+ filter_default=>log,
+ filters=>?DEFAULT_HANDLER_FILTERS([Name]),
+ formatter=>{?MODULE,op}}),
+ {ok,{_,HConfig = #{logger_std_h := StdHConfig}}} =
+ logger:get_handler_config(Name),
+ {Log,HConfig,StdHConfig}.
+
+stop_handler(Name) ->
+ ok = logger:remove_handler(Name),
+ ct:pal("Handler ~p stopped!", [Name]).
+
+count_lines(File) ->
+ wait_until_written(File, -1),
+ count_lines1(File).
+
+wait_until_written(File, Sz) ->
+ timer:sleep(2000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz}} ->
+ timer:sleep(1000),
+ case file:read_file_info(File) of
+ {ok,#file_info{size = Sz1}} ->
+ ok;
+ {ok,#file_info{size = Sz2}} ->
+ wait_until_written(File, Sz2)
+ end;
+ {ok,#file_info{size = Sz1}} ->
+ wait_until_written(File, Sz1)
+ end.
+
+count_lines1(File) ->
+ Counter = fun Cnt(Dev,LC) ->
+ case file:read_line(Dev) of
+ eof -> LC;
+ _ -> Cnt(Dev,LC+1)
+ end
+ end,
+ {_,Dev} = file:open(File, [read]),
+ Lines = Counter(Dev, 0),
+ file:close(Dev),
+ Lines.
+
+send_burst(NorT, Type, {chars,Sz}, Class) ->
+ Text = [34 + rand:uniform(126-34) || _ <- lists:seq(1,Sz)],
+ case NorT of
+ {n,N} ->
+ %% process_flag(priority, high),
+ send_n_burst(N, Type, Text, Class),
+ %% process_flag(priority, normal),
+ N;
+ {t,T} ->
+ ct:pal("Sending messages sequentially for ~w ms", [T]),
+ T0 = erlang:monotonic_time(millisecond),
+ send_t_burst(T0, T, Text, Class, 0)
+ end.
+
+send_n_burst(0, _, _Text, _Class) ->
+ ok;
+send_n_burst(N, seq, Text, Class) ->
+ ok = logger:Class(Text, ?domain),
+ send_n_burst(N-1, seq, Text, Class);
+send_n_burst(N, {spawn,Ps,TO}, Text, Class) ->
+ ct:pal("~w processes each sending ~w messages", [Ps,N]),
+ PerProc = fun() ->
+ send_n_burst(N, seq, Text, Class)
+ end,
+ MRefs = [begin if TO == 0 -> ok; true -> timer:sleep(TO) end,
+ monitor(process,spawn_link(PerProc)) end ||
+ _ <- lists:seq(1,Ps)],
+ lists:foreach(fun(MRef) ->
+ receive
+ {'DOWN', MRef, _, _, _} ->
+ ok
+ end
+ end, MRefs),
+ ct:pal("Message burst sent", []),
+ ok.
+
+send_t_burst(T0, T, Text, Class, N) ->
+ T1 = erlang:monotonic_time(millisecond),
+ if (T1-T0) > T ->
+ N;
+ true ->
+ ok = logger:Class(Text, ?domain),
+ send_t_burst(T0, T, Text, Class, N+1)
+ end.
+
+%%%-----------------------------------------------------------------
+%%% Formatter callback
+%%% Using this to send the formatted string back to the test case
+%%% process - so it can check for logged events.
+format(_,bad_return) ->
+ bad_return;
+format(_,crash) ->
+ erlang:error(formatter_crashed);
+format(#{msg:={string,String0}},no_nl) ->
+ String = unicode:characters_to_list(String0),
+ String;
+format(#{msg:={string,String0}},nl) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={string,String0}},op) ->
+ String = unicode:characters_to_list(String0),
+ String++"\n";
+format(#{msg:={report,#{label:={supervisor,progress}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={gen_server,terminate}}}},op) ->
+ "";
+format(#{msg:={report,#{label:={proc_lib,crash}}}},op) ->
+ "";
+format(#{msg:={F,A}},Pid) when is_list(F), is_list(A) ->
+ String = lists:flatten(io_lib:format(F,A)),
+ Pid ! {log,String},
+ String++"\n";
+format(#{msg:={string,String0}},Pid) ->
+ String = unicode:characters_to_list(String0),
+ Pid ! {log,String},
+ String++"\n".
+
+add_remove_instance_nofile(Type) ->
+ ok = logger:add_handler(?MODULE,logger_std_h,
+ #{logger_std_h => #{type => Type},
+ filter_default=>stop,
+ filters=>?DEFAULT_HANDLER_FILTERS([?MODULE]),
+ formatter=>{?MODULE,self()}}),
+ Pid = whereis(?MODULE),
+ true = is_pid(Pid),
+ group_leader(group_leader(),Pid), % to get printouts in test log
+ logger:info(M1=?msg,?domain),
+ ?check(M1),
+ %% check that filesync doesn't do damage even if not relevant
+ ok = logger_std_h:filesync(?MODULE),
+ ok = logger:remove_handler(?MODULE),
+ timer:sleep(500),
+ undefined = whereis(?MODULE),
+ logger:info(?msg,?domain),
+ ?check_no_log,
+ ok.
+
+logger_std_h_remove() ->
+ logger:remove_handler(?MODULE).
+logger_std_h_remove(Id) ->
+ logger:remove_handler(Id).
+
+try_read_file(FileName, Expected, Time) when Time > 0 ->
+ case file:read_file(FileName) of
+ Expected ->
+ ok;
+ Error = {error,_Reason} ->
+ ct:pal("Can't read ~tp: ~tp", [FileName,Error]),
+ erlang:error(Error);
+ Got ->
+ ct:pal("try_read_file got ~tp", [Got]),
+ timer:sleep(500),
+ try_read_file(FileName, Expected, Time-500)
+ end;
+try_read_file(FileName, Expected, _) ->
+ ct:pal("Missing pattern ~tp in ~tp", [Expected,FileName]),
+ erlang:error({error,missing_expected_pattern}).
+
+try_match_file(FileName, Pattern, Time) ->
+ try_match_file(FileName, Pattern, Time, <<>>).
+
+try_match_file(FileName, Pattern, Time, _) when Time > 0 ->
+ case file:read_file(FileName) of
+ {ok, Bin} ->
+ case re:run(Bin,Pattern,[{capture,none}]) of
+ match ->
+ unicode:characters_to_list(Bin);
+ _ ->
+ timer:sleep(100),
+ try_match_file(FileName, Pattern, Time-100, Bin)
+ end;
+ Error ->
+ erlang:error(Error)
+ end;
+try_match_file(_,Pattern,_,Incorrect) ->
+ ct:pal("try_match_file did not match pattern: ~p~nGot: ~p~n",
+ [Pattern,Incorrect]),
+ erlang:error({error,not_matching_pattern,Pattern,Incorrect}).
+
+%%%-----------------------------------------------------------------
+%%%
+start_op_trace() ->
+ TraceFun = fun({trace,_,call,{_Mod,Func,Details}}, Pid) ->
+ Pid ! {trace_call,Func,Details},
+ Pid;
+ ({trace,_,return_from,{_Mod,Func,_},RetVal}, Pid) ->
+ Pid ! {trace_return,Func,RetVal},
+ Pid
+ end,
+ TRecvPid = spawn_link(fun() -> trace_receiver(5000) end),
+ {ok,_} = dbg:tracer(process, {TraceFun, TRecvPid}),
+
+ {ok,_} = dbg:p(whereis(?MODULE), [c]),
+ {ok,_} = dbg:p(self(), [c]),
+
+ MS1 = dbg:fun2ms(fun([_]) -> return_trace() end),
+ {ok,_} = dbg:tp(logger_h_common, check_load, 1, MS1),
+
+ {ok,_} = dbg:tpl(logger_h_common, flush_log_requests, 2, []),
+
+ MS2 = dbg:fun2ms(fun([_,mode]) -> return_trace() end),
+ {ok,_} = dbg:tpl(ets, lookup, 2, MS2),
+
+ ct:pal("Tracing started!", []),
+ TRecvPid.
+
+stop_op_trace(TRecvPid) ->
+ dbg:stop_clear(),
+ unlink(TRecvPid),
+ exit(TRecvPid, kill),
+ ok.
+
+find_mode(flush, Events) ->
+ lists:any(fun({trace_call,flush_log_requests,[_,_]}) -> true;
+ (_) -> false
+ end, Events);
+find_mode(Mode, Events) ->
+ lists:keymember([{mode,Mode}], 3, Events).
+
+find_switch(From, To, Events) ->
+ try lists:foldl(fun({trace_return,check_load,{To,_,_,_}},
+ {trace_call,check_load,[#{mode := From}]}) ->
+ throw(match);
+ (Event, _) ->
+ Event
+ end, undefined, Events) of
+ _ -> false
+ catch
+ throw:match -> true
+ end.
+
+analyse_trace(TRecvPid, TestFun) ->
+ TRecvPid ! {test,self(),TestFun},
+ receive
+ {result,TRecvPid,Result} ->
+ Result
+ after
+ 60000 ->
+ fails
+ end.
+
+trace_receiver(IdleT) ->
+ Msgs = receive_until_idle(IdleT, 5, []),
+ ct:pal("~w trace events generated", [length(Msgs)]),
+ analyse(Msgs).
+
+receive_until_idle(IdleT, WaitN, Msgs) ->
+ receive
+ Msg = {trace_call,_,_} ->
+ receive_until_idle(IdleT, 5, [Msg | Msgs]);
+ Msg = {trace_return,_,_} ->
+ receive_until_idle(IdleT, 5, [Msg | Msgs])
+ after
+ IdleT ->
+ if WaitN == 0 ->
+ Msgs;
+ true ->
+ receive_until_idle(IdleT, WaitN-1, Msgs)
+ end
+ end.
+
+analyse(Msgs) ->
+ receive
+ {test,From,TestFun} ->
+ From ! {result,self(),TestFun(Msgs)},
+ analyse(Msgs)
+ end.
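The overload-protection keys that the op_switch_*, limit_burst_*, *_kill_* and restart_after cases above poke at are ordinary logger_std_h configuration. A minimal sketch, with an assumed handler id and arbitrary values, only to show the shape of the call:

%% Sketch only: the handler id and the numbers are arbitrary; the keys
%% are the ones the suite passes to logger:set_handler_config/3.
ok = logger:set_handler_config(my_std_h, logger_std_h,
                               #{toggle_sync_qlen       => 100,
                                 drop_new_reqs_qlen     => 500,
                                 flush_reqs_qlen        => 1000,
                                 enable_burst_limit     => true,
                                 burst_limit_size       => 500,
                                 burst_window_time      => 1000,
                                 enable_kill_overloaded => false}).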
diff --git a/lib/kernel/test/wrap_log_reader_SUITE.erl b/lib/kernel/test/wrap_log_reader_SUITE.erl
index 40a016aed0..b1ee29a11f 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE.erl
@@ -428,13 +428,14 @@ stop() ->
ok = wrap_log_test:stop(),
dl_wait().
-%% Give disk logs opened by 'logger' and 'wlt' time to close after
+%% Give disk logs opened by 'wlr_logger' and 'wlt' time to close after
%% receiving EXIT signals.
dl_wait() ->
case disk_log:accessible_logs() of
{[], []} ->
ok;
- _ ->
+ _X ->
+ erlang:display(_X),
timer:sleep(100),
dl_wait()
end.
@@ -507,27 +508,27 @@ add_ext(Name, Ext) ->
%% disk_log.
open(Log, File, Where) ->
- logger ! {open, self(), Log, File},
+ wlr_logger ! {open, self(), Log, File},
rec1(ok, Where).
open_ext(Log, File, Where) ->
- logger ! {open_ext, self(), Log, File},
+ wlr_logger ! {open_ext, self(), Log, File},
rec1(ok, Where).
close(Log) ->
- logger ! {close, self(), Log},
+ wlr_logger ! {close, self(), Log},
rec(ok, ?LINE).
sync(Log) ->
- logger ! {sync, self(), Log},
+ wlr_logger ! {sync, self(), Log},
rec(ok, ?LINE).
log_terms(File, Terms) ->
- logger ! {log_terms, self(), File, Terms},
+ wlr_logger ! {log_terms, self(), File, Terms},
rec(ok, ?LINE).
blog_terms(File, Terms) ->
- logger ! {blog_terms, self(), File, Terms},
+ wlr_logger ! {blog_terms, self(), File, Terms},
rec(ok, ?LINE).
rec1(M, Where) ->
diff --git a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
index 38449b6bb3..2b24ccc66f 100644
--- a/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
+++ b/lib/kernel/test/wrap_log_reader_SUITE_data/wrap_log_test.erl
@@ -36,9 +36,9 @@
-endif.
init() ->
- spawn(fun() -> start(logger) end),
+ spawn(fun() -> start(wlr_logger) end),
spawn(fun() -> start2(wlt) end),
- wait_registered(logger),
+ wait_registered(wlr_logger),
wait_registered(wlt),
ok.
@@ -52,9 +52,9 @@ wait_registered(Name) ->
end.
stop() ->
- catch logger ! exit,
+ catch wlr_logger ! exit,
catch wlt ! exit,
- wait_unregistered(logger),
+ wait_unregistered(wlr_logger),
wait_unregistered(wlt),
ok.
@@ -82,47 +82,47 @@ loop() ->
{open, Pid, Name, File} ->
R = disk_log:open([{name, Name}, {type, wrap}, {file, File},
{size, {?fsize, ?fno}}]),
- ?format("logger: open ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: open ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{open_ext, Pid, Name, File} ->
R = disk_log:open([{name, Name}, {type, wrap}, {file, File},
{format, external}, {size, {?fsize, ?fno}}]),
- ?format("logger: open ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: open ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{close, Pid, Name} ->
R = disk_log:close(Name),
- ?format("logger: close ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: close ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{sync, Pid, Name} ->
R = disk_log:sync(Name),
- ?format("logger: sync ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: sync ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{log_terms, Pid, Name, Terms} ->
R = disk_log:log_terms(Name, Terms),
- ?format("logger: log_terms ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: log_terms ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
{blog_terms, Pid, Name, Terms} ->
R = disk_log:blog_terms(Name, Terms),
- ?format("logger: blog_terms ~p -> ~p~n", [Name, R]),
+ ?format("wlr_logger: blog_terms ~p -> ~p~n", [Name, R]),
Pid ! R,
loop();
exit ->
- ?format("Stopping logger~n", []),
+ ?format("Stopping wlr_logger~n", []),
exit(normal);
_Else ->
- ?format("logger: ignored: ~p~n", [_Else]),
+ ?format("wlr_logger: ignored: ~p~n", [_Else]),
loop()
end.
diff --git a/lib/public_key/doc/src/public_key.xml b/lib/public_key/doc/src/public_key.xml
index dea35bc390..7284da0499 100644
--- a/lib/public_key/doc/src/public_key.xml
+++ b/lib/public_key/doc/src/public_key.xml
@@ -95,10 +95,12 @@
<p><c>| {#'PBEParameter{}, digest_type()} | #'PBES2-params'{}}</c></p>
</item>
- <tag><c>public_key() =</c></tag>
+ <tag><marker id="type-public_key"/>
+ <c>public_key() =</c></tag>
<item><p><c>rsa_public_key() | dsa_public_key() | ec_public_key()</c></p></item>
- <tag><c>private_key() =</c></tag>
+ <tag><marker id="type-private_key"/>
+ <c>private_key() =</c></tag>
<item><p><c>rsa_private_key() | dsa_private_key() | ec_private_key()</c></p></item>
<tag><c>rsa_public_key() =</c></tag>
diff --git a/lib/reltool/src/reltool_target.erl b/lib/reltool/src/reltool_target.erl
index 503e1971b9..0eeeca4a61 100644
--- a/lib/reltool/src/reltool_target.erl
+++ b/lib/reltool/src/reltool_target.erl
@@ -55,7 +55,7 @@ mandatory_modules() ->
kernel_processes(KernelApp) ->
[
{kernelProcess, heart, {heart, start, []}},
- {kernelProcess, error_logger , {error_logger, start_link, []}},
+ {kernelProcess, logger , {logger_server, start_link, []}},
{kernelProcess,
application_controller,
{application_controller, start, [KernelApp]}}
diff --git a/lib/runtime_tools/src/appmon_info.erl b/lib/runtime_tools/src/appmon_info.erl
index b5500085a3..9c587475ca 100644
--- a/lib/runtime_tools/src/appmon_info.erl
+++ b/lib/runtime_tools/src/appmon_info.erl
@@ -690,7 +690,7 @@ find_avoid() ->
[P|Accu];
_ -> Accu end end,
[undefined],
- [application_controller, init, error_logger, gs,
+ [application_controller, init, gs,
node_serv, appmon, appmon_a, appmon_info]).
diff --git a/lib/sasl/doc/src/appup.xml b/lib/sasl/doc/src/appup.xml
index a43a966dcb..d9d339884f 100644
--- a/lib/sasl/doc/src/appup.xml
+++ b/lib/sasl/doc/src/appup.xml
@@ -249,9 +249,17 @@
<pre>
{restart_application, Application}
Application = atom()</pre>
- <p>Restarting an application means that the application is
- stopped and then started again, similar to using the instructions
- <c>remove_application</c> and <c>add_application</c> in sequence.</p>
+ <p>Restarting an application means that the application is stopped
+ and then started again, similar to using the instructions
+ <c>remove_application</c> and <c>add_application</c> in sequence.
+ Note that, even if the application has been started before the
+ release upgrade is performed, <c>restart_application</c> may only
+ <c>load</c> it rather than <c>start</c> it, depending on the
+ application's <c>start type</c>:
+ If <c>Type = load</c>, the application is only loaded.
+ If <c>Type = none</c>, the application is not loaded and not
+ started, although the code for its modules is loaded.
+ </p>
</section>
<section>
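A minimal example (not part of the patch; the application name and versions are hypothetical) of an .appup term using the restart_application instruction described above:

%% Hypothetical .appup: restart myapp on both upgrade and downgrade.
{"2.0",
 [{"1.0", [{restart_application, myapp}]}],   %% upgrade from 1.0
 [{"1.0", [{restart_application, myapp}]}]}.  %% downgrade to 1.0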
diff --git a/lib/sasl/doc/src/sasl_app.xml b/lib/sasl/doc/src/sasl_app.xml
index e0693fcb60..48b0b8eafb 100644
--- a/lib/sasl/doc/src/sasl_app.xml
+++ b/lib/sasl/doc/src/sasl_app.xml
@@ -34,12 +34,9 @@
<p>The SASL application provides the following services:</p>
<list type="bulleted">
<item><c>alarm_handler</c></item>
- <item><c>rb</c></item>
<item><c>release_handler</c></item>
<item><c>systools</c></item>
</list>
- <p>The SASL application also includes <c>error_logger</c> event
- handlers for formatting SASL error and crash reports.</p>
<note>
<p>The SASL application in OTP has nothing to do with
"Simple Authentication and Security Layer" (RFC 4422).</p>
@@ -47,51 +44,109 @@
</description>
<section>
- <title>Error Logger Event Handlers</title>
- <p>The following error logger event handlers are used by
- the SASL application.</p>
+ <title>Configuration</title>
+ <p>The following configuration parameters are defined for the SASL
+ application. For more information about configuration parameters, see
+ <seealso marker="kernel:app"><c>app(4)</c></seealso> in Kernel.</p>
+ <p>All configuration parameters are optional.</p>
+ <taglist>
+ <tag><c><![CDATA[start_prg = string() ]]></c></tag>
+ <item>
+ <p>Specifies the program to be used when restarting the system
+ during release installation. Default is
+ <c>$OTP_ROOT/bin/start</c>.</p>
+ </item>
+ <tag><c><![CDATA[masters = [atom()] ]]></c></tag>
+ <item>
+ <p>Specifies the nodes used by this node to read/write release
+ information. This parameter is ignored if parameter
+ <c>client_directory</c> is not set.</p>
+ </item>
+ <tag><c><![CDATA[client_directory = string() ]]></c></tag>
+ <item>
+ <p>This parameter specifies the client directory at the master
+ nodes. For details, see
+ <seealso marker="doc/design_principles:release_handling">Release Handling</seealso>
+ in <em>OTP Design Principles</em>. This parameter is
+ ignored if parameter <c>masters</c> is not set.</p>
+ </item>
+ <tag><c><![CDATA[static_emulator = true | false ]]></c></tag>
+ <item>
+ <p>Indicates if the Erlang emulator is statically installed. A
+ node with a static emulator cannot switch dynamically to a
+ new emulator, as the executable files are written into memory
+ statically. This parameter is ignored if parameters <c>masters</c>
+ and <c>client_directory</c> are not set.</p>
+ </item>
+ <tag><c><![CDATA[releases_dir = string() ]]></c></tag>
+ <item>
+ <p>Indicates where the <c>releases</c> directory is located.
+ The release handler writes all its files to this directory.
+ If this parameter is not set, the OS environment parameter
+ <c>RELDIR</c> is used. By default, this is
+ <c>$OTP_ROOT/releases</c>.</p>
+ </item>
+ <tag><c><![CDATA[utc_log = true | false ]]></c></tag>
+ <item>
+ <p>If set to <c>true</c>, all dates in textual log outputs are
+ displayed in Universal Coordinated Time with the string
+ <c>UTC</c> appended.</p>
+ </item>
+ </taglist>
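+    <p>A minimal <c>sys.config</c> sketch with some of the parameters
+    above set to hypothetical values:</p>
+    <code type="erl">
+[{sasl,
+  [{start_prg, "/usr/local/otp/bin/start"},
+   {releases_dir, "/usr/local/otp/releases"},
+   {utc_log, true}]}].
+    </code>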
+ </section>
+
+ <section>
+ <title>Deprecated Error Logger Event Handlers and Configuration</title>
+ <p>In OTP-21, a new API for logging was added to Erlang/OTP. The
+ old <c>error_logger</c> event manager, and event handlers
+ running on this manager, will still work, but they are not used
+ by default.</p>
+ <p>The error logger event handlers <c>sasl_report_tty_h</c>
+      and <c>sasl_report_file_h</c> were earlier used for printing
+      the so-called SASL reports, that is, <em>supervisor
+ reports</em>, <em>crash reports</em>, and <em>progress
+ reports</em>. These reports are now also printed by the standard
+ logger handler started by the Kernel application. Progress
+      reports are stopped by default by a filter, but can easily be
+      re-enabled by setting the Kernel configuration
+ parameter <seealso marker="kernel:kernel_app#logger_log_progress"><c>logger_log_progress=true</c></seealso>.</p>
+ <p>If the old error logger event handlers are still desired, they
+ must be added by
+ calling <c>error_logger:add_report_handler/1,2</c>.</p>
<taglist>
<tag><c>sasl_report_tty_h</c></tag>
<item>
<p>Formats and writes <em>supervisor reports</em>, <em>crash
reports</em>, and <em>progress reports</em> to <c>stdio</c>.
This error logger event handler uses
- <seealso marker="kernel:kernel_app#error_logger_format_depth">error_logger_format_depth</seealso>
- in the Kernel application to limit how much detail is
- printed in crash and supervisor reports.</p>
+ <seealso marker="kernel:kernel_app#logger_format_depth"><c>logger_format_depth</c></seealso>
+ in the Kernel application to limit how much detail is printed
+ in crash and supervisor reports. If <c>logger_format_depth</c>
+ is not set, it uses the old <c>error_logger_format_depth</c>
+ instead.</p>
</item>
<tag><c>sasl_report_file_h</c></tag>
<item>
<p>Formats and writes <em>supervisor reports</em>, <em>crash
report</em>, and <em>progress report</em> to a single file.
This error logger event handler uses
- <seealso marker="kernel:kernel_app#error_logger_format_depth">error_logger_format_depth</seealso>
- in the Kernel application to limit the details
- printed in crash and supervisor reports.</p>
- </item>
- <tag><c>log_mf_h</c></tag>
- <item>
- <p>This error logger writes <em>all</em> events sent to the
- error logger to disk. Multiple files and log rotation are
- used. For efficiency reasons, each event is written as a
- binary. For more information about this handler,
- see <seealso marker="stdlib:log_mf_h">the STDLIB Reference
- Manual</seealso>.</p>
- <p>To activate this event handler, three SASL
- configuration parameters must be set,
- <c>error_logger_mf_dir</c>, <c>error_logger_mf_maxbytes</c>,
- and <c>error_logger_mf_maxfiles</c>. The next section provides
- more information about the configuration parameters.</p>
+ <seealso marker="kernel:kernel_app#logger_format_depth"><c>logger_format_depth</c></seealso>
+ in the Kernel application to limit the details printed in
+ crash and supervisor reports. If <c>logger_format_depth</c> is
+ not set, it uses the old <c>error_logger_format_depth</c>
+ instead.</p>
</item>
</taglist>
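+    <p>A minimal sketch of adding the old handlers back (the log file
+    path is hypothetical; the second argument to
+    <c>sasl_report_file_h</c> is <c>{File, Modes, Type}</c>):</p>
+    <code type="erl">
+error_logger:add_report_handler(sasl_report_tty_h, all),
+error_logger:add_report_handler(sasl_report_file_h,
+                                {"/var/log/my_sasl.log", [write], all}).
+    </code>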
- </section>
-
- <section>
- <title>Configuration</title>
- <p>The following configuration parameters are defined for the SASL
- application. For more information about configuration parameters, see
- <seealso marker="kernel:app"><c>app(4)</c></seealso> in Kernel.</p>
- <p>All configuration parameters are optional.</p>
+ <p>A similar behaviour, but still using the new logger API, can be
+ obtained by setting the Kernel application environment
+ variable <seealso marker="kernel:kernel_app#logger_sasl_compatible"><c>logger_sasl_compatible=true</c></seealso>. This will add a
+ second instance of the standard logger handler
+ named <c>sasl_h</c>, which will only print the SASL reports. No
+ SASL reports will then be printed by the Kernel logger
+ handler.</p>
+ <p>The <c>sasl_h</c> handler will be configured according to the
+ values of the following SASL application environment
+ variables.</p>
<taglist>
<tag><c><![CDATA[sasl_error_logger = Value ]]></c></tag>
<item>
@@ -124,6 +179,19 @@
<c>sasl_error_logger</c> to error reports or progress reports,
or both. Default is <c>all</c>.</p>
</item>
+ </taglist>
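+    <p>A minimal <c>sys.config</c> sketch (the file name is hypothetical)
+    that enables the compatibility handler and sends the SASL reports to
+    a separate file:</p>
+    <code type="erl">
+[{kernel, [{logger_sasl_compatible, true}]},
+ {sasl,   [{sasl_error_logger, {file, "log/sasl.log"}},
+           {errlog_type, all}]}].
+    </code>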
+
+ <p>The error logger event handler <c>log_mf_h</c> can also still
+ be used. This event handler writes <em>all</em> events sent to
+ the error logger to disk. Multiple files and log rotation are
+ used. For efficiency reasons, each event is written as a
+ binary. For more information about this handler,
+ see <seealso marker="stdlib:log_mf_h">the STDLIB Reference
+ Manual</seealso>.</p>
+ <p>To activate this event handler, three SASL configuration
+ parameters must be
+ set:</p>
+ <taglist>
<tag><c><![CDATA[error_logger_mf_dir = string() | false ]]></c></tag>
<item>
<p>Specifies in which directory <c>log_mf_h</c> is to store
@@ -142,49 +210,12 @@
this parameter is undefined, the <c>log_mf_h</c> handler is
not installed.</p>
</item>
- <tag><c><![CDATA[start_prg = string() ]]></c></tag>
- <item>
- <p>Specifies the program to be used when restarting the system
- during release installation. Default is
- <c>$OTP_ROOT/bin/start</c>.</p>
- </item>
- <tag><c><![CDATA[masters = [atom()] ]]></c></tag>
- <item>
- <p>Specifies the nodes used by this node to read/write release
- information. This parameter is ignored if parameter
- <c>client_directory</c> is not set.</p>
- </item>
- <tag><c><![CDATA[client_directory = string() ]]></c></tag>
- <item>
- <p>This parameter specifies the client directory at the master
- nodes. For details, see
- <seealso marker="doc/design_principles:release_handling">Release Handling</seealso>
- in <em>OTP Design Principles</em>. This parameter is
- ignored if parameter <c>masters</c> is not set.</p>
- </item>
- <tag><c><![CDATA[static_emulator = true | false ]]></c></tag>
- <item>
- <p>Indicates if the Erlang emulator is statically installed. A
- node with a static emulator cannot switch dynamically to a
- new emulator, as the executable files are written into memory
- statically. This parameter is ignored if parameters <c>masters</c>
- and <c>client_directory</c> are not set.</p>
- </item>
- <tag><c><![CDATA[releases_dir = string() ]]></c></tag>
- <item>
- <p>Indicates where the <c>releases</c> directory is located.
- The release handler writes all its files to this directory.
- If this parameter is not set, the OS environment parameter
- <c>RELDIR</c> is used. By default, this is
- <c>$OTP_ROOT/releases</c>.</p>
- </item>
- <tag><c><![CDATA[utc_log = true | false ]]></c></tag>
- <item>
- <p>If set to <c>true</c>, all dates in textual log outputs are
- displayed in Universal Coordinated Time with the string
- <c>UTC</c> appended.</p>
- </item>
</taglist>
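+    <p>A minimal sketch that activates <c>log_mf_h</c> with rotation over
+    five files of 100 kB each (the directory is hypothetical):</p>
+    <code type="erl">
+[{sasl,
+  [{error_logger_mf_dir, "/var/log/erlang"},
+   {error_logger_mf_maxbytes, 100000},
+   {error_logger_mf_maxfiles, 5}]}].
+    </code>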
+ <p>The new <seealso marker="kernel:logger_disk_log_h">
+ <c>logger_disk_log_h</c></seealso> might be an alternative
+ to <c>log_mf_h</c> if log rotation is desired. This does,
+ however, write the log events in clear text and not as binaries.</p>
+
</section>
<section>
diff --git a/lib/sasl/src/sasl.app.src b/lib/sasl/src/sasl.app.src
index 1e8e58a978..60d08ffa54 100644
--- a/lib/sasl/src/sasl.app.src
+++ b/lib/sasl/src/sasl.app.src
@@ -40,8 +40,7 @@
]},
{registered, [sasl_sup, alarm_handler, release_handler]},
{applications, [kernel, stdlib]},
- {env, [{sasl_error_logger, tty},
- {errlog_type, all}]},
+ {env, []},
{mod, {sasl, []}},
{runtime_dependencies, ["tools-2.6.14","stdlib-3.4","kernel-5.3",
"erts-9.0"]}]}.
diff --git a/lib/sasl/src/sasl.appup.src b/lib/sasl/src/sasl.appup.src
index 94af164b20..221427874c 100644
--- a/lib/sasl/src/sasl.appup.src
+++ b/lib/sasl/src/sasl.appup.src
@@ -18,7 +18,11 @@
%% %CopyrightEnd%
{"%VSN%",
%% Up from - max one major revision back
- [{<<"3\\.0\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-20.*
+ [{<<"3\\.0\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.0
+ {<<"3\\.1(\\.[0-2]+)*">>,[restart_new_emulator]}, % OTP-20.1+
+ {<<"3\\.1(\\.[3-9]+)*">>,[restart_new_emulator]}], % OTP-21
%% Down to - max one major revision back
- [{<<"3\\.0\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.*
+ [{<<"3\\.0\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.*
+ {<<"3\\.1(\\.[0-2]+)*">>,[restart_new_emulator]}, % OTP-20.1+
+ {<<"3\\.1(\\.[3-9]+)*">>,[restart_new_emulator]}] % OTP-21
}.
diff --git a/lib/sasl/src/sasl.erl b/lib/sasl/src/sasl.erl
index 24afaee183..657eb6688a 100644
--- a/lib/sasl/src/sasl.erl
+++ b/lib/sasl/src/sasl.erl
@@ -31,45 +31,52 @@
%%%-----------------------------------------------------------------
-behaviour(application).
--record(state, {sasl_error_logger, error_logger_mf}).
+-record(state, {sasl_logger, error_logger_mf}).
start(_, []) ->
- Handler = get_sasl_error_logger(),
- Type = get_sasl_error_logger_type(),
+ {Dest,Level} = get_logger_info(),
Mf = get_error_logger_mf(),
- add_sasl_error_logger(Handler, Type),
+ add_sasl_logger(Dest, Level),
add_error_logger_mf(Mf),
- State = #state{sasl_error_logger = Handler, error_logger_mf = Mf},
+ State = #state{sasl_logger = Dest, error_logger_mf = Mf},
case supervisor:start_link({local, sasl_sup}, sasl, []) of
{ok, Pid} -> {ok, Pid, State};
Error -> Error
end.
stop(State) ->
- delete_sasl_error_logger(State#state.sasl_error_logger),
+ delete_sasl_logger(State#state.sasl_logger),
delete_error_logger_mf(State#state.error_logger_mf).
%%-----------------------------------------------------------------
%% Internal functions
%%-----------------------------------------------------------------
-get_sasl_error_logger() ->
+get_logger_info() ->
+ case application:get_env(kernel, logger_sasl_compatible) of
+ {ok,true} ->
+ {get_logger_dest(),get_logger_level()};
+ _ ->
+ {std,undefined}
+ end.
+
+get_logger_dest() ->
case application:get_env(sasl, sasl_error_logger) of
- {ok, false} -> undefined;
- {ok, tty} -> tty;
- {ok, {file, File}} when is_list(File) -> {file, File, [write]};
- {ok, {file, File, Modes}} when is_list(File), is_list(Modes) ->
- {file, File, Modes};
- {ok, Bad} -> exit({bad_config, {sasl, {sasl_error_logger, Bad}}});
- _ -> undefined
+ {ok, false} -> undefined;
+ {ok, tty} -> standard_io;
+ {ok, {file, File}} when is_list(File) -> {file, File};
+ {ok, {file, File, Modes}} when is_list(File), is_list(Modes) ->
+ {file, File, Modes};
+ {ok, Bad} -> exit({bad_config, {sasl, {sasl_logger_dest, Bad}}});
+ undefined -> standard_io
end.
-get_sasl_error_logger_type() ->
+get_logger_level() ->
case application:get_env(sasl, errlog_type) of
- {ok, error} -> error;
- {ok, progress} -> progress;
- {ok, all} -> all;
- {ok, Bad} -> exit({bad_config, {sasl, {errlog_type, Bad}}});
- _ -> all
+ {ok, error} -> error;
+ {ok, progress} -> info;
+ {ok, all} -> info;
+ {ok, Bad} -> exit({bad_config, {sasl, {errlog_type, Bad}}});
+ _ -> info
end.
get_error_logger_mf() ->
@@ -119,23 +126,32 @@ get_mf_maxf() ->
{ok, Bad} -> exit({bad_config, {sasl, {error_logger_mf_maxfiles, Bad}}})
end.
-add_sasl_error_logger(undefined, _Type) -> ok;
-add_sasl_error_logger(Handler, Type) ->
- error_logger:add_report_handler(mod(Handler), args(Handler, Type)).
-
-delete_sasl_error_logger(undefined) -> ok;
-delete_sasl_error_logger(Type) ->
- error_logger:delete_report_handler(mod(Type)).
-
-mod(tty) -> sasl_report_tty_h;
-mod({file, _File, _Modes}) -> sasl_report_file_h.
-
-args({file, File, Modes}, Type) -> {File, Modes, type(Type)};
-args(_, Type) -> type(Type).
-
-type(error) -> error;
-type(progress) -> progress;
-type(_) -> all.
+add_sasl_logger(undefined, _Level) -> ok;
+add_sasl_logger(std, undefined) -> ok;
+add_sasl_logger(Dest, Level) ->
+ FC0 = #{legacy_header=>true,
+ template=>[{logger_formatter,header},"\n",msg,"\n"]},
+ FC = case application:get_env(sasl,utc_log) of
+ {ok,Bool} when is_boolean(Bool) ->
+ FC0#{utc=>Bool};
+ _ ->
+ FC0
+ end,
+ ok = logger:add_handler(sasl_h,logger_std_h,
+ #{level=>Level,
+ filter_default=>stop,
+ filters=>
+ [{sasl_domain,
+ {fun logger_filters:domain/2,
+ {log,equals,[beam,erlang,otp,sasl]}}}],
+ logger_std_h=>#{type=>Dest},
+ formatter=>{logger_formatter,FC}}).
+
+delete_sasl_logger(undefined) -> ok;
+delete_sasl_logger(std) -> ok;
+delete_sasl_logger(_Type) ->
+ _ = logger:remove_handler(sasl_h),
+ ok.
add_error_logger_mf(undefined) -> ok;
add_error_logger_mf({Dir, MaxB, MaxF}) ->
diff --git a/lib/sasl/src/systools_make.erl b/lib/sasl/src/systools_make.erl
index a9e8bcecfa..f4b1b54fd1 100644
--- a/lib/sasl/src/systools_make.erl
+++ b/lib/sasl/src/systools_make.erl
@@ -1546,6 +1546,12 @@ mandatory_modules() ->
gen_server,
heart,
kernel,
+ logger,
+ logger_filters,
+ logger_server,
+ logger_backend,
+ logger_config,
+ logger_simple,
lists,
proc_lib,
supervisor
@@ -1570,7 +1576,7 @@ preloaded() ->
kernel_processes() ->
[{heart, heart, start, []},
- {error_logger, error_logger, start_link, []},
+ {logger, logger_server, start_link, []},
{application_controller, application_controller, start,
fun(Appls) ->
[{_,App}] = filter(fun({{kernel,_},_App}) -> true;
diff --git a/lib/sasl/test/sasl_SUITE.erl b/lib/sasl/test/sasl_SUITE.erl
index f12bde9b3d..7b63684c53 100644
--- a/lib/sasl/test/sasl_SUITE.erl
+++ b/lib/sasl/test/sasl_SUITE.erl
@@ -21,6 +21,7 @@
-include_lib("common_test/include/ct.hrl").
%% Test server specific exports
+-export([init_per_suite/1,end_per_suite/1]).
-export([all/0,groups/0,init_per_group/2,end_per_group/2]).
-export([init_per_testcase/2, end_per_testcase/2]).
@@ -37,6 +38,19 @@ all() ->
groups() ->
[].
+init_per_suite(Config) ->
+ S = application:get_env(kernel,logger_sasl_compatible),
+ application:set_env(kernel,logger_sasl_compatible,true),
+ [{sasl_compatible,S}|Config].
+
+end_per_suite(Config) ->
+ case ?config(sasl_compatible,Config) of
+ {ok,X} ->
+ application:set_env(kernel,logger_sasl_compatible,X);
+ undefined ->
+ application:unset_env(kernel,logger_sasl_compatible)
+ end.
+
init_per_group(_GroupName, Config) ->
Config.
diff --git a/lib/sasl/test/sasl_report_SUITE.erl b/lib/sasl/test/sasl_report_SUITE.erl
index 92df5e6e40..96975aaf69 100644
--- a/lib/sasl/test/sasl_report_SUITE.erl
+++ b/lib/sasl/test/sasl_report_SUITE.erl
@@ -53,13 +53,17 @@ gen_server_crash_unicode(Config) ->
gen_server_crash(Config, unicode).
gen_server_crash(Config, Encoding) ->
+ StopFilter = {fun(_,_) -> stop end, ok},
+ logger:add_handler_filter(logger_std_h,stop_all,StopFilter),
+ logger:add_handler_filter(cth_log_redirect,stop_all,StopFilter),
try
do_gen_server_crash(Config, Encoding)
after
- error_logger:tty(true),
+ ok = application:unset_env(kernel, logger_sasl_compatible),
ok = application:unset_env(sasl, sasl_error_logger),
ok = application:unset_env(kernel, error_logger_format_depth),
- error_logger:add_report_handler(cth_log_redirect)
+ logger:remove_handler_filter(logger_std_h,stop_all),
+ logger:remove_handler_filter(cth_log_redirect,stop_all)
end,
ok.
@@ -70,26 +74,26 @@ do_gen_server_crash(Config, Encoding) ->
SaslLog = filename:join(LogDir, "sasl.log"),
ok = filelib:ensure_dir(SaslLog),
- error_logger:delete_report_handler(cth_log_redirect),
- error_logger:tty(false),
application:stop(sasl),
Modes = [write, {encoding, Encoding}],
+ ok = application:set_env(kernel, logger_sasl_compatible, true),
ok = application:set_env(sasl, sasl_error_logger, {file,SaslLog,Modes},
[{persistent,true}]),
application:set_env(kernel, error_logger_format_depth, 30),
error_logger:logfile({open,KernelLog}),
application:start(sasl),
- io:format("~p\n", [gen_event:which_handlers(error_logger)]),
+ logger:i(print),
crash_me(),
error_logger:logfile(close),
+ application:stop(sasl),
check_file(KernelLog, utf8, 70000, 150000),
check_file(SaslLog, Encoding, 70000, 150000),
- %% ok = file:delete(KernelLog),
- %% ok = file:delete(SaslLog),
+ ok = file:delete(KernelLog),
+ ok = file:delete(SaslLog),
ok.
check_file(File, Encoding, Min, Max) ->
diff --git a/lib/ssh/doc/specs/.gitignore b/lib/ssh/doc/specs/.gitignore
new file mode 100644
index 0000000000..322eebcb06
--- /dev/null
+++ b/lib/ssh/doc/specs/.gitignore
@@ -0,0 +1 @@
+specs_*.xml
diff --git a/lib/ssh/doc/src/Makefile b/lib/ssh/doc/src/Makefile
index f54f5e0708..7c4dbd7af8 100644
--- a/lib/ssh/doc/src/Makefile
+++ b/lib/ssh/doc/src/Makefile
@@ -38,19 +38,22 @@ RELSYSDIR = $(RELEASE_PATH)/lib/$(APPLICATION)-$(VSN)
# Target Specs
# ----------------------------------------------------
XML_APPLICATION_FILES = ref_man.xml
-XML_REF3_FILES = ssh.xml \
- ssh_channel.xml \
- ssh_connection.xml \
+XML_REF3_FILES = \
+ ssh.xml \
+ ssh_client_channel.xml \
ssh_client_key_api.xml \
+ ssh_connection.xml \
+ ssh_server_channel.xml \
ssh_server_key_api.xml \
ssh_sftp.xml \
ssh_sftpd.xml \
XML_REF6_FILES = ssh_app.xml
-XML_PART_FILES = \
- usersguide.xml
-XML_CHAPTER_FILES = notes.xml \
+XML_PART_FILES = usersguide.xml
+
+XML_CHAPTER_FILES = \
+ notes.xml \
introduction.xml \
using_ssh.xml \
configure_algos.xml
@@ -84,12 +87,19 @@ HTML_REF_MAN_FILE = $(HTMLDIR)/index.html
TOP_PDF_FILE = $(PDFDIR)/$(APPLICATION)-$(VSN).pdf
+SPECS_FILES = $(XML_REF3_FILES:%.xml=$(SPECDIR)/specs_%.xml)
+
+TOP_SPECS_FILE = specs.xml
+
# ----------------------------------------------------
# FLAGS
# ----------------------------------------------------
XML_FLAGS +=
DVIPS_FLAGS +=
+#SPECS_FLAGS = -I../../include -I../../../kernel/include
+SPECS_FLAGS = -I../../../public_key/include -I../../../public_key/src -I../../..
+
# ----------------------------------------------------
# Targets
# ----------------------------------------------------
@@ -111,6 +121,7 @@ clean clean_docs:
rm -rf $(HTMLDIR)/*
rm -f $(MAN3DIR)/*
rm -f $(TOP_PDF_FILE) $(TOP_PDF_FILE:%.pdf=%.fo)
+ rm -f $(SPECS_FILES)
rm -f errs core *~
man: $(MAN3_FILES) $(MAN6_FILES)
diff --git a/lib/ssh/doc/src/configure_algos.xml b/lib/ssh/doc/src/configure_algos.xml
index dd60324851..15aece8968 100644
--- a/lib/ssh/doc/src/configure_algos.xml
+++ b/lib/ssh/doc/src/configure_algos.xml
@@ -117,6 +117,7 @@
<p>Due to this, it impossible to list in documentation what algorithms that are available in a certain installation.</p>
<p>There is an important command to list the actual algorithms and their ordering:
<seealso marker="ssh#default_algorithms-0">ssh:default_algorithms/0</seealso>.</p>
+ <marker id="example_default_algorithms"/>
<code type="erl">
0> ssh:default_algorithms().
[{kex,['ecdh-sha2-nistp384','ecdh-sha2-nistp521',
@@ -156,7 +157,7 @@
<section>
<title>Replacing the default set: preferred_algorithms</title>
- <p>See the <seealso marker="ssh#option_preferred_algorithms">Reference Manual</seealso> for details</p>
+ <p>See the <seealso marker="ssh#type-preferred_algorithms_common_option">Reference Manual</seealso> for details</p>
<p>Here follows a series of examples ranging from simple to more complex.</p>
@@ -301,7 +302,7 @@
First one has to list them with <c>ssh:default_algorithms()</c> and then do changes in the lists.</p>
<p>To facilitate addition or removal of algorithms the option <c>modify_algorithms</c> is available.
- See the <seealso marker="ssh#option_modify_algorithms">Reference Manual</seealso> for details.</p>
+ See the <seealso marker="ssh#type-modify_algorithms_common_option">Reference Manual</seealso> for details.</p>
<p>The option takes a list with instructions to append, prepend or remove algorithms:</p>
<code type="erl">
diff --git a/lib/ssh/doc/src/introduction.xml b/lib/ssh/doc/src/introduction.xml
index b7a73e2597..6fd8425adf 100644
--- a/lib/ssh/doc/src/introduction.xml
+++ b/lib/ssh/doc/src/introduction.xml
@@ -145,7 +145,7 @@
data that can be sent to the channel peer without adjusting the
window. Typically, an SSH client opens a channel, sends data (commands),
receives data (control information), and then closes the channel.
- The <seealso marker="ssh_channel">ssh_channel</seealso> behaviour
+ The <seealso marker="ssh_client_channel">ssh_client_channel</seealso> behaviour
handles generic parts of SSH channel management. This makes it easy
to write your own SSH client/server processes that use flow-control
and thus opens for more focus on the application logic.
diff --git a/lib/ssh/doc/src/ref_man.xml b/lib/ssh/doc/src/ref_man.xml
index 140ebd8c76..76e6520f94 100644
--- a/lib/ssh/doc/src/ref_man.xml
+++ b/lib/ssh/doc/src/ref_man.xml
@@ -35,7 +35,8 @@
</description>
<xi:include href="ssh_app.xml"/>
<xi:include href="ssh.xml"/>
- <xi:include href="ssh_channel.xml"/>
+ <xi:include href="ssh_client_channel.xml"/>
+ <xi:include href="ssh_server_channel.xml"/>
<xi:include href="ssh_connection.xml"/>
<xi:include href="ssh_client_key_api.xml"/>
<xi:include href="ssh_server_key_api.xml"/>
diff --git a/lib/ssh/doc/src/specs.xml b/lib/ssh/doc/src/specs.xml
new file mode 100644
index 0000000000..acdbe2ddfd
--- /dev/null
+++ b/lib/ssh/doc/src/specs.xml
@@ -0,0 +1,13 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<specs xmlns:xi="http://www.w3.org/2001/XInclude">
+ <xi:include href="../specs/specs_ssh.xml"/>
+ <xi:include href="../specs/specs_ssh_client_channel.xml"/>
+ <xi:include href="../specs/specs_ssh_client_key_api.xml"/>
+ <xi:include href="../specs/specs_ssh_connection.xml"/>
+ <xi:include href="../specs/specs_ssh_server_channel.xml"/>
+ <xi:include href="../specs/specs_ssh_server_key_api.xml"/>
+ <xi:include href="../specs/specs_ssh_sftp.xml"/>
+ <xi:include href="../specs/specs_ssh_sftpd.xml"/>
+</specs>
+
+
diff --git a/lib/ssh/doc/src/ssh.xml b/lib/ssh/doc/src/ssh.xml
index c641badd9a..0223831cb1 100644
--- a/lib/ssh/doc/src/ssh.xml
+++ b/lib/ssh/doc/src/ssh.xml
@@ -11,8 +11,8 @@
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
+
+ http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
@@ -31,203 +31,211 @@
<module>ssh</module>
<modulesummary>Main API of the ssh application</modulesummary>
<description>
- <p>Interface module for the <c>ssh</c> application.</p>
- <p>See <seealso marker="ssh:SSH_app#supported">ssh(6)</seealso> for details of supported version,
- algorithms and unicode support.</p>
+ <p>This is the interface module for the <c>SSH</c> application.
+ The Secure Shell (SSH) Protocol is a protocol for secure remote login
+ and other secure network services over an insecure network.
+ See <seealso marker="ssh:SSH_app#supported">ssh(6)</seealso> for details of supported RFCs, versions,
+ algorithms and unicode handling.
+ </p>
+ <p>With the SSH application it is possible to start <i>clients</i> and to start <i>daemons</i> (servers).
+ </p>
+ <p>Clients are started with
+ <seealso marker="#connect/2">connect/2</seealso>,
+ <seealso marker="#connect/3">connect/3</seealso> or
+ <seealso marker="#connect/4">connect/4</seealso>. They open an encrypted connection on top of TCP/IP.
+ In that encrypted connection one or more channels could be opened with
+ <seealso marker="ssh_connection#session_channel/2">ssh_connection:session_channel/2,4</seealso>.
+ </p>
+    <p>Each channel is an isolated "pipe" between a client-side process and a server-side process. Those process
+    pairs could handle, for example, file transfers (sftp) or remote command execution (shell, exec and/or cli).
+    If a custom shell is implemented, the user of the client could execute application-specific commands remotely. Note that
+    the user is not necessarily a human but may just as well be a system interfacing the SSH app.
+ </p>
+    <p>A server-side subsystem (channel) server is requested by the client with
+ <seealso marker="ssh_connection#subsystem/4">ssh_connection:subsystem/4</seealso>.
+ </p>
+ <p>A server (daemon) is started with
+ <seealso marker="#daemon/2">daemon/1</seealso>,
+ <seealso marker="#daemon/2">daemon/2</seealso> or
+ <seealso marker="#daemon/2">daemon/3</seealso>.
+ Possible channel handlers (subsystems) are declared with the
+ <seealso marker="#type-subsystem_daemon_option">subsystem</seealso> option when the daemon is started.
+ </p>
+    <p>To just run a shell on a remote machine, there are functions that bundle the three
+    needed steps into one:
+    <seealso marker="#shell/1">shell/1,2,3</seealso>.
+    Similarly, to just open an sftp (file transfer) connection to a remote machine, the simplest way is to use
+ <seealso marker="ssh_sftp#start_channel/1">ssh_sftp:start_channel/1,2,3</seealso>.
+ </p>
+ <p>To write your own client channel handler, use the behaviour
+ <seealso marker="ssh_client_channel">ssh_client_channel</seealso>. For server channel handlers use
+ <seealso marker="ssh_server_channel">ssh_server_channel</seealso> behaviour (replaces ssh_daemon_channel).
+ </p>
+    <p>Both clients and daemons accept options that control the exact behaviour. Some options are common to both.
+ The three sets are called
+ <seealso marker="#type-client_options">Client Options</seealso>,
+ <seealso marker="#type-daemon_options">Daemon Options</seealso> and
+ <seealso marker="#type-common_options">Common Options</seealso>.
+ </p>
+    <p>The descriptions of the options use the
+    <seealso marker="doc/reference_manual:typespec">Erlang Type Language</seealso> together with explanatory text.
+ </p>
+ <note>
+ <p>The <seealso marker="users_guide">User's Guide</seealso> has examples and a
+ <seealso marker="using_ssh">Getting Started</seealso>
+ section.
+ </p>
+ </note>
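+    <p>A minimal sketch of the client side (the host name, user and
+    command are hypothetical): connect, open a channel, run a remote
+    command and close the connection.</p>
+    <code type="erl">
+{ok, Conn} = ssh:connect("host.example.com", 22,
+                         [{user, "alice"}, {silently_accept_hosts, true}]),
+{ok, Chan} = ssh_connection:session_channel(Conn, 5000),
+success = ssh_connection:exec(Conn, Chan, "uname -a", 5000),
+%% replies arrive as {ssh_cm, Conn, {data, Chan, 0, Data}} messages
+ok = ssh:close(Conn).
+    </code>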
</description>
<section>
- <title>OPTIONS</title>
- <p>The exact behaviour of some functions can be adjusted with the use of options which are documented together
- with the functions. Generally could each option be used at most one time in each function call. If given two or more
- times, the effect is not predictable unless explicitly documented.</p>
- <p>The options are of different kinds:</p>
- <taglist>
- <tag>Limits</tag>
- <item><p>which alters limits in the system, for example number of simultaneous login attempts.</p></item>
-
- <tag>Timeouts</tag>
- <item><p>which give some defined behaviour if too long time elapses before a given event or action,
- for example time to wait for an answer.</p></item>
-
- <tag>Callbacks</tag>
- <item><p>which gives the caller of the function the possibility to execute own code on some events,
- for example calling an own logging function or to perform an own login function</p></item>
-
- <tag>Behaviour</tag>
- <item><p>which changes the systems behaviour.</p></item>
- </taglist>
- </section>
-
- <section>
- <title>DATA TYPES</title>
- <p>Type definitions that are used more than once in
- this module, or abstractions to indicate the intended use of the data
- type, or both:</p>
- <taglist>
- <tag><c>boolean() =</c></tag>
- <item><p><c>true | false</c></p></item>
-
- <tag><c>string() =</c></tag>
- <item><p><c>[byte()]</c></p></item>
-
- <tag><c>ssh_daemon_ref() =</c></tag>
- <item><p>opaque() -
- as returned by <c>ssh:daemon/[1,2,3]</c></p></item>
-
- <tag><c>ok_error(OKtype) = </c></tag>
- <item><p><c>{ok,OKtype} | {error, term()}</c></p></item>
-
- <tag><c>ok_error() = </c></tag>
- <item><p><c>ok_error(term())</c></p></item>
-
- <tag><c>ssh_connection_ref() =</c></tag>
- <item><p>opaque() - as returned by <c>ssh:connect/3</c></p></item>
-
- <tag><c>ip_address() =</c></tag>
- <item><p><c>inet::ip_address()</c></p></item>
-
- <tag><c>port_number() =</c></tag>
- <item><p><c>inet::port_number()</c></p></item>
-
- <tag><c>subsystem_spec() =</c></tag>
- <item><p><c>{subsystem_name(),
- {channel_callback(), channel_init_args()}}</c></p></item>
-
- <tag><c>subsystem_name() =</c></tag>
- <item><p><c>string()</c></p></item>
-
- <tag><c>channel_callback() =</c></tag>
- <item><p><c>atom()</c> - Name of the Erlang module
- implementing the subsystem using the <c>ssh_channel</c> behavior, see
- <seealso marker="ssh_channel">ssh_channel(3)</seealso></p></item>
+ <title>Keys and files</title>
+ <p>A number of objects must be present for the SSH application to work.
+    Those objects are by default stored in files.
+ The default names, paths and file formats are the same as for
+ <url href="http://www.openssh.com">OpenSSH</url>. Keys could be generated with the <c>ssh-keygen</c>
+ program from OpenSSH. See the
+ <seealso marker="using_ssh#running-an-erlang-ssh-daemon">User's Guide</seealso>.
+ </p>
+
+ <p>The paths could easily be changed by options:
+ <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> and
+ <seealso marker="#type-system_dir_daemon_option"><c>system_dir</c></seealso>.
+ </p>
+    <p>A completely different storage could be interfaced by writing callback modules
+ using the behaviours
+ <seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and/or
+ <seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>.
+ A callback module is installed with the option
+ <seealso marker="#type-key_cb_common_option"><c>key_cb</c></seealso>
+ to the client and/or the daemon.
+ </p>
+
+ <section>
+ <title>Daemons</title>
+ <p>The keys are by default stored in files:</p>
+ <list>
+ <item>Mandatory: one or more <i>Host key(s)</i>, both private and public. Default is to
+ store them in the directory <c>/etc/ssh</c> in the files
+ <list>
+ <item><c>ssh_host_dsa_key</c> and <c>ssh_host_dsa_key.pub</c></item>
+ <item><c>ssh_host_rsa_key</c> and <c>ssh_host_rsa_key.pub</c></item>
+ <item><c>ssh_host_ecdsa_key</c> and <c>ssh_host_ecdsa_key.pub</c></item>
+ </list>
+ <p>The host keys directory could be changed with the option
+ <seealso marker="#type-system_dir_daemon_option"><c>system_dir</c></seealso>.</p>
+ </item>
+      <item>Optional: one or more <i>User's public key(s)</i> in case of <c>publickey</c> authorization.
+ Default is to store them concatenated in the file <c>.ssh/authorized_keys</c> in the user's home directory.
+ <p>The user keys directory could be changed with the option
+ <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>.</p>
+ </item>
+ </list>
+ </section>
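+    <p>A minimal daemon sketch reading host keys and authorized keys from
+    non-default (hypothetical) directories:</p>
+    <code type="erl">
+{ok, _Daemon} = ssh:daemon(2222, [{system_dir, "/etc/my_ssh"},
+                                  {user_dir, "/home/alice/.ssh"}]).
+    </code>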
+
+ <section>
+ <title>Clients</title>
+ <p>The keys and some other data are by default stored in files in the directory <c>.ssh</c>
+ in the user's home directory.</p>
+ <p>The directory could be changed with the option
+ <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>.
+ </p>
+ <list>
+ <item>Optional: a list of <i>Host public key(s)</i> for previously connected hosts. This list
+ is handled by the SSH application without any need of user assistance. The default
+ is to store them in the file <c>known_hosts</c>.
+ <p>The
+ <seealso marker="#type-host_accepting_client_options">host_accepting_client_options()</seealso>
+ are associated with this list of keys.
+ </p>
+ </item>
+ <item>Optional: one or more <i>User's private key(s)</i> in case of <c>publickey</c> authorization.
+ The default files are
+ <list>
+ <item><c>id_dsa</c> and <c>id_dsa.pub</c></item>
+ <item><c>id_rsa</c> and <c>id_rsa.pub</c></item>
+ <item><c>id_ecdsa</c> and <c>id_ecdsa.pub</c></item>
+ </list>
+ </item>
+ </list>
+ </section>
- <tag><c>key_cb() =</c></tag>
- <item>
- <p><c>atom() | {atom(), list()}</c></p>
- <p><c>atom()</c> - Name of the erlang module implementing the behaviours
- <seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> or
- <seealso marker="ssh_server_key_api">ssh_server_key_api</seealso> as the
- case maybe.</p>
- <p><c>list()</c> - List of options that can be passed to the callback module.</p>
- </item>
+ </section>
- <tag><c>channel_init_args() =</c></tag>
- <item><p><c>list()</c></p></item>
+ <!--
+ ================================================================
+ = Data types =
+ ================================================================
+ -->
- <tag><c>algs_list() =</c></tag>
- <item><p><c>list( alg_entry() )</c></p></item>
+ <datatypes>
- <tag><c>alg_entry() =</c></tag>
- <item><p><c>{kex, simple_algs()} | {public_key, simple_algs()} | {cipher, double_algs()} | {mac, double_algs()} | {compression, double_algs()}</c></p></item>
+ <datatype_title>Client Options</datatype_title>
- <tag><c>simple_algs() =</c></tag>
- <item><p><c>list( atom() )</c></p></item>
-
- <tag><c>double_algs() =</c></tag>
- <item><p><c>[{client2serverlist,simple_algs()},{server2client,simple_algs()}] | simple_algs()</c></p></item>
+ <datatype>
+ <name name="client_options"/>
+ <name name="client_option"/>
+ <desc>
+ <p>Options for <seealso marker="#connect/3">clients</seealso>.
+ The individual options are further explained below or by following the hyperlinks.
+ </p>
+ </desc>
+ </datatype>
- <tag><c>modify_algs_list() =</c></tag>
- <item><p><c>list( {append,algs_list()} | {prepend,algs_list()} | {rm,algs_list()} )</c></p></item>
- </taglist>
-</section>
+ <datatype>
+ <name name="pref_public_key_algs_client_option"/>
+ <desc>
+ <p>List of user (client) public key algorithms to try to use.</p>
+ <p>The default value is the <c>public_key</c> entry in the list returned by
+ <seealso marker="#default_algorithms/0">ssh:default_algorithms/0</seealso>.
+ </p>
+ <p>If there is no public key of a specified type available, the corresponding entry is ignored.
+ Note that the available set is dependent on the underlying cryptolib and current user's public keys.
+ </p>
+ <p>See also the option <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>
+ for specifying the path to the user's keys.
+ </p>
+ </desc>
+ </datatype>
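+    <p>A minimal sketch (the host name is hypothetical) restricting the
+    user key algorithms that the client tries:</p>
+    <code type="erl">
+ssh:connect("host.example.com", 22,
+            [{pref_public_key_algs, ['rsa-sha2-256','ssh-rsa']}]).
+    </code>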
- <funcs>
-
- <func>
- <name>close(ConnectionRef) -> ok </name>
- <fsummary>Closes an SSH connection.</fsummary>
- <type>
- <v>ConnectionRef = ssh_connection_ref()</v>
- </type>
- <desc><p>Closes an SSH connection.</p>
+ <datatype>
+ <name name="pubkey_passphrase_client_options"/>
+ <desc>
+ <p>If the user's DSA, RSA or ECDSA key is protected by a passphrase, it can be
+      supplied with those options.
+ </p>
</desc>
- </func>
-
- <func>
- <name>connect(Host, Port, Options) -> </name>
- <name>connect(Host, Port, Options, Timeout) -> </name>
- <name>connect(TcpSocket, Options) -> </name>
- <name>connect(TcpSocket, Options, Timeout) ->
- {ok, ssh_connection_ref()} | {error, Reason}</name>
- <fsummary>Connects to an SSH server.</fsummary>
- <type>
- <v>Host = string()</v>
- <v>Port = integer()</v>
- <d><c><![CDATA[22]]></c> is default, the assigned well-known port
- number for SSH.</d>
- <v>Options = [{Option, Value}]</v>
- <v>Timeout = infinity | integer()</v>
- <d>Negotiation time-out in milli-seconds. The default value is <c>infinity</c>.
- For connection time-out, use option <c>{connect_timeout, timeout()}</c>.</d>
- <v>TcpSocket = port()</v>
- <d>The socket is supposed to be from <seealso marker="kernel:gen_tcp#connect-3">gen_tcp:connect</seealso> or <seealso marker="kernel:gen_tcp#accept-1">gen_tcp:accept</seealso> with option <c>{active,false}</c></d>
- </type>
+ </datatype>
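+    <p>A minimal sketch (the host and the passphrase are hypothetical)
+    unlocking a passphrase-protected RSA user key:</p>
+    <code type="erl">
+ssh:connect("host.example.com", 22,
+            [{rsa_pass_phrase, "my key passphrase"}]).
+    </code>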
+
+ <datatype>
+ <name name="host_accepting_client_options"/>
+ <name name="accept_hosts"/>
+ <name name="fp_digest_alg"/>
+ <name name="accept_callback"/>
+ <name name="fingerprint"/>
<desc>
- <p>Connects to an SSH server. No channel is started. This is done
- by calling
- <seealso marker="ssh_connection#session_channel/2">
- ssh_connection:session_channel/[2, 4]</seealso>.</p>
- <p>Options:</p>
<taglist>
- <tag><c><![CDATA[{inet, inet | inet6}]]></c></tag>
- <item>
- <p>IP version to use.</p>
- </item>
- <tag><marker id="opt_user_dir"></marker><c><![CDATA[{user_dir, string()}]]></c></tag>
- <item>
- <p>Sets the user directory, that is, the directory containing
- <c>ssh</c> configuration files for the user, such as
- <c><![CDATA[known_hosts]]></c>, <c><![CDATA[id_rsa,
- id_dsa]]></c>, and
- <c><![CDATA[authorized_key]]></c>. Defaults to the
- directory normally referred to as
- <c><![CDATA[~/.ssh]]></c>.</p>
- </item>
- <tag><c><![CDATA[{dsa_pass_phrase, string()}]]></c></tag>
- <item>
- <p>If the user DSA key is protected by a passphrase, it can be
- supplied with this option.
- </p>
- </item>
- <tag><c><![CDATA[{rsa_pass_phrase, string()}]]></c></tag>
- <item>
- <p>If the user RSA key is protected by a passphrase, it can be
- supplied with this option.
- </p>
- </item>
- <tag><c><![CDATA[{ecdsa_pass_phrase, string()}]]></c></tag>
- <item>
- <p>If the user ECDSA key is protected by a passphrase, it can be
- supplied with this option.
- </p>
- </item>
- <tag>
- <c><![CDATA[{silently_accept_hosts, boolean()}]]></c> <br/>
- <c><![CDATA[{silently_accept_hosts, CallbackFun}]]></c> <br/>
- <c><![CDATA[{silently_accept_hosts, {HashAlgoSpec, CallbackFun} }]]></c> <br/>
- <br/>
- <c><![CDATA[HashAlgoSpec = crypto:digest_type() | [ crypto:digest_type() ] ]]></c><br/>
- <c><![CDATA[CallbackFun = fun(PeerName, FingerPrint) -> boolean()]]></c><br/>
- <c><![CDATA[PeerName = string()]]></c><br/>
- <c><![CDATA[FingerPrint = string() | [ string() ] ]]></c>
- </tag>
+ <tag><c>silently_accept_hosts</c></tag>
<item>
- <p>This option guides the <c>connect</c> function how to act when the connected server presents a Host
+ <p>This option guides the <c>connect</c> function on how to act when the connected server presents a Host
Key that the client has not seen before. The default is to ask the user with a question on stdio of whether to
accept or reject the new Host Key.
- See also the option <seealso marker="#opt_user_dir"><c>user_dir</c></seealso>
- for the path to the file <c>known_hosts</c> where previously accepted Host Keys are recorded.
- </p>
+ See the option <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso>
+ for specifying the path to the file <c>known_hosts</c> where previously accepted Host Keys are recorded.
+ See also the option
+ <seealso marker="#type-key_cb_common_option">key_cb</seealso>
+ for the general way to handle keys.
+ </p>
<p>The option can be given in three different forms as seen above:</p>
<list>
- <item>The value is a <c>boolean()</c>. The value <c>true</c> will make the client accept any unknown
- Host Key without any user interaction. The value <c>false</c> keeps the default behaviour of asking the
- the user on stdio.
+ <item>The value is a <c>boolean()</c>.
+ The value <c>true</c> will make the client accept any unknown Host Key without any user interaction.
+ The value <c>false</c> preserves the default behaviour of asking the user on stdio.
</item>
- <item>A <c>CallbackFun</c> will be called and the boolean return value <c>true</c> will make the client
- accept the Host Key. A return value of <c>false</c> will make the client to reject the Host Key and therefore
- also the connection will be closed. The arguments to the fun are:
+ <item>An <c>accept_callback()</c> will be called and the boolean return value <c>true</c>
+ will make the client
+          accept the Host Key. A return value of <c>false</c> will make the client reject the Host Key and as a
+ result the connection will be closed. The arguments to the fun are:
<list type="bulleted">
<item><c>PeerName</c> - a string with the name or address of the remote host.</item>
<item><c>FingerPrint</c> - the fingerprint of the Host Key as
@@ -236,532 +244,335 @@
</item>
</list>
</item>
- <item>A tuple <c>{HashAlgoSpec, CallbackFun}</c>. The <c>HashAlgoSpec</c> specifies which hash algorithm
- shall be used to calculate the fingerprint used in the call of the <c>CallbackFun</c>. The <c>HashALgoSpec</c>
- is either an atom or a list of atoms as the first argument in
- <seealso marker="public_key:public_key#ssh_hostkey_fingerprint-2">public_key:ssh_hostkey_fingerprint/2</seealso>.
- If it is a list of hash algorithm names, the <c>FingerPrint</c> argument in the <c>CallbackFun</c> will be
- a list of fingerprints in the same order as the corresponding name in the <c>HashAlgoSpec</c> list.
+ <item>A tuple <c>{HashAlgoSpec, accept_callback}</c>. The <c>HashAlgoSpec</c>
+ specifies which hash algorithm
+ shall be used to calculate the fingerprint used in the call of the <c>accept_callback()</c>.
+          The <c>HashAlgoSpec</c>
+ is either an atom or a list of atoms as the first argument in
+ <seealso marker="public_key:public_key#ssh_hostkey_fingerprint-2">public_key:ssh_hostkey_fingerprint/2</seealso>.
+ If it is a list of hash algorithm names, the <c>FingerPrint</c> argument in the
+ <c>accept_callback()</c> will be
+ a list of fingerprints in the same order as the corresponding name in the <c>HashAlgoSpec</c> list.
</item>
</list>
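+          <p>A sketch of the callback form is shown after this datatype.</p>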
</item>
-
- <tag><c><![CDATA[{save_accepted_host, boolean()}]]></c></tag>
- <item>
- <p>If <c>true</c>, the client saves an accepted host key to avoid the
- accept question the next time the same host is connected. If the option
- <c>key_cb</c> is not present, the key is saved in the file "known_hosts".
- </p>
- <p>If <c>false</c>, the key is not saved and the key will still be unknown
- at the next access of the same host.
- </p>
- </item>
-
- <tag><c><![CDATA[{user_interaction, boolean()}]]></c></tag>
+
+ <tag><c>user_interaction</c></tag>
<item>
<p>If <c>false</c>, disables the client to connect to the server
if any user interaction is needed, such as accepting
the server to be added to the <c>known_hosts</c> file, or
- supplying a password. Defaults to <c>true</c>.
- Even if user interaction is allowed it can be
+ supplying a password.</p>
+ <p>Even if user interaction is allowed it can be
suppressed by other options, such as <c>silently_accept_hosts</c>
and <c>password</c>. However, those options are not always desirable
to use from a security point of view.</p>
+ <p>Defaults to <c>true</c>.</p>
</item>
- <tag><c><![CDATA[{disconnectfun, fun(Reason:term()) -> _}]]></c></tag>
- <item>
- <p>Provides a fun to implement your own logging when a server disconnects the client.</p>
- </item>
-
- <tag><c><![CDATA[{unexpectedfun, fun(Message:term(), Peer) -> report | skip }]]></c></tag>
- <item>
- <p>Provides a fun to implement your own logging or other action when an unexpected message arrives.
- If the fun returns <c>report</c> the usual info report is issued but if <c>skip</c> is returned no
- report is generated.</p>
- <p><c>Peer</c> is in the format of <c>{Host,Port}</c>.</p>
- </item>
-
- <tag><c><![CDATA[{pref_public_key_algs, list()}]]></c></tag>
- <item>
- <p>List of user (client) public key algorithms to try to use.</p>
- <p>The default value is the <c>public_key</c> entry in
- <seealso marker="#default_algorithms/0">ssh:default_algorithms/0</seealso>.
- </p>
- <p>If there is no public key of a specified type available, the corresponding entry is ignored.
- Note that the available set is dependent on the underlying cryptolib and current user's public keys.
- </p>
- </item>
-
- <tag><marker id="option_preferred_algorithms"></marker>
- <c><![CDATA[{preferred_algorithms, algs_list()}]]></c></tag>
+ <tag><c>save_accepted_host</c></tag>
<item>
- <p>List of algorithms to use in the algorithm negotiation. The default <c>algs_list()</c> can
- be obtained from <seealso marker="#default_algorithms/0">default_algorithms/0</seealso>.
+ <p>If <c>true</c>, the client saves an accepted host key to avoid the
+ accept question the next time the same host is connected. If the option
+ <seealso marker="#type-key_cb_common_option"><c>key_cb</c></seealso>
+ is not present, the key is saved in the file "known_hosts". See option
+ <seealso marker="#type-user_dir_common_option"><c>user_dir</c></seealso> for
+ the location of that file.
</p>
- <p>If an alg_entry() is missing in the algs_list(), the default value is used for that entry.</p>
- <p>Here is an example of this option:</p>
- <code>
-{preferred_algorithms,
- [{public_key,['ssh-rsa','ssh-dss']},
- {cipher,[{client2server,['aes128-ctr']},
- {server2client,['aes128-cbc','3des-cbc']}]},
- {mac,['hmac-sha2-256','hmac-sha1']},
- {compression,[none,zlib]}
- ]
-}
-</code>
- <p>The example specifies different algorithms in the two directions (client2server and server2client),
- for cipher but specifies the same algorithms for mac and compression in both directions.
- The kex (key exchange) is implicit but public_key is set explicitly.</p>
-
- <p>For background and more examples see the <seealso marker="configure_algos#introduction">User's Guide</seealso>.</p>
-
- <warning>
- <p>Changing the values can make a connection less secure. Do not change unless you
- know exactly what you are doing. If you do not understand the values then you
- are not supposed to change them.</p>
- </warning>
- </item>
-
- <tag><marker id="option_modify_algorithms"></marker>
- <c><![CDATA[{modify_algorithms, modify_algs_list()}]]></c></tag>
- <item>
- <p>Modifies the list of algorithms to use in the algorithm negotiation. The modifications are
- applied after the option <c>preferred_algorithms</c> (if existing) is applied.</p>
- <p>The algoritm for modifications works like this:</p>
- <list>
- <item>
- <p>Input is the <c>modify_algs_list()</c> and a set of algorithms <c>A</c>
- obtained from the <c>preferred_algorithms</c> option if existing, or else from the
- <seealso marker="ssh#default_algorithms-0">ssh:default_algorithms/0</seealso>.
- </p>
- </item>
- <item>
- <p>The head of the <c>modify_algs_list()</c> modifies <c>A</c> giving the result <c>A'</c>.</p>
- <p>The possible modifications are:</p>
- <list>
- <item>
- <p>Append or prepend supported but not enabled algorithm(s) to the list of
- algorithms. If the wanted algorithms already are in <c>A</c> they will first
- be removed and then appended or prepended,
- </p>
- </item>
- <item>
- <p>Remove (rm) one or more algorithms from <c>A</c>.
- </p>
- </item>
- </list>
- </item>
- <item>
- <p>Repeat the modification step with the tail of <c>modify_algs_list()</c> and the resulting
- <c>A'</c>.
- </p>
- </item>
- </list>
- <p>If an unsupported algorithm is in the <c>modify_algs_list()</c>, it will be silently ignored</p>
- <p>If there are more than one modify_algorithms options, the result is undefined.</p>
- <p>Here is an example of this option:</p>
- <code>
-{modify_algorithms,
- [{prepend, [{kex, ['diffie-hellman-group1-sha1']}],
- {rm, [{compression, [none]}]}
- ]
-}
-</code>
- <p>The example specifies that:</p>
- <list>
- <item><p>the old key exchange algorithm 'diffie-hellman-group1-sha1' should be
- the main alternative. It will be the main alternative since it is prepened to the list</p>
- </item>
- <item><p>The compression algorithm none (= no compression) is removed so compression is enforced</p>
- </item>
- </list>
- <p>For background and more examples see the <seealso marker="configure_algos#introduction">User's Guide</seealso>.</p>
- </item>
-
- <tag><c><![CDATA[{dh_gex_limits,{Min=integer(),I=integer(),Max=integer()}}]]></c></tag>
- <item>
- <p>Sets the three diffie-hellman-group-exchange parameters that guides the connected server in choosing a group.
- See RFC 4419 for the function of thoose. The default value is <c>{1024, 6144, 8192}</c>.
+ <p>If <c>false</c>, the key is not saved and the key will still be unknown
+ at the next access of the same host.
</p>
+ <p>Defaults to <c>true</c></p>
</item>
- <tag><c><![CDATA[{connect_timeout, timeout()}]]></c></tag>
+ <tag><c>quiet_mode</c></tag>
<item>
- <p>Sets a time-out on the transport layer
- connection. For <c>gen_tcp</c> the time is in milli-seconds and the default value is
- <c>infinity</c>.</p>
+ <p>If <c>true</c>, the client does not print anything on authorization.</p>
+ <p>Defaults to <c>false</c></p>
</item>
+ </taglist>
+ </desc>
+ </datatype>
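+    <p>A minimal sketch of the callback form of
+    <c>silently_accept_hosts</c> (the host and the fingerprint below are
+    hypothetical): accept only a known fingerprint.</p>
+    <code type="erl">
+KnownFp = "SHA256:aZGXjzNO0PDkNs/yAXxVDhCbQr2ZaOG4ZRjXgXOEmfU",
+ssh:connect("host.example.com", 22,
+            [{silently_accept_hosts,
+              {sha256, fun(_PeerName, Fp) -> Fp =:= KnownFp end}}]).
+    </code>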
- <tag><c><![CDATA[{auth_methods, string()}]]></c></tag>
- <item>
- <p>Comma-separated string that determines which
- authentication methods that the client shall support and
- in which order they are tried. Defaults to
- <c><![CDATA["publickey,keyboard-interactive,password"]]></c></p>
- </item>
-
- <tag><c><![CDATA[{user, string()}]]></c></tag>
+ <datatype>
+ <name name="authentication_client_options"/>
+ <desc>
+ <taglist>
+ <tag><c>user</c></tag>
<item>
- <p>Provides a username. If this option is not given, <c>ssh</c>
+ <p>Provides the username. If this option is not given, <c>ssh</c>
reads from the environment (<c><![CDATA[LOGNAME]]></c> or
<c><![CDATA[USER]]></c> on UNIX,
<c><![CDATA[USERNAME]]></c> on Windows).</p>
</item>
- <tag><c><![CDATA[{password, string()}]]></c></tag>
+ <tag><c>password</c></tag>
<item>
<p>Provides a password for password authentication.
If this option is not given, the user is asked for a
password, if the password authentication method is
attempted.</p>
</item>
-
- <!--tag><c><![CDATA[{send_ext_info, boolean()}]]></c></tag>
- <item>
- <p>Send a list of extensions to the server if the server has asked for it. See
- <url href="https://tools.ietf.org/html/draft-ietf-curdle-ssh-ext-info">Draft-ietf-curdle-ssh-ext-info (work in progress)</url> for details.
- </p>
- <p>Currently the client do not react on any extensions.
- </p>
- <p>Default value is <c>true</c>.
- </p>
- </item-->
-
- <tag><c><![CDATA[{recv_ext_info, boolean()}]]></c></tag>
- <item>
- <p>Tell the server that the client accepts extension negotiation. See
- <url href="https://tools.ietf.org/html/draft-ietf-curdle-ssh-ext-info">Draft-ietf-curdle-ssh-ext-info (work in progress)</url> for details.
- </p>
- <p>Currently implemented extension is <c>server-sig-algs</c> which is the list of the server's preferred
- user's public key algorithms.
- </p>
- <p>Default value is <c>true</c>.
- </p>
- </item>
-
- <tag><c><![CDATA[{key_cb, key_cb()}]]></c></tag>
- <item>
- <p>Module implementing the behaviour <seealso
- marker="ssh_client_key_api">ssh_client_key_api</seealso>. Can be used to
- customize the handling of public keys. If callback options are provided
- along with the module name, they are made available to the callback
- module via the options passed to it under the key 'key_cb_private'.
- </p>
- </item>
-
- <tag><c><![CDATA[{quiet_mode, atom() = boolean()}]]></c></tag>
- <item>
- <p>If <c>true</c>, the client does not print anything on authorization.</p>
- </item>
-
- <tag><c><![CDATA[{id_string, random | string()}]]></c></tag>
- <item>
- <p>The string that the client presents to a connected server initially. The default value is "Erlang/VSN" where VSN is the ssh application version number.
- </p>
- <p>The value <c>random</c> will cause a random string to be created at each connection attempt. This is to make it a bit more difficult for a malicious peer to find the ssh software brand and version.
- </p>
- </item>
-
- <tag><c><![CDATA[{fd, file_descriptor()}]]></c></tag>
- <item>
- <p>Allows an existing file descriptor to be used
- (by passing it on to the transport protocol).</p></item>
- <tag><c><![CDATA[{rekey_limit, integer()}]]></c></tag>
- <item>
- <p>Provides, in bytes, when rekeying is to be initiated.
- Defaults to once per each GB and once per hour.</p>
- </item>
- <tag><c><![CDATA[{idle_time, integer()}]]></c></tag>
- <item>
- <p>Sets a time-out on a connection when no channels are active.
- Defaults to <c>infinity</c>.</p></item>
- <tag><c><![CDATA[{ssh_msg_debug_fun, fun(ConnectionRef::ssh_connection_ref(), AlwaysDisplay::boolean(), Msg::binary(), LanguageTag::binary()) -> _}]]></c></tag>
- <item>
- <p>Provide a fun to implement your own logging of the SSH message SSH_MSG_DEBUG. The last three parameters are from the message, see RFC4253, section 11.3. The <c>ConnectionRef</c> is the reference to the connection on which the message arrived. The return value from the fun is not checked.</p>
- <p>The default behaviour is ignore the message.
- To get a printout for each message with <c>AlwaysDisplay = true</c>, use for example <c>{ssh_msg_debug_fun, fun(_,true,M,_)-> io:format("DEBUG: ~p~n", [M]) end}</c></p>
- </item>
-
</taglist>
- </desc>
- </func>
+ </desc>
+ </datatype>
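+    <p>A minimal sketch (the credentials are hypothetical) of explicit
+    password authentication:</p>
+    <code type="erl">
+ssh:connect("host.example.com", 22,
+            [{user, "alice"}, {password, "correct horse battery staple"}]).
+    </code>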
- <func>
- <name>connection_info(ConnectionRef, [Option]) ->[{Option,
- Value}]</name>
- <fsummary>Retrieves information about a connection.</fsummary>
- <type>
- <v>Option = client_version | server_version | user | peer | sockname </v>
- <v>Value = [option_value()] </v>
- <v>option_value() = {{Major::integer(), Minor::integer()}, VersionString::string()} |
- User::string() | Peer::{inet:hostname(), {ip_address(), port_number()}} |
- Sockname::{ip_address(), port_number()}</v>
- </type>
+ <datatype>
+ <name name="diffie_hellman_group_exchange_client_option"/>
<desc>
- <p>Retrieves information about a connection.</p>
+ <p>Sets the three diffie-hellman-group-exchange parameters that guides the connected server in choosing a group.
+ See
+ <url href="https://tools.ietf.org/html/rfc4419">RFC 4419</url>
+ for the details. The default value is <c>{1024, 6144, 8192}</c>.
+ </p>
</desc>
- </func>
-
- <func>
- <name>daemon(Port) -> </name>
- <name>daemon(Port, Options) -> </name>
- <name>daemon(HostAddress, Port, Options) -> </name>
- <name>daemon(TcpSocket) -> </name>
- <name>daemon(TcpSocket, Options) -> {ok, ssh_daemon_ref()} | {error, atom()}</name>
- <fsummary>Starts a server listening for SSH connections
- on the given port.</fsummary>
- <type>
- <v>Port = integer()</v>
- <v>HostAddress = ip_address() | any | loopback</v>
- <v>Options = [{Option, Value}]</v>
- <v>Option = atom()</v>
- <v>Value = term()</v>
- <v>TcpSocket = port()</v>
- <d>The socket is supposed to be from <seealso marker="kernel:gen_tcp#connect-3">gen_tcp:connect</seealso> or <seealso marker="kernel:gen_tcp#accept-1">gen_tcp:accept</seealso> with option <c>{active,false}</c></d>
- </type>
- <desc>
- <p>Starts a server listening for SSH connections on the given
- port. If the <c>Port</c> is 0, a random free port is selected. See
- <seealso marker="#daemon_info/1">daemon_info/1</seealso> about how to find the selected port number.</p>
-
- <p>Please note that by historical reasons both the <c>HostAddress</c> argument and the inet socket option
- <c>ip</c> set the listening address. This is a source of possible inconsistent settings.</p>
+ </datatype>
- <p>The rules for handling the two address passing options are:</p>
- <list>
- <item>if <c>HostAddress</c> is an IP-address, that IP-address is the listening address.
- An 'ip'-option will be discarded if present.</item>
+ <datatype>
+ <name name="connect_timeout_client_option"/>
+ <desc>
+ <p>Sets a timeout on the transport layer connect time.
+        For <seealso marker="kernel:gen_tcp"><c>gen_tcp</c></seealso> the time is in milliseconds and the default
+ value is <c>infinity</c>.
+ </p>
+ <p>See the parameter <c>Timeout</c> in <seealso marker="#connect/4">connect/4</seealso> for
+ a timeout of the negotiation phase.
+ </p>
+ </desc>
+ </datatype>
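+    <p>A minimal sketch combining a 5 second transport connect timeout
+    with a 10 second negotiation timeout (the host is hypothetical):</p>
+    <code type="erl">
+ssh:connect("host.example.com", 22, [{connect_timeout, 5000}], 10000).
+    </code>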
- <item>if <c>HostAddress</c> is <c>loopback</c>, the listening address
- is <c>loopback</c> and an loopback address will be choosen by the underlying layers.
- An 'ip'-option will be discarded if present.</item>
+ <datatype>
+ <name name="recv_ext_info_client_option"/>
+ <desc>
+ <p>Make the client tell the server that the client accepts extension negotiation, that is,
+ include <c>ext-info-c</c> in the kexinit message sent. See
+ <url href="https://tools.ietf.org/html/rfc8308">RFC 8308</url>
+ for details and <seealso marker="SSH_app#supported-ext-info">ssh(6)</seealso>
+ for a list of currently implemented extensions.
+ </p>
+ <p>
+        Default value is <c>true</c>, which is compatible with other implementations not supporting ext-info.
+ </p>
+ </desc>
+ </datatype>
- <item>if <c>HostAddress</c> is <c>any</c> and no 'ip'-option is present, the listening address is
- <c>any</c> and the socket will listen to all addresses</item>
+ <!--................................................................-->
+ <datatype_title>Daemon Options (Server Options)</datatype_title>
- <item>if <c>HostAddress</c> is <c>any</c> and an 'ip'-option is present, the listening address is
- set to the value of the 'ip'-option</item>
- </list>
+ <datatype>
+ <name name="daemon_options"/>
+ <name name="daemon_option"/>
+ <desc>
+ <p>Options for <seealso marker="#daemon/1">daemons</seealso>.
+ The individual options are further explained below or by following the hyperlinks.
+ </p>
+ </desc>
+ </datatype>
- <p>Options:</p>
- <taglist>
- <tag><c><![CDATA[{inet, inet | inet6}]]></c></tag>
- <item><p>IP version to use when the host address is specified as <c>any</c>.</p></item>
- <tag><c><![CDATA[{subsystems, [subsystem_spec()]}]]></c></tag>
- <item>
- <p>Provides specifications for handling of subsystems. The
- "sftp" subsystem specification is retrieved by calling
- <c>ssh_sftpd:subsystem_spec/1</c>. If the subsystems option is
- not present, the value of
- <c>[ssh_sftpd:subsystem_spec([])]</c> is used.
- The option can be set to the empty list if
- you do not want the daemon to run any subsystems.</p>
- </item>
+
+ <datatype>
+ <name name="subsystem_daemon_option"/>
+ <name name="subsystem_spec"/>
+ <desc>
+ <p>Defines a subsystem in the daemon.</p>
+      <p>The <c>subsystem_name</c> is the name that a client requests to start, for example with
+ <seealso marker="ssh_connection#subsystem/4">ssh_connection:subsystem/4</seealso>.
+ </p>
+ <p>The <c>channel_callback</c> is the module that implements the
+ <seealso marker="ssh_server_channel">ssh_server_channel</seealso> (replaces ssh_daemon_channel)
+ behaviour in the daemon. See the section
+ <seealso marker="using_ssh#usersguide_creating_a_subsystem">Creating a Subsystem</seealso>
+ in the User's Guide for more information and an example.
+ </p>
+ <p>If the subsystems option is not present, the value of <c>ssh_sftpd:subsystem_spec([])</c> is used.
+ This enables the sftp subsystem by default.
+ The option can be set to the empty list if you do not want the daemon to run any subsystems.</p>
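+	 <p>A hedged example, where the directory <c>/tmp/sftp</c> is an example only, of a subsystem list
+	   that enables sftp with a non-default initial directory:
+	 </p>
+	 <code>
+ {subsystems, [ssh_sftpd:subsystem_spec([{cwd, "/tmp/sftp"}])]}
+	 </code>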
+ </desc>
+ </datatype>
- <tag><marker id="daemon_opt_shell"/>
- <c><![CDATA[{shell, {Module, Function, Args} |
- fun(string() = User) - > pid() | fun(string() = User,
- ip_address() = PeerAddr) -> pid()}]]></c></tag>
+ <datatype>
+ <name name="shell_daemon_option"/>
+ <name name="'shell_fun/1'"/>
+ <name name="'shell_fun/2'"/>
+ <desc>
+ <p>Defines the read-eval-print loop used in a daemon when a shell is requested by the client.
+ The default is to use the Erlang shell: <c><![CDATA[{shell, start, []}]]></c>
+ </p>
+ <p>See the option <seealso marker="#type-exec_daemon_option"><c>exec</c></seealso>
+	 for a description of how the daemon executes exec-requests depending on
+ the shell- and exec-options.</p>
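+	 <p>A minimal sketch, where the greeting text is an example only, of a <c>'shell_fun/1'()</c>
+	   that starts a trivial process instead of the Erlang shell:
+	 </p>
+	 <code>
+ {shell, fun(User) ->
+             spawn(fun() ->
+                       io:format("Hello ~s! No shell available here.~n", [User])
+                   end)
+         end}
+	 </code>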
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="exec_daemon_option"/>
+ <name name="'exec_fun/1'"/>
+ <name name="'exec_fun/2'"/>
+ <name name="'exec_fun/3'"/>
+ <name name="exec_result"/>
+ <desc>
+	 <p>This option changes how the daemon executes exec-requests from clients. The term in the return value
+	   is formatted to a string if it is of a non-string type. No trailing newline is added in the ok-case,
+	   but one is added in the error case.</p>
+	 <p>Error texts are returned on channel-type 1, which usually is piped to <c>stderr</c> on for example Linux systems.
+	   Texts from a successful execution are in a similar manner piped to <c>stdout</c>. The exit-status code
+	   is set to 0 for success and -1 for errors. The exact result presented on the client side depends on the
+	   client and the client's operating system.
+ </p>
+ <p>The option cooperates with the daemon-option <seealso marker="#type-shell_daemon_option"><c>shell</c></seealso>
+ in the following way:</p>
+ <taglist>
+ <tag>1. If the exec-option is present (the shell-option may or may not be present):</tag>
<item>
- <p>Defines the read-eval-print loop used when a shell is
- requested by the client. The default is to use the Erlang shell:
- <c><![CDATA[{shell, start, []}]]></c></p>
- <p>See the option <seealso marker="#daemon_opt_exec"><c>exec</c></seealso>
- for a description of how the daemon execute exec-requests depending on
- the shell- and exec-options.</p>
+ <p>The exec-option fun is called with the same number of parameters as the arity of the fun,
+ and the result is returned to the client.
+ </p>
</item>
-
- <tag><marker id="daemon_opt_exec"/>
- <c><![CDATA[{exec, {direct, exec_spec()}}]]></c>
- <br/><c>where:</c>
- <br/><c>exec_spec() = </c>
- <br/><c> fun(Cmd::string()) -> ok_error()</c>
- <br/><c> | fun(Cmd::string(), User::string()) -> ok_error()</c>
- <br/><c> | fun(Cmd::string(), User::string(), ClientAddr::{ip_address(), port_number()}) -> ok_error()</c>
- </tag>
+
+ <tag>2. If the exec-option is absent, but a shell-option is present with the default Erlang shell:</tag>
<item>
- <p>This option changes how the daemon execute exec-requests from clients. The term in <c>ok_error()</c>
- is formatted to a string if it is a non-string type. No trailing newline is added in the ok-case but in the
- error case.</p>
- <p>Error texts are returned on channel-type 1 which usually are piped to <c>stderr</c> on e.g Linux systems.
- Texts from a successful execution will in similar manner be piped to <c>stdout</c>. The exit-status code
- is set to 0 for success and -1 for errors. The exact results presented on the client side depends on the
- client.
- </p>
- <p>The option cooperates with the daemon-option <seealso marker="#daemon_opt_shell"><c>shell</c></seealso>
- in the following way:</p>
- <taglist>
- <tag>1. If the exec-option is present (the shell-option may or may not be present):</tag>
- <item>
- <p>The exec-option fun is called with the same number of parameters as the arity of the fun,
- and the result is returned to the client.
- </p>
- </item>
-
- <tag>2. If the exec-option is absent, but a shell-option is present with the default Erlang shell:</tag>
- <item>
- <p>The default Erlang evaluator is used and the result is returned to the client.</p>
- </item>
-
- <tag>3. If the exec-option is absent, but a shell-option is present that is not the default Erlang shell:</tag>
- <item>
- <p>The exec-request is not evaluated and an error message is returned to the client.</p>
- </item>
-
- <tag>4. If neither the exec-option nor the shell-option is present:</tag>
- <item>
- <p>The default Erlang evaluator is used and the result is returned to the client.</p>
- </item>
- </taglist>
- <p>If a custom CLI is installed (see the option <seealso marker="#daemon_opt_ssh_cli"><c>ssh_cli</c></seealso>)
- the rules above are replaced by thoose implied by the custom CLI.
- </p>
- <note>
- <p>The exec-option has existed for a long time but has not previously been documented. The old
- definition and behaviour are retained but obey the rules 1-4 above if conflicting.
- The old and undocumented style should not be used in new programs.</p>
- </note>
+ <p>The default Erlang evaluator is used and the result is returned to the client.</p>
</item>
-
- <tag><marker id="daemon_opt_ssh_cli"/>
- <c><![CDATA[{ssh_cli, {channel_callback(),
- channel_init_args()} | no_cli}]]></c></tag>
+
+ <tag>3. If the exec-option is absent, but a shell-option is present that is not the default Erlang shell:</tag>
<item>
- <p>Provides your own CLI implementation, that is, a channel callback
- module that implements a shell and command execution. The shell
- read-eval-print loop can be customized, using the
- option <seealso marker="#daemon_opt_shell"><c>shell</c></seealso>. This means less work than implementing
- an own CLI channel. If <c>ssh_cli</c> is set to <c>no_cli</c>, the CLI channels
- like <seealso marker="#daemon_opt_shell"><c>shell</c></seealso>
- and <seealso marker="#daemon_opt_exec"><c>exec</c></seealso>
- are disabled and only subsystem channels are allowed.</p>
+ <p>The exec-request is not evaluated and an error message is returned to the client.</p>
</item>
- <tag><c><![CDATA[{user_dir, string()}]]></c></tag>
+
+ <tag>4. If neither the exec-option nor the shell-option is present:</tag>
<item>
- <p>Sets the user directory. That is, the directory containing
- <c>ssh</c> configuration files for the user, such as
- <c><![CDATA[known_hosts]]></c>, <c><![CDATA[id_rsa,
- id_dsa]]></c>, and
- <c><![CDATA[authorized_key]]></c>. Defaults to the
- directory normally referred to as
- <c><![CDATA[~/.ssh]]></c>.</p>
+ <p>The default Erlang evaluator is used and the result is returned to the client.</p>
</item>
- <tag><c><![CDATA[{system_dir, string()}]]></c></tag>
+ </taglist>
+ <p>If a custom CLI is installed (see the option <seealso marker="#type-ssh_cli_daemon_option"><c>ssh_cli</c></seealso>)
+	 the rules above are replaced by those implied by the custom CLI.
+ </p>
+ <note>
+ <p>The exec-option has existed for a long time but has not previously been documented. The old
+ definition and behaviour are retained but obey the rules 1-4 above if conflicting.
+ The old and undocumented style should not be used in new programs.</p>
+ </note>
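+	 <p>A minimal sketch, assuming a free listening port and that other daemon options such as
+	   <c>system_dir</c> are configured elsewhere, of an exec-option using the <c>{direct, Fun}</c> form.
+	   The fun simply echoes the received command back to the client:
+	 </p>
+	 <code>
+ ssh:daemon(8989, [{exec, {direct, fun(Cmd) -> {ok, Cmd} end}}]).
+	 </code>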
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="ssh_cli_daemon_option"/>
+ <desc>
+ <p>Provides your own CLI implementation in a daemon.</p>
+ <p>It is a channel callback module that implements a shell
+	 and command execution. The shell's read-eval-print loop can be customized using the
+	 option <seealso marker="#type-shell_daemon_option"><c>shell</c></seealso>. This means less work than implementing
+	 your own CLI channel. If <c>ssh_cli</c> is set to <c>no_cli</c>, the CLI channels
+ like <seealso marker="#type-shell_daemon_option"><c>shell</c></seealso>
+ and <seealso marker="#type-exec_daemon_option"><c>exec</c></seealso>
+ are disabled and only subsystem channels are allowed.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="authentication_daemon_options"/>
+ <name name="prompt_texts"/>
+ <name name="kb_int_tuple"/>
+ <name name="kb_int_fun_3"/>
+ <name name="pwdfun_2"/>
+ <name name="pwdfun_4"/>
+ <desc>
+ <taglist>
+ <tag><marker id="type-system_dir_daemon_option"/><c>system_dir</c></tag>
<item>
<p>Sets the system directory, containing the host key files
that identify the host keys for <c>ssh</c>. Defaults to
- <c><![CDATA[/etc/ssh]]></c>. For security reasons,
- this directory is normally accessible only to the root user.</p>
+ <c>/etc/ssh</c>.</p>
+ <p>For security reasons, this directory is normally accessible only to the root user.</p>
+ <p>See also the option
+ <seealso marker="#type-key_cb_common_option">key_cb</seealso>
+ for the general way to handle keys.
+ </p>
</item>
- <tag><c><![CDATA[{auth_methods, string()}]]></c></tag>
+ <tag><c>auth_method_kb_interactive_data</c></tag>
<item>
- <p>Comma-separated string that determines which
- authentication methods that the server is to support and
- in what order they are tried. Defaults to
- <c><![CDATA["publickey,keyboard-interactive,password"]]></c></p>
- <p>Note that the client is free to use any order and to exclude methods.</p>
- </item>
-
- <tag><c><![CDATA[{auth_method_kb_interactive_data, PromptTexts}]]></c>
- <br/><c>where:</c>
- <br/><c>PromptTexts = kb_int_tuple() | fun(Peer::{IP::tuple(),Port::integer()}, User::string(), Service::string()) -> kb_int_tuple()</c>
- <br/><c>kb_int_tuple() = {Name::string(), Instruction::string(), Prompt::string(), Echo::boolean()}</c>
- </tag>
- <item>
- <p>Sets the text strings that the daemon sends to the client for presentation to the user when using <c>keyboar-interactive</c> authentication. If the fun/3 is used, it is called when the actual authentication occurs and may therefore return dynamic data like time, remote ip etc.</p>
+ <p>Sets the text strings that the daemon sends to the client for presentation to the user when
+ using <c>keyboard-interactive</c> authentication.</p>
+ <p>If the fun/3 is used, it is called when the actual authentication occurs and may therefore
+ return dynamic data like time, remote ip etc.</p>
<p>The parameter <c>Echo</c> guides the client about need to hide the password.</p>
<p>The default value is:
- <c>{auth_method_kb_interactive_data, {"SSH server", "Enter password for \""++User++"\"", "password: ", false}></c></p>
+	   <c>{auth_method_kb_interactive_data, {"SSH server", "Enter password for \""++User++"\"", "password: ", false}}</c>
+ </p>
</item>
- <tag><c><![CDATA[{user_passwords, [{string() = User,
- string() = Password}]}]]></c></tag>
+ <tag><c>user_passwords</c></tag>
<item>
- <p>Provides passwords for password authentication. The passwords
- are used when someone tries to connect to the server and
- public key user-authentication fails. The option provides
+ <p>Provides passwords for password authentication. The passwords are used when someone tries
+ to connect to the server and public key user-authentication fails. The option provides
a list of valid usernames and the corresponding passwords.
</p>
</item>
- <tag><c><![CDATA[{password, string()}]]></c></tag>
+
+ <tag><c>password</c></tag>
<item>
- <p>Provides a global password that authenticates any
- user. From a security perspective this option makes
- the server very vulnerable.</p>
+ <p>Provides a global password that authenticates any user.</p>
+ <warning>
+ <p>Intended to facilitate testing.</p>
+ <p>From a security perspective this option makes the server very vulnerable.</p>
+ </warning>
</item>
- <tag><c><![CDATA[{preferred_algorithms, algs_list()}]]></c></tag>
+ <tag><c>pwdfun</c> with <c>pwdfun_4()</c></tag>
<item>
- <p>List of algorithms to use in the algorithm negotiation. The default <c>algs_list()</c> can
- be obtained from <seealso marker="#default_algorithms/0">default_algorithms/0</seealso>.
+	   <p>Provides a function for password validation. This could be used for calling an external system or for handling
+	     passwords stored as hash values.
</p>
- <p>If an alg_entry() is missing in the algs_list(), the default value is used for that entry.</p>
- <p>Here is an example of this option:</p>
- <code>
-{preferred_algorithms,
- [{public_key,['ssh-rsa','ssh-dss']},
- {cipher,[{client2server,['aes128-ctr']},
- {server2client,['aes128-cbc','3des-cbc']}]},
- {mac,['hmac-sha2-256','hmac-sha1']},
- {compression,[none,zlib]}
- ]
-}
-</code>
- <p>The example specifies different algorithms in the two directions (client2server and server2client),
- for cipher but specifies the same algorithms for mac and compression in both directions.
- The kex (key exchange) is implicit but public_key is set explicitly.</p>
-
- <p>For background and more examples see the <seealso marker="configure_algos#introduction">User's Guide</seealso>.</p>
+	   <p>This fun can also be used to introduce delays in authentication attempts, for example by calling
+	     <seealso marker="stdlib:timer#sleep/1">timer:sleep/1</seealso>.</p>
+	   <p>To facilitate, for instance, counting of failed attempts,
+	     the <c>State</c> variable can be used. This state is per connection only. The first time the pwdfun
+	     is called for a connection, the <c>State</c> variable has the value <c>undefined</c>.
+ </p>
+
+ <p>The fun should return:
+ </p>
+ <list type="bulleted">
+	     <item><c>true</c> if the user and password are valid</item>
+	     <item><c>false</c> if the user or password is invalid</item>
+	     <item><c>disconnect</c> if an SSH_MSG_DISCONNECT message should be sent immediately. It will
+	       be followed by a close of the underlying TCP connection.</item>
+	     <item><c>{true, NewState:any()}</c> if the user and password are valid</item>
+	     <item><c>{false, NewState:any()}</c> if the user or password is invalid</item>
+ </list>
- <warning>
- <p>Changing the values can make a connection less secure. Do not change unless you
- know exactly what you are doing. If you do not understand the values then you
- are not supposed to change them.</p>
- </warning>
+	   <p>A third usage is to block login attempts from a misbehaving peer. The <c>State</c> described above
+	     can be used for this, together with the return value <c>disconnect</c>. A sketch of such a fun is
+	     shown after this list.</p>
</item>
- <tag><marker id="option_modify_algorithms"></marker>
- <c><![CDATA[{modify_algorithms, modify_algs_list()}]]></c></tag>
+ <tag><c>pwdfun</c> with <c>pwdfun_2()</c></tag>
<item>
- <p>Modifies the list of algorithms to use in the algorithm negotiation. The modifications are
- applied after the option <c>preferred_algorithms</c> is applied (if existing)</p>
- <p>The possible modifications are to:</p>
- <list>
- <item><p>Append or prepend supported but not enabled algorithm(s) to the list of
- algorithms.</p><p>If the wanted algorithms already are in the list of algorithms, they will first
- be removed and then appended or prepended.
- </p>
- </item>
- <item><p>Remove (rm) one or more algorithms from the list of algorithms.</p></item>
+ <p>Provides a function for password validation. This function is called with user and password
+ as strings, and returns:</p>
+ <list type="bulleted">
+	     <item><c>true</c> if the user and password are valid</item>
+ <item><c>false</c> if the user or password is invalid</item>
</list>
- <p>If an unsupported algorithm is in the list, it will be silently ignored</p>
-
- <p>Here is an example of this option:</p>
- <code>
-{modify_algorithms,
- [{prepend, [{kex, ['diffie-hellman-group1-sha1']}],
- {rm, [{compression, [none]}]}
- ]
-}
-</code>
- <p>The example specifies that:</p>
- <list>
- <item><p>the old key exchange algorithm 'diffie-hellman-group1-sha1' should be
- the main alternative. It will be the main alternative since it is prepened to the list</p>
- </item>
- <item><p>The compression algorithm none (= no compression) is removed so compression is enforced</p>
- </item>
- </list>
- <p>For background and more examples see the <seealso marker="configure_algos#introduction">User's Guide</seealso>.</p>
+ <p>This variant is kept for compatibility.</p>
</item>
+ </taglist>
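+	 <p>A minimal sketch of a <c>pwdfun_4()</c>, where the hard-coded credentials and the limit of
+	   two failed attempts are examples only, that counts failed attempts per connection and
+	   finally disconnects:
+	 </p>
+	 <code>
+ {pwdfun, fun(User, Password, _PeerAddress, State) ->
+              Failed = case State of
+                           undefined -> 0;
+                           N -> N
+                       end,
+              case {User, Password} of
+                  {"foo", "bar"} -> {true, Failed};
+                  _ when Failed >= 2 -> disconnect;
+                  _ -> {false, Failed + 1}
+              end
+          end}
+	 </code>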
+ </desc>
+ </datatype>
- <tag><c><![CDATA[{dh_gex_groups, [{Size=integer(),G=integer(),P=integer()}] | {file,filename()} {ssh_moduli_file,filename()} }]]></c></tag>
+ <datatype>
+ <name name="diffie_hellman_group_exchange_daemon_option"/>
+ <name name="explicit_group"/>
+ <name name="explicit_group_file"/>
+ <name name="ssh_moduli_file"/>
+ <desc>
+ <taglist>
+ <tag><c>dh_gex_groups</c></tag>
<item>
<p>Defines the groups the server may choose among when diffie-hellman-group-exchange is negotiated.
- See RFC 4419 for details. The three variants of this option are:
+ See
+ <url href="https://tools.ietf.org/html/rfc4419">RFC 4419</url>
+ for details. The three variants of this option are:
</p>
<taglist>
<tag><c>{Size=integer(),G=integer(),P=integer()}</c></tag>
@@ -783,7 +594,7 @@
</p>
</item>
- <tag><c><![CDATA[{dh_gex_limits,{Min=integer(),Max=integer()}}]]></c></tag>
+ <tag><c>dh_gex_limits</c></tag>
<item>
<p>Limits what a client can ask for in diffie-hellman-group-exchange.
The limits will be
@@ -794,57 +605,29 @@
</p>
<p>If <c>MaxUsed &lt; MinUsed</c> in a key exchange, it will fail with a disconnect.
</p>
- <p>See RFC 4419 for the function of the Max and Min values.</p>
- </item>
-
- <tag><c><![CDATA[{pwdfun, fun(User::string(), Password::string(), PeerAddress::{ip_adress(),port_number()}, State::any()) -> boolean() | disconnect | {boolean(),any()} }]]></c></tag>
- <item>
- <p>Provides a function for password validation. This could used for calling an external system or if
- passwords should be stored as a hash. The fun returns:
- </p>
- <list type="bulleted">
- <item><c>true</c> if the user and password is valid and</item>
- <item><c>false</c> otherwise.</item>
- </list>
- <p>This fun can also be used to make delays in authentication tries for example by calling
- <seealso marker="stdlib:timer#sleep/1">timer:sleep/1</seealso>. To facilitate counting of failed tries
- the <c>State</c> variable could be used. This state is per connection only. The first time the pwdfun
- is called for a connection, the <c>State</c> variable has the value <c>undefined</c>.
- The pwdfun can return - in addition to the values above - a new state
- as:
- </p>
- <list type="bulleted">
- <item><c>{true, NewState:any()}</c> if the user and password is valid or</item>
- <item><c>{false, NewState:any()}</c> if the user or password is invalid</item>
- </list>
- <p>A third usage is to block login attempts from a missbehaving peer. The <c>State</c> described above
- can be used for this. In addition to the responses above, the following return value is introduced:
- </p>
- <list type="bulleted">
- <item><c>disconnect</c> if the connection should be closed immediately after sending a SSH_MSG_DISCONNECT
- message.</item>
- </list>
- </item>
-
- <tag><c><![CDATA[{pwdfun, fun(User::string(), Password::string()) -> boolean()}]]></c></tag>
- <item>
- <p>Provides a function for password validation. This function is called
- with user and password as strings, and returns
- <c><![CDATA[true]]></c> if the password is valid and
- <c><![CDATA[false]]></c> otherwise.</p>
- <p>This option (<c>{pwdfun,fun/2}</c>) is the same as a subset of the previous
- (<c>{pwdfun,fun/4}</c>). It is kept for compatibility.</p>
+ <p>See
+ <url href="https://tools.ietf.org/html/rfc4419">RFC 4419</url>
+ for the function of the Max and Min values.</p>
</item>
+ </taglist>
+ </desc>
+ </datatype>
- <tag><c><![CDATA[{negotiation_timeout, integer()}]]></c></tag>
- <item>
- <p>Maximum time in milliseconds for the authentication negotiation.
- Defaults to 120000 (2 minutes). If the client fails to log in within this time,
- the connection is closed.
- </p>
- </item>
+ <datatype>
+ <name name="negotiation_timeout_daemon_option"/>
+ <desc>
+ <p>Maximum time in milliseconds for the authentication negotiation.
+ Defaults to 120000 ms (2 minutes). If the client fails to log in within this time,
+ the connection is closed.
+ </p>
+ </desc>
+ </datatype>
- <tag><c><![CDATA[{max_sessions, pos_integer()}]]></c></tag>
+ <datatype>
+ <name name="hardening_daemon_options"/>
+ <desc>
+ <taglist>
+ <tag><c>max_sessions</c></tag>
<item>
<p>The maximum number of simultaneous sessions that are accepted at any time
for this daemon. This includes sessions that are being authorized.
@@ -864,7 +647,7 @@
</p>
</item>
- <tag><c><![CDATA[{max_channels, pos_integer()}]]></c></tag>
+ <tag><c>max_channels</c></tag>
<item>
<p>The maximum number of channels with active remote subsystem that are accepted for
each connection to this daemon</p>
@@ -872,8 +655,7 @@
</p>
</item>
-
- <tag><c><![CDATA[{parallel_login, boolean()}]]></c></tag>
+ <tag><c>parallel_login</c></tag>
<item>
<p>If set to false (the default value), only one login is handled at a time.
If set to true, an unlimited number of login attempts are allowed simultaneously.
@@ -890,171 +672,543 @@
</warning>
</item>
- <tag><c><![CDATA[{minimal_remote_max_packet_size, non_negative_integer()}]]></c></tag>
+ <tag><c>minimal_remote_max_packet_size</c></tag>
<item>
- <p>The least maximum packet size that the daemon will accept in channel open requests from the client. The default value is 0.
+ <p>The least maximum packet size that the daemon will accept in channel open requests from the client.
+ The default value is 0.
</p>
</item>
+
+ </taglist>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="callbacks_daemon_options"/>
+ <desc>
+ <taglist>
+ <tag><c>connectfun</c></tag>
+ <item>
+ <p>Provides a fun to implement your own logging when a user authenticates to the server.</p>
+ </item>
- <tag><c><![CDATA[{id_string, random | string()}]]></c></tag>
+ <tag><c>failfun</c></tag>
<item>
- <p>The string the daemon will present to a connecting peer initially. The default value is "Erlang/VSN" where VSN is the ssh application version number.
- </p>
- <p>The value <c>random</c> will cause a random string to be created at each connection attempt. This is to make it a bit more difficult for a malicious peer to find the ssh software brand and version.
- </p>
+ <p>Provides a fun to implement your own logging when a user fails to authenticate.</p>
</item>
+ </taglist>
+ </desc>
+ </datatype>
- <tag><c><![CDATA[{send_ext_info, boolean()}]]></c></tag>
- <item>
- <p>Send a list of extensions to the client if the client has asked for it. See
- <url href="https://tools.ietf.org/html/draft-ietf-curdle-ssh-ext-info">Draft-ietf-curdle-ssh-ext-info (work in progress)</url> for details.
- </p>
- <p>Currently implemented extension is sending <c>server-sig-algs</c> which is the list of the server's preferred
- user's public key algorithms.
- </p>
- <p>Default value is <c>true</c>.
- </p>
- </item>
+ <datatype>
+ <name name="send_ext_info_daemon_option"/>
+ <desc>
+	 <p>Makes the server (daemon) tell the client that the server accepts extension negotiation, that is,
+ include <c>ext-info-s</c> in the kexinit message sent. See
+ <url href="https://tools.ietf.org/html/rfc8308">RFC 8308</url>
+ for details and <seealso marker="SSH_app#supported-ext-info">ssh(6)</seealso>
+ for a list of currently implemented extensions.
+ </p>
+ <p>Default value is <c>true</c> which is compatible with other implementations not supporting ext-info.
+ </p>
+ </desc>
+ </datatype>
- <!--tag><c><![CDATA[{recv_ext_info, boolean()}]]></c></tag>
- <item>
- <p>Tell the client that the server accepts extension negotiation. See
- <url href="https://tools.ietf.org/html/draft-ietf-curdle-ssh-ext-info">Draft-ietf-curdle-ssh-ext-info (work in progress)</url> for details.
- </p>
- <p>Default value is <c>true</c>.
- </p>
- </item-->
- <tag><c><![CDATA[{key_cb, key_cb()}]]></c></tag>
+ <!--................................................................-->
+ <datatype_title>Options common to clients and daemons</datatype_title>
+ <datatype>
+ <name name="common_options"/>
+ <name name="common_option"/>
+ <desc><p>The options above can be used both in clients and in daemons (servers). They are further explained below.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="user_dir_common_option"/>
+ <desc>
+ <p>Sets the user directory. That is, the directory containing <c>ssh</c> configuration
+ files for the user, such as
+	   <c>known_hosts</c>, <c>id_rsa</c>, <c>id_dsa</c>, <c>id_ecdsa</c> and <c>authorized_key</c>.
+ Defaults to the directory normally referred to as <c>~/.ssh</c>.
+ </p>
+ <p>See also the option
+ <seealso marker="#type-key_cb_common_option">key_cb</seealso>
+ for the general way to handle keys.
+ </p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="profile_common_option"/>
+ <desc>
+	 <p>Used together with <c>ip-address</c> and <c>port</c> to
+	   uniquely identify an ssh daemon. This can be useful in a
+	   virtualized environment, where there can be more than one
+	   server that has the same <c>ip-address</c> and
+	   <c>port</c>. If this property is not explicitly set, it is
+	   assumed that the <c>ip-address</c> and <c>port</c>
+	   uniquely identify the SSH daemon.
+ </p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="max_idle_time_common_option"/>
+ <desc>
+ <p>Sets a time-out on a connection when no channels are active. Defaults to <c>infinity</c>.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="rekey_limit_common_option"/>
+ <desc>
+ <p>Sets a limit, in bytes, when rekeying is to be initiated.
+	   Defaults to once per GB and once per hour.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="key_cb_common_option"/>
+ <desc>
+ <p>Module implementing the behaviour
+ <seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and/or
+ <seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>.
+ Can be used to
+ customize the handling of public keys. If callback options are provided
+ along with the module name, they are made available to the callback
+ module via the options passed to it under the key 'key_cb_private'.
+ </p>
+ <p>The <c>Opts</c> defaults to <c>[]</c> when only the <c>Module</c> is specified.
+ </p>
+ <p>The default value of this option is <c>{ssh_file, []}</c>.
+ </p>
+ <p>A call to the call-back function <c>F</c> will be</p>
+ <code>
+ Module:F(..., [{key_cb_private,Opts}|UserOptions])
+ </code>
+ <p>where <c>...</c> are arguments to <c>F</c> as in
+ <seealso marker="ssh_client_key_api">ssh_client_key_api</seealso> and/or
+ <seealso marker="ssh_server_key_api">ssh_server_key_api</seealso>.
+ The <c>UserOptions</c> are the options given to <c>ssh:connect</c>, <c>ssh:shell</c> or <c>ssh:daemon</c>.
+ </p>
+
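+	 <p>A hedged example, where the module name <c>my_key_api</c> and its option <c>keys_dir</c> are
+	   hypothetical, of passing the option with private callback options:
+	 </p>
+	 <code>
+ {key_cb, {my_key_api, [{keys_dir, "/some/dir"}]}}
+	 </code>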
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="disconnectfun_common_option"/>
+ <desc>
+ <p>Provides a fun to implement your own logging when the peer disconnects.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="unexpectedfun_common_option"/>
+ <desc>
+ <p>Provides a fun to implement your own logging or other action when an unexpected message arrives.
+ If the fun returns <c>report</c> the usual info report is issued but if <c>skip</c> is returned no
+ report is generated.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="ssh_msg_debug_fun_common_option"/>
+ <desc>
+	 <p>Provides a fun to implement your own logging of the SSH message SSH_MSG_DEBUG.
+ The last three parameters are from the message, see
+ <url href="https://tools.ietf.org/html/rfc4253#section-11.3">RFC 4253, section 11.3</url>.
+ The <seealso marker="#type-connection_ref"><c>connection_ref()</c></seealso> is the reference
+ to the connection on which the message arrived.
+ The return value from the fun is not checked.
+ </p>
+	 <p>The default behaviour is to ignore the message.
+ To get a printout for each message with <c>AlwaysDisplay = true</c>,
+ use for example <c>{ssh_msg_debug_fun, fun(_,true,M,_)-> io:format("DEBUG: ~p~n", [M]) end}</c></p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="id_string_common_option"/>
+ <desc>
+ <p>The string the daemon will present to a connecting peer initially.
+ The default value is "Erlang/VSN" where VSN is the ssh application version number.
+ </p>
+ <p>The value <c>random</c> will cause a random string to be created at each connection attempt.
+ This is to make it a bit more difficult for a malicious peer to find the ssh software brand and version.
+ </p>
+ <p>The value <c>{random, Nmin, Nmax}</c> will make a random string with at least <c>Nmin</c> characters and
+ at most <c>Nmax</c> characters.
+ </p>
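+	 <p>For example, to present a random string of at least 6 and at most 12 characters to a
+	   connecting peer:
+	 </p>
+	 <code>
+ {id_string, {random, 6, 12}}
+	 </code>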
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="preferred_algorithms_common_option"/>
+ <name name="algs_list"/>
+ <name name="alg_entry"/>
+ <name name="kex_alg"/>
+ <name name="pubkey_alg"/>
+ <name name="cipher_alg"/>
+ <name name="mac_alg"/>
+ <name name="compression_alg"/>
+ <name name="double_algs"/>
+
+ <desc>
+ <p>List of algorithms to use in the algorithm negotiation. The default <c>algs_list()</c> can
+ be obtained from <seealso marker="#default_algorithms/0">default_algorithms/0</seealso>.
+ </p>
+ <p>If an alg_entry() is missing in the algs_list(), the default value is used for that entry.</p>
+ <p>Here is an example of this option:</p>
+ <code>
+ {preferred_algorithms,
+ [{public_key,['ssh-rsa','ssh-dss']},
+ {cipher,[{client2server,['aes128-ctr']},
+ {server2client,['aes128-cbc','3des-cbc']}]},
+ {mac,['hmac-sha2-256','hmac-sha1']},
+ {compression,[none,zlib]}
+ ]
+ }
+ </code>
+ <p>The example specifies different algorithms in the two directions (client2server and server2client),
+ for cipher but specifies the same algorithms for mac and compression in both directions.
+ The kex (key exchange) is implicit but public_key is set explicitly.</p>
+
+ <p>For background and more examples see the <seealso marker="configure_algos#introduction">User's Guide</seealso>.</p>
+
+ <p>If an algorithm name occurs more than once in a list, the behaviour is undefined. The tags in the property lists
+	 are also assumed to occur at most once.
+ </p>
+
+ <warning>
+ <p>Changing the values can make a connection less secure. Do not change unless you
+ know exactly what you are doing. If you do not understand the values then you
+ are not supposed to change them.</p>
+ </warning>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="modify_algorithms_common_option"/>
+ <name name="modify_algs_list"/>
+ <desc>
+ <p>Modifies the list of algorithms to use in the algorithm negotiation. The modifications are
+ applied after the option <c>preferred_algorithms</c> (if existing) is applied.</p>
+	 <p>The algorithm for modifications works like this:</p>
+ <list>
<item>
- <p>Module implementing the behaviour <seealso
- marker="ssh_server_key_api">ssh_server_key_api</seealso>. Can be used to
- customize the handling of public keys. If callback options are provided
- along with the module name, they are made available to the callback
- module via the options passed to it under the key 'key_cb_private'.
+ <p>Input is the <c>modify_algs_list()</c> and a set of algorithms <c>A</c>
+ obtained from the <c>preferred_algorithms</c> option if existing, or else from the
+ <seealso marker="ssh#default_algorithms-0">ssh:default_algorithms/0</seealso>.
</p>
</item>
-
- <tag><c>{profile, atom()}</c></tag>
<item>
- <p>Used together with <c>ip-address</c> and <c>port</c> to
- uniquely identify a ssh daemon. This can be useful in a
- virtualized environment, where there can be more that one
- server that has the same <c>ip-address</c> and
- <c>port</c>. If this property is not explicitly set, it is
- assumed that the the <c>ip-address</c> and <c>port</c>
- uniquely identifies the SSH daemon.
- </p>
+ <p>The head of the <c>modify_algs_list()</c> modifies <c>A</c> giving the result <c>A'</c>.</p>
+ <p>The possible modifications are:</p>
+ <list>
+ <item>
+ <p>Append or prepend supported but not enabled algorithm(s) to the list of
+	       algorithms. If the wanted algorithms already are in <c>A</c>, they will first
+	       be removed and then appended or prepended.
+ </p>
+ </item>
+ <item>
+ <p>Remove (rm) one or more algorithms from <c>A</c>.
+ </p>
+ </item>
+ </list>
</item>
-
- <tag><c><![CDATA[{fd, file_descriptor()}]]></c></tag>
- <item>
- <p>Allows an existing file-descriptor to be used
- (passed on to the transport protocol).</p></item>
- <tag><c><![CDATA[{failfun, fun(User::string(),
- PeerAddress::ip_address(), Reason::term()) -> _}]]></c></tag>
<item>
- <p>Provides a fun to implement your own logging when a user fails to authenticate.</p>
+ <p>Repeat the modification step with the tail of <c>modify_algs_list()</c> and the resulting
+ <c>A'</c>.
+ </p>
</item>
- <tag><c><![CDATA[{connectfun, fun(User::string(), PeerAddress::ip_address(),
- Method::string()) ->_}]]></c></tag>
- <item>
- <p>Provides a fun to implement your own logging when a user authenticates to the server.</p>
+ </list>
+	 <p>If an unsupported algorithm is in the <c>modify_algs_list()</c>, it will be silently ignored.</p>
+	 <p>If more than one <c>modify_algorithms</c> option is given, the result is undefined.</p>
+ <p>Here is an example of this option:</p>
+ <code>
+ {modify_algorithms,
+ [{prepend, [{kex, ['diffie-hellman-group1-sha1']}],
+ {rm, [{compression, [none]}]}
+ ]
+ }
+ </code>
+ <p>The example specifies that:</p>
+ <list>
+ <item><p>the old key exchange algorithm 'diffie-hellman-group1-sha1' should be
+	   the main alternative. It will be the main alternative since it is prepended to the list.</p>
</item>
- <tag><c><![CDATA[{disconnectfun, fun(Reason:term()) -> _}]]></c></tag>
- <item>
- <p>Provides a fun to implement your own logging when a user disconnects from the server.</p>
+	   <item><p>The compression algorithm none (= no compression) is removed, so compression is enforced.</p>
</item>
+ </list>
+ <p>For background and more examples see the <seealso marker="configure_algos#introduction">User's Guide</seealso>.</p>
+ </desc>
+ </datatype>
- <tag><c><![CDATA[{unexpectedfun, fun(Message:term(), Peer) -> report | skip }]]></c></tag>
- <item>
- <p>Provides a fun to implement your own logging or other action when an unexpected message arrives.
- If the fun returns <c>report</c> the usual info report is issued but if <c>skip</c> is returned no
- report is generated.</p>
- <p><c>Peer</c> is in the format of <c>{Host,Port}</c>.</p>
- </item>
- <tag><c><![CDATA[{idle_time, integer()}]]></c></tag>
- <item>
- <p>Sets a time-out on a connection when no channels are active.
- Defaults to <c>infinity</c>.</p>
- </item>
+ <datatype>
+ <name name="inet_common_option"/>
+ <desc>
+ <p>IP version to use when the host address is specified as <c>any</c>.</p>
+ </desc>
+ </datatype>
- <tag><c><![CDATA[{ssh_msg_debug_fun, fun(ConnectionRef::ssh_connection_ref(), AlwaysDisplay::boolean(), Msg::binary(), LanguageTag::binary()) -> _}]]></c></tag>
- <item>
- <p>Provide a fun to implement your own logging of the SSH message SSH_MSG_DEBUG. The last three parameters are from the message, see RFC4253, section 11.3. The <c>ConnectionRef</c> is the reference to the connection on which the message arrived. The return value from the fun is not checked.</p>
- <p>The default behaviour is ignore the message.
- To get a printout for each message with <c>AlwaysDisplay = true</c>, use for example <c>{ssh_msg_debug_fun, fun(_,true,M,_)-> io:format("DEBUG: ~p~n", [M]) end}</c></p>
- </item>
+ <datatype>
+ <name name="auth_methods_common_option"/>
+ <desc>
+	 <p>Comma-separated string that determines which authentication methods the client shall
+	   support and in which order they are tried. Defaults to <c>"publickey,keyboard-interactive,password"</c>.
+	 </p>
+ <p>Note that the client is free to use any order and to exclude methods.
+ </p>
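+	 <p>For example, to only try public key and password authentication, in that order:
+	 </p>
+	 <code>
+ {auth_methods, "publickey,password"}
+	 </code>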
+ </desc>
+ </datatype>
- </taglist>
- </desc>
- </func>
+ <datatype>
+ <name name="fd_common_option"/>
+ <desc>
+ <p>Allows an existing file-descriptor to be used (passed on to the transport protocol).</p>
+ </desc>
+ </datatype>
- <func>
- <name>daemon_info(Daemon) -> {ok, [DaemonInfo]} | {error,Error}</name>
- <fsummary>Get info about a daemon</fsummary>
- <type>
- <v>DaemonInfo = {port,Port::pos_integer()} | {listen_address, any|ip_address()} | {profile,atom()}</v>
- <v>Port = integer()</v>
- <v>Error = bad_daemon_ref</v>
- </type>
+ <!--................................................................-->
+ <datatype_title>Other data types</datatype_title>
+
+ <datatype>
+ <name name="host"/>
+ <desc>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="ip_port"/>
<desc>
- <p>Returns a key-value list with information about the daemon. For now, only the listening port is returned. This is intended for the case the daemon is started with the port set to 0.</p>
</desc>
+ </datatype>
+
+ <datatype>
+ <name name="mod_args"/>
+ <desc>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="mod_fun_args"/>
+ <desc>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="open_socket"/>
+ <desc>
+	 <p>The socket is supposed to be the result of a <seealso marker="kernel:gen_tcp#connect-3">gen_tcp:connect</seealso>
+	   or a <seealso marker="kernel:gen_tcp#accept-1">gen_tcp:accept</seealso>. The socket must be in passive
+	   mode (that is, opened with the option <c>{active,false}</c>).
+ </p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="daemon_ref"/>
+ <desc>
+ <p>Opaque data type representing a daemon.</p>
+ <p>Returned by the functions <seealso marker="ssh#daemon-1"><c>daemon/1,2,3</c></seealso>.</p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name>connection_ref()</name>
+ <desc>
+ <p>Opaque data type representing a connection between a client and a server (daemon).</p>
+ <p>Returned by the functions
+ <seealso marker="ssh#connect-3"><c>connect/2,3,4</c></seealso> and
+ <seealso marker="ssh_sftp#start_channel-2"><c>ssh_sftp:start_channel/2,3</c></seealso>.
+ </p>
+ </desc>
+ </datatype>
+
+ <datatype>
+ <name name="channel_id"/>
+ <desc>
+ <p>Opaque data type representing a channel inside a connection.</p>
+ <p>Returned by the functions
+ <seealso marker="ssh_connection#session_channel/2">ssh_connection:session_channel/2,4</seealso>.
+ </p>
+ </desc>
+ </datatype>
+
+
+ <datatype>
+ <name>opaque_client_options</name>
+ <name>opaque_daemon_options</name>
+ <name>opaque_common_options</name>
+ <desc>
+ <marker id="type-opaque_client_options"/>
+ <marker id="type-opaque_daemon_options"/>
+ <marker id="type-opaque_common_options"/>
+ <p>Opaque types that define experimental options that are not to be used in products.</p>
+ </desc>
+ </datatype>
+ </datatypes>
+
+<!--
+ ================================================================
+ = Function definitions =
+ ================================================================
+-->
+
+ <funcs>
+
+<!-- CLOSE/1 -->
+ <func>
+ <name name="close" arity="1"/>
+ <fsummary>Closes an SSH connection.</fsummary>
+ <desc><p>Closes an SSH connection.</p></desc>
</func>
+
+<!-- CONNECT/2 etc -->
+ <func>
+ <name>connect(Host, Port, Options) -> Result </name>
+ <name>connect(Host, Port, Options, NegotiationTimeout) -> Result </name>
+ <name>connect(TcpSocket, Options) -> Result</name>
+ <name>connect(TcpSocket, Options, NegotiationTimeout) -> Result</name>
+ <fsummary>Connects to an SSH server.</fsummary>
+ <type>
+ <v>Host = <seealso marker="#type-host">host()</seealso></v>
+ <v>Port = <seealso marker="kernel:inet#type-port_number">inet:port_number()</seealso></v>
+ <v>Options = <seealso marker="#type-client_options">client_options()</seealso></v>
+ <v>TcpSocket = <seealso marker="#type-open_socket">open_socket()</seealso></v>
+ <v>NegotiationTimeout = timeout()</v>
+ <v>Result = {ok, <seealso marker="#type-connection_ref">connection_ref()</seealso>} | {error, term()}</v>
+ </type>
+ <desc>
+ <p>Connects to an SSH server at the <c>Host</c> on <c>Port</c>.
+ </p>
+	 <p>As an alternative, an already open TCP socket can be passed to the function in <c>TcpSocket</c>.
+	   The SSH initiation and negotiation will then be performed on that socket, with the SSH server that is
+	   expected to be at the other end.
+	 </p>
+ <p>No channel is started. This is done by calling <seealso marker="ssh_connection#session_channel/2">
+ ssh_connection:session_channel/[2, 4]</seealso>.
+ </p>
+	 <p>The <c>NegotiationTimeout</c> is in milliseconds. The default value is <c>infinity</c>.
+ For connection timeout, use the option
+ <seealso marker="#type-connect_timeout_client_option"><c>connect_timeout</c></seealso>.
+ </p>
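+	 <p>A minimal sketch, where the host name, user and password are examples only and
+	   <c>silently_accept_hosts</c> is used just to avoid an interactive host key question:
+	 </p>
+	 <code>
+ {ok, ConnectionRef} =
+     ssh:connect("ssh.example.com", 22, [{user, "foo"},
+                                         {password, "bar"},
+                                         {silently_accept_hosts, true}]).
+	 </code>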
+ </desc>
+ </func>
+
+<!-- CONNECTION_INFO/1, CONNECTION_INFO/2 -->
+ <func>
+ <name name="connection_info" arity="2"/>
+ <fsummary>Retrieves information about a connection.</fsummary>
+ <desc>
+	 <p>Retrieves information about a connection. The list <c>Keys</c> defines which information
+ is returned.</p>
+ </desc>
+ </func>
+
+<!-- DEAMON/1,2,3 -->
+ <func>
+ <name>daemon(Port | TcpSocket) -> Result</name>
+ <name>daemon(Port | TcpSocket, Options) -> Result</name>
+ <name>daemon(HostAddress, Port, Options) -> Result</name>
+ <fsummary>Starts a server listening for SSH connections.</fsummary>
+ <type>
+ <v>Port = integer()</v>
+ <v>TcpSocket = <seealso marker="#type-open_socket">open_socket()</seealso></v>
+ <v>Options = <seealso marker="#type-daemon_options">daemon_options()</seealso></v>
+ <v>HostAddress = <seealso marker="#type-host">host()</seealso> | any</v>
+ <v>Result = {ok, <seealso marker="#type-daemon_ref">daemon_ref()</seealso>} | {error, atom()}</v>
+ </type>
+ <desc>
+ <p>Starts a server listening for SSH connections on the given port. If the <c>Port</c> is 0,
+ a random free port is selected. See <seealso marker="#daemon_info/1">daemon_info/1</seealso>
+ about how to find the selected port number.
+ </p>
+	 <p>As an alternative, an already open TCP socket can be passed to the function in <c>TcpSocket</c>.
+	   The SSH initiation and negotiation will then be performed on that socket when an SSH client starts
+	   the protocol at the other end of the TCP socket.
+ </p>
+ <p>For a description of the options, see <seealso marker="#type-daemon_options">Daemon Options</seealso>.
+ </p>
+	 <p>Please note that for historical reasons both the <c>HostAddress</c> argument and the
+ <seealso marker="kernel:gen_tcp#type-connect_option">gen_tcp connect_option() <c>{ip,Address}</c></seealso>
+ set the listening address. This is a source of possible inconsistent settings.
+ </p>
+ <p>The rules for handling the two address passing options are:</p>
+ <list>
+ <item>if <c>HostAddress</c> is an IP-address, that IP-address is the listening address.
+ An 'ip'-option will be discarded if present.</item>
+
+ <item>if <c>HostAddress</c> is the atom <c>loopback</c>, the listening address
+	     is <c>loopback</c> and a loopback address will be chosen by the underlying layers.
+ An 'ip'-option will be discarded if present.</item>
+
+ <item>if <c>HostAddress</c> is the atom <c>any</c> and no 'ip'-option is present, the listening address is
+ <c>any</c> and the socket will listen to all addresses</item>
+
+ <item>if <c>HostAddress</c> is <c>any</c> and an 'ip'-option is present, the listening address is
+ set to the value of the 'ip'-option</item>
+ </list>
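+	 <p>A minimal sketch, where the port, directories and credentials are examples only and where the
+	   <c>system_dir</c> is assumed to contain host key files:
+	 </p>
+	 <code>
+ {ok, DaemonRef} =
+     ssh:daemon(8989, [{system_dir, "/tmp/ssh_daemon"},
+                       {user_dir, "/tmp/ssh_users"},
+                       {user_passwords, [{"foo", "bar"}]}]).
+	 </code>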
+ </desc>
+ </func>
+
+<!-- DAEMON_INFO/1 -->
+ <func>
+ <name name="daemon_info" arity="1"/>
+ <fsummary>Get info about a daemon</fsummary>
+ <desc>
+ <p>Returns a key-value list with information about the daemon.</p>
+ </desc>
+ </func>
+
+<!-- DEFAULT_ALGORITHMS/0 -->
<func>
- <name>default_algorithms() -> algs_list()</name>
+ <name name="default_algorithms" arity="0"/>
<fsummary>Get a list declaring the supported algorithms</fsummary>
<desc>
<p>Returns a key-value list, where the keys are the different types of algorithms and the values are the
- algorithms themselves. An example:</p>
- <code>
-20> ssh:default_algorithms().
-[{kex,['diffie-hellman-group1-sha1']},
- {public_key,['ssh-rsa','ssh-dss']},
- {cipher,[{client2server,['aes128-ctr','aes128-cbc','3des-cbc']},
- {server2client,['aes128-ctr','aes128-cbc','3des-cbc']}]},
- {mac,[{client2server,['hmac-sha2-256','hmac-sha1']},
- {server2client,['hmac-sha2-256','hmac-sha1']}]},
- {compression,[{client2server,[none,zlib]},
- {server2client,[none,zlib]}]}]
-21>
-</code>
+ algorithms themselves.</p>
+ <p>See the <seealso marker="configure_algos#example_default_algorithms">User's Guide</seealso> for
+ an example.</p>
</desc>
</func>
+<!-- SHELL/1,2,3 -->
<func>
- <name>shell(Host) -> </name>
- <name>shell(Host, Option) -> </name>
- <name>shell(Host, Port, Option) -> </name>
- <name>shell(TcpSocket) -> _</name>
- <fsummary>Starts an interactive shell over an SSH server.</fsummary>
+ <name>shell(Host | TcpSocket) -> Result </name>
+ <name>shell(Host | TcpSocket, Options) -> Result </name>
+ <name>shell(Host, Port, Options) -> Result </name>
+ <fsummary>Starts an interactive shell on a remote SSH server.</fsummary>
<type>
- <v>Host = string()</v>
- <v>Port = integer()</v>
- <v>Options - see ssh:connect/3</v>
- <v>TcpSocket = port()</v>
- <d>The socket is supposed to be from <seealso marker="kernel:gen_tcp#connect-3">gen_tcp:connect</seealso> or <seealso marker="kernel:gen_tcp#accept-1">gen_tcp:accept</seealso> with option <c>{active,false}</c></d>
+ <v>Host = <seealso marker="#type-host">host()</seealso></v>
+ <v>TcpSocket = <seealso marker="#type-open_socket">open_socket()</seealso></v>
+ <v>Port = <seealso marker="kernel:inet#type-port_number">inet:port_number()</seealso></v>
+ <v>Options = <seealso marker="#type-client_options">client_options()</seealso></v>
+ <v>Result = ok | {error, Reason::term()}</v>
</type>
<desc>
- <p>Starts an interactive shell over an SSH server on the
- given <c>Host</c>. The function waits for user input,
- and does not return until the remote shell is ended (that is,
+ <p>Connects to an SSH server at <c>Host</c> and <c>Port</c> (defaults to 22) and starts an
+ interactive shell on that remote host.
+ </p>
+	 <p>As an alternative, an already open TCP socket can be passed to the function in <c>TcpSocket</c>.
+	   The SSH initiation and negotiation will then be performed on that socket and finally a shell will be started
+	   on the host at the other end of the TCP socket.
+ </p>
+ <p>For a description of the options, see <seealso marker="#type-client_options">Client Options</seealso>.</p>
+ <p>The function waits for user input, and does not return until the remote shell is ended (that is,
exit from the shell).
</p>
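+	 <p>A minimal sketch, where the host name and user are examples only:
+	 </p>
+	 <code>
+ ssh:shell("ssh.example.com", 22, [{user, "foo"}]).
+	 </code>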
</desc>
</func>
<func>
- <name>start() -> </name>
- <name>start(Type) -> ok | {error, Reason}</name>
+ <name name="start" arity="0"/>
+ <name name="start" arity="1"/>
<fsummary>Starts the SSH application.</fsummary>
- <type>
- <v>Type = permanent | transient | temporary</v>
- <v>Reason = term() </v>
- </type>
<desc>
<p>Utility function that starts the applications <c>crypto</c>, <c>public_key</c>,
and <c>ssh</c>. Default type is <c>temporary</c>.
@@ -1064,11 +1218,8 @@
</func>
<func>
- <name>stop() -> ok | {error, Reason}</name>
+ <name name="stop" arity="0"/>
<fsummary>Stops the <c>ssh</c> application.</fsummary>
- <type>
- <v>Reason = term()</v>
- </type>
<desc>
<p>Stops the <c>ssh</c> application.
For more information, see the <seealso marker="kernel:application">application(3)</seealso>
@@ -1077,34 +1228,22 @@
</func>
<func>
- <name>stop_daemon(DaemonRef) -> </name>
- <name>stop_daemon(Address, Port) -> ok </name>
- <fsummary>Stops the listener and all connections started by
- the listener.</fsummary>
- <type>
- <v>DaemonRef = ssh_daemon_ref()</v>
- <v>Address = ip_address()</v>
- <v>Port = integer()</v>
- </type>
+ <name name="stop_daemon" arity="1"/>
+ <name name="stop_daemon" arity="2"/>
+ <name name="stop_daemon" arity="3"/>
+ <fsummary>Stops the listener and all connections started by the listener.</fsummary>
<desc>
- <p>Stops the listener and all connections started by
- the listener.</p>
+ <p>Stops the listener and all connections started by the listener.</p>
</desc>
</func>
<func>
- <name>stop_listener(DaemonRef) -> </name>
- <name>stop_listener(Address, Port) -> ok </name>
- <fsummary>Stops the listener, but leaves existing connections started
- by the listener operational.</fsummary>
- <type>
- <v>DaemonRef = ssh_daemon_ref()</v>
- <v>Address = ip_address()</v>
- <v>Port = integer()</v>
- </type>
+ <name name="stop_listener" arity="1"/>
+ <name name="stop_listener" arity="2"/>
+ <name name="stop_listener" arity="3"/>
+ <fsummary>Stops the listener, but leaves existing connections started by the listener operational.</fsummary>
<desc>
- <p>Stops the listener, but leaves existing connections started
- by the listener operational.</p>
+ <p>Stops the listener, but leaves existing connections started by the listener operational.</p>
</desc>
</func>
diff --git a/lib/ssh/doc/src/ssh_app.xml b/lib/ssh/doc/src/ssh_app.xml
index faee9ef992..6d180a5272 100644
--- a/lib/ssh/doc/src/ssh_app.xml
+++ b/lib/ssh/doc/src/ssh_app.xml
@@ -333,7 +333,8 @@
<item><url href="https://tools.ietf.org/html/rfc8332">RFC 8332</url>, Use of RSA Keys with SHA-256 and SHA-512 in the Secure Shell (SSH) Protocol.
</item>
- <item><url href="https://tools.ietf.org/html/rfc8308">RFC 8308</url>, Extension Negotiation in the Secure Shell (SSH) Protocol.
+ <item><marker id="supported-ext-info"/>
+ <url href="https://tools.ietf.org/html/rfc8308">RFC 8308</url>, Extension Negotiation in the Secure Shell (SSH) Protocol.
<p>Implemented are:</p>
<list type="bulleted">
<item>The Extension Negotiation Mechanism</item>
diff --git a/lib/ssh/doc/src/ssh_channel.xml b/lib/ssh/doc/src/ssh_client_channel.xml
index 7b598494f7..eed49beffa 100644
--- a/lib/ssh/doc/src/ssh_channel.xml
+++ b/lib/ssh/doc/src/ssh_client_channel.xml
@@ -23,21 +23,27 @@
The Initial Developer of the Original Code is Ericsson AB.
</legalnotice>
- <title>ssh_channel</title>
+ <title>ssh_client_channel</title>
<prepared></prepared>
<docno></docno>
<date></date>
<rev></rev>
</header>
- <module>ssh_channel</module>
- <modulesummary>-behaviour(ssh_channel).
+ <module>ssh_client_channel</module>
+ <modulesummary>-behaviour(ssh_client_channel). (Replaces ssh_channel)
</modulesummary>
<description>
+ <note>
+ <p>This module replaces ssh_channel.</p>
+ <p>The old module is still available for compatibility, but should not be used for new programs.
+      The old module will not be maintained except for some error corrections.
+ </p>
+ </note>
<p>SSH services (clients and servers) are implemented as channels
that are multiplexed over an SSH connection and communicates over
the <url href="http://www.ietf.org/rfc/rfc4254.txt"> SSH
Connection Protocol</url>. This module provides a callback API
- that takes care of generic channel aspects, such as flow control
+ that takes care of generic channel aspects for clients, such as flow control
and close messages. It lets the callback functions take care of
the service (application) specific parts. This behavior also ensures
that the channel process honors the principal of an OTP-process so
@@ -46,41 +52,19 @@
the <c>ssh</c> applications supervisor tree.
</p>
- <note><p>When implementing an <c>ssh</c> subsystem, use
- <c>-behaviour(ssh_daemon_channel)</c> instead of <c>-behaviour(ssh_channel)</c>.
- The reason is that the only relevant callback functions for subsystems are
- <c>init/1</c>, <c>handle_ssh_msg/2</c>, <c>handle_msg/2</c>, and <c>terminate/2</c>.
- So, the <c>ssh_daemon_channel</c> behaviour is a limited version of the
- <c>ssh_channel</c> behaviour.
- </p></note>
- </description>
+    <note><p>When implementing an <c>ssh</c> subsystem for daemons, use
+ <seealso marker="ssh_server_channel">-behaviour(ssh_server_channel)</seealso> (Replaces ssh_daemon_channel)
+ instead.
+ </p>
+ </note>
- <section>
- <title>DATA TYPES</title>
+ <dont>
+ <p>Functions in this module are not supposed to be called outside a module implementing this
+ behaviour!
+ </p>
+ </dont>
- <p>Type definitions that are used more than once in this module,
- or abstractions to indicate the intended use of the data
- type, or both:</p>
-
- <taglist>
- <tag><c>boolean() =</c></tag>
- <item><p><c>true | false</c></p></item>
- <tag><c>string() =</c></tag>
- <item><p>list of ASCII characters</p></item>
- <tag><c>timeout() =</c></tag>
- <item><p><c>infinity | integer()</c> in milliseconds</p></item>
- <tag><c>ssh_connection_ref() =</c></tag>
- <item><p>opaque() -as returned by
- <c>ssh:connect/3</c> or sent to an SSH channel process</p></item>
- <tag><c>ssh_channel_id() =</c></tag>
- <item><p><c>integer()</c></p></item>
- <tag><c>ssh_data_type_code() =</c></tag>
- <item><p><c>1</c> ("stderr") | <c>0</c> ("normal") are
- the valid values,
- see <url href="http://www.ietf.org/rfc/rfc4254.txt">RFC 4254</url>
- Section 5.2</p></item>
- </taglist>
- </section>
+ </description>
<funcs>
<func>
@@ -89,7 +73,7 @@
<fsummary>Makes a synchronous call to a channel.</fsummary>
<type>
<v>ChannelRef = pid() </v>
- <d>As returned by <seealso marker = "#start_link-4">ssh_channel:start_link/4</seealso></d>
+ <d>As returned by <seealso marker = "#start_link-4">start_link/4</seealso></d>
<v>Msg = term()</v>
<v>Timeout = timeout()</v>
<v>Reply = term()</v>
@@ -113,7 +97,7 @@
ChannelRef and returns ok.</fsummary>
<type>
<v>ChannelRef = pid()</v>
- <d>As returned by <seealso marker = "#start_link-4">ssh_channel:start_link/4</seealso></d>
+ <d>As returned by <seealso marker = "#start_link-4">start_link/4</seealso></d>
<v>Msg = term()</v>
</type>
<desc>
@@ -126,29 +110,29 @@
</desc>
</func>
- <func>
+ <func>
<name>enter_loop(State) -> _ </name>
- <fsummary>Makes an existing process an ssh_channel process.</fsummary>
+ <fsummary>Makes an existing process an ssh_client_channel (replaces ssh_channel) process.</fsummary>
<type>
<v>State = term()</v>
- <d>as returned by <seealso marker = "#init-1">ssh_channel:init/1</seealso></d>
+ <d>as returned by <seealso marker = "#init-1">init/1</seealso></d>
</type>
<desc>
- <p>Makes an existing process an <c>ssh_channel</c>
+ <p>Makes an existing process an <c>ssh_client_channel</c> (replaces ssh_channel)
process. Does not return, instead the calling process
- enters the <c>ssh_channel</c> process receive loop and become an
- <c>ssh_channel process</c>. The process must have been started using
+ enters the <c>ssh_client_channel</c> (replaces ssh_channel) process receive loop and become an
+ <c>ssh_client_channel</c> process. The process must have been started using
one of the start functions in <c>proc_lib</c>, see the <seealso
marker="stdlib:proc_lib">proc_lib(3)</seealso> manual page in STDLIB.
The user is responsible for any initialization of the process
- and must call <seealso marker = "#init-1">ssh_channel:init/1</seealso>.
+ and must call <seealso marker = "#init-1">init/1</seealso>.
</p>
</desc>
</func>
<func>
<name>init(Options) -> {ok, State} | {ok, State, Timeout} | {stop, Reason} </name>
- <fsummary>Initiates an <c>ssh_channel</c> process.</fsummary>
+ <fsummary>Initiates an <c>ssh_client_channel</c> process.</fsummary>
<type>
<v>Options = [{Option, Value}]</v>
<v>State = term()</v>
@@ -160,18 +144,21 @@
The following options must be present:
</p>
<taglist>
- <tag><c><![CDATA[{channel_cb, atom()}]]></c></tag>
+ <tag><c>{channel_cb, atom()}</c></tag>
<item><p>The module that implements the channel behaviour.</p></item>
- <tag><c><![CDATA[{init_args(), list()}]]></c></tag>
+ <tag><c>{init_args(), list()}</c></tag>
<item><p>The list of arguments to the <c>init</c> function of the callback module.</p></item>
- <tag><c><![CDATA[{cm, connection_ref()}]]></c></tag>
- <item><p>Reference to the <c>ssh</c> connection as returned by <seealso
- marker="ssh#connect-3">ssh:connect/3</seealso></p></item>
+ <tag><c>{cm, ssh:connection_ref()}</c></tag>
+ <item><p>Reference to the <c>ssh</c> connection as returned by
+ <seealso marker="ssh#connect-3">ssh:connect/3</seealso>.
+ </p></item>
- <tag><c><![CDATA[{channel_id, channel_id()}]]></c></tag>
- <item><p>Id of the <c>ssh</c> channel.</p></item>
+ <tag><c>{channel_id, ssh:channel_id()}</c></tag>
+ <item><p>Id of the <c>ssh</c> channel as returned by
+ <seealso marker="ssh_connection#session_channel/2">ssh_connection:session_channel/2,4</seealso>.
+ </p></item>
</taglist>
@@ -179,8 +166,8 @@
user. The user only needs to call if the
channel process needs to be started with help of
<c>proc_lib</c> instead of calling
- <c>ssh_channel:start/4</c> or
- <c>ssh_channel:start_link/4</c>.</p>
+ <c>start/4</c> or
+ <c>start_link/4</c>.</p>
</note>
</desc>
</func>
@@ -201,26 +188,31 @@
the callback function <c>handle_call/3</c>.
<c>Reply</c> is an arbitrary term,
which is given back to the client as the return value of
- <seealso marker="#call-2">ssh_channel:call/[2,3].</seealso></p>
+ <seealso marker="#call-2">call/[2,3].</seealso></p>
</desc>
</func>
-
+
<func>
<name>start(SshConnection, ChannelId, ChannelCb, CbInitArgs) -> </name>
<name>start_link(SshConnection, ChannelId, ChannelCb, CbInitArgs) ->
{ok, ChannelRef} | {error, Reason}</name>
<fsummary>Starts a process that handles an SSH channel.</fsummary>
<type>
- <v>SshConnection = ssh_connection_ref()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>SshConnection = ssh:connection_ref()</v>
+ <d>As returned by <seealso marker="ssh#connect-3">ssh:connect/3</seealso></d>
+
+ <v>ChannelId = <seealso marker="ssh#type-channel_id">ssh:channel_id()</seealso></v>
<d>As returned by
<seealso marker ="ssh_connection#session_channel/2">
ssh_connection:session_channel/[2,4]</seealso>.</d>
+
<v>ChannelCb = atom()</v>
<d>Name of the module implementing the service-specific parts
of the channel.</d>
+
<v>CbInitArgs = [term()]</v>
<d>Argument list for the <c>init</c> function in the callback module.</d>
+
<v>ChannelRef = pid()</v>
</type>
<desc>
@@ -235,13 +227,19 @@
</funcs>
<section>
+ <title>Callback Functions</title>
+ <p>
+      The following functions are to be exported from an
+      <c>ssh_client_channel</c> callback module.
+ </p>
<marker id="cb_timeouts"></marker>
- <title>CALLBACK TIME-OUTS</title>
-
- <p>The time-out values that can be returned by the callback functions
- have the same semantics as in a <seealso marker="stdlib:gen_server">gen_server</seealso>.
- If the time-out occurs, <seealso marker="#Module:handle_msg-2">handle_msg/2</seealso>
- is called as <c>handle_msg(timeout, State)</c>.</p>
+ <section>
+ <title>Callback timeouts</title>
+ <p>The timeout values that can be returned by the callback functions
+ have the same semantics as in a <seealso marker="stdlib:gen_server">gen_server</seealso>.
+ If the time-out occurs, <seealso marker="#Module:handle_msg-2">handle_msg/2</seealso>
+ is called as <c>handle_msg(timeout, State)</c>.</p>
+ </section>
</section>
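    A rough illustration of the timeout mechanics described above; the callback
    bodies and the #state{} record are assumptions made for the example, not part
    of the API:

        %% Sketch: a callback returns a timeout value and later receives the
        %% timeout in handle_msg/2, as a gen_server would.
        handle_call(get_status, _From, State) ->
            {reply, {ok, State}, State, 5000}.      %% wake up after 5 s of silence

        handle_msg(timeout, #state{channel_id = Id} = State) ->
            {stop, Id, State};                      %% assumed #state{} record
        handle_msg(_Msg, State) ->
            {ok, State}.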
<funcs>
@@ -295,7 +293,7 @@
initial channel state if the initializations succeed.</fsummary>
<type>
<v>Args = term()</v>
- <d>Last argument to <c>ssh_channel:start_link/4</c>.</d>
+ <d>Last argument to <c>start_link/4</c>.</d>
<v>State = term()</v>
<v>Reason = term()</v>
</type>
@@ -304,41 +302,41 @@
state if the initializations succeed.
</p>
<p>For more detailed information on time-outs, see Section
- <seealso marker="#cb_timeouts">CALLBACK TIME-OUTS</seealso>. </p>
+ <seealso marker="#cb_timeouts">Callback timeouts</seealso>. </p>
</desc>
</func>
<func>
<name>Module:handle_call(Msg, From, State) -> Result</name>
<fsummary>Handles messages sent by calling
- <c>ssh_channel:call/[2,3]</c>.</fsummary>
+ <c>call/[2,3]</c>.</fsummary>
<type>
<v>Msg = term()</v>
<v>From = opaque()</v>
<d>Is to be used as argument to
- <seealso marker="#reply-2">ssh_channel:reply/2</seealso></d>
+ <seealso marker="#reply-2">reply/2</seealso></d>
<v>State = term()</v>
<v>Result = {reply, Reply, NewState} | {reply, Reply, NewState, timeout()}
| {noreply, NewState} | {noreply , NewState, timeout()}
| {stop, Reason, Reply, NewState} | {stop, Reason, NewState} </v>
<v>Reply = term()</v>
- <d>Will be the return value of <seealso marker="#call-2">ssh_channel:call/[2,3]</seealso></d>
+ <d>Will be the return value of <seealso marker="#call-2">call/[2,3]</seealso></d>
<v>NewState = term()</v>
<v>Reason = term()</v>
</type>
<desc>
<p>Handles messages sent by calling
- <seealso marker="#call-2">ssh_channel:call/[2,3]</seealso>
+ <seealso marker="#call-2">call/[2,3]</seealso>
</p>
	<p>For more detailed information on time-outs, see Section
- <seealso marker="#cb_timeouts">CALLBACK TIME-OUTS</seealso>.</p>
+ <seealso marker="#cb_timeouts">Callback timeouts</seealso>.</p>
</desc>
</func>
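    A hedged sketch of the round trip: the Reply term chosen by the callback
    becomes the return value of call/[2,3] in the calling process. The request
    term and the record field below are invented for the example:

        %% In the callback module:
        handle_call(get_peer, _From, State) ->
            {reply, {ok, State#state.peer}, State};   %% Reply is returned by call/2
        handle_call(_Other, _From, State) ->
            {reply, {error, unknown_request}, State}.

        %% In some client process:
        %%   {ok, Peer} = ssh_client_channel:call(ChannelRef, get_peer).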
<func>
<name>Module:handle_cast(Msg, State) -> Result</name>
<fsummary>Handles messages sent by calling
- <c>ssh_channel:cact/2</c>.</fsummary>
+ <c>cast/2</c>.</fsummary>
<type>
<v>Msg = term()</v>
<v>State = term()</v>
@@ -349,10 +347,10 @@
</type>
<desc>
<p>Handles messages sent by calling
- <c>ssh_channel:cast/2</c>.
+ <c>cast/2</c>.
</p>
<p>For more detailed information on time-outs, see Section
- <seealso marker="#cb_timeouts">CALLBACK TIME-OUTS</seealso>.</p>
+ <seealso marker="#cb_timeouts">Callback timeouts</seealso>.</p>
</desc>
</func>
@@ -364,7 +362,7 @@
call, or cast messages sent to the channel.</fsummary>
<type>
<v>Msg = timeout | term()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ChannelId = <seealso marker="ssh#type-channel_id">ssh:channel_id()</seealso></v>
<v>State = term() </v>
</type>
<desc>
@@ -376,11 +374,10 @@
function and all channels are to handle the following message.</p>
<taglist>
- <tag><c><![CDATA[{ssh_channel_up, ssh_channel_id(),
- ssh_connection_ref()}]]></c></tag>
+ <tag><c>{ssh_channel_up, ssh:channel_id(), ssh:connection_ref()}</c></tag>
<item><p>This is the first message that the channel receives.
It is sent just before the <seealso
- marker="#init-1">ssh_channel:init/1</seealso> function
+ marker="#init-1">init/1</seealso> function
returns successfully. This is especially useful if the
server wants to send a message to the client without first
receiving a message from it. If the message is not
@@ -397,7 +394,7 @@
<fsummary>Handles <c>ssh</c> connection protocol messages.</fsummary>
<type>
<v>Msg = ssh_connection:event()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ChannelId = <seealso marker="ssh#type-channel_id">ssh:channel_id()</seealso></v>
<v>State = term()</v>
</type>
<desc>
@@ -407,10 +404,10 @@
</p>
<p>The following message is taken care of by the
- <c>ssh_channel</c> behavior.</p>
+ <c>ssh_client_channel</c> behavior.</p>
<taglist>
- <tag><c><![CDATA[{closed, ssh_channel_id()}]]></c></tag>
+ <tag><c>{closed, ssh:channel_id()}</c></tag>
<item><p>The channel behavior sends a close message to the
other side, if such a message has not already been sent.
Then it terminates the channel with reason <c>normal</c>.</p></item>
diff --git a/lib/ssh/doc/src/ssh_client_key_api.xml b/lib/ssh/doc/src/ssh_client_key_api.xml
index 98a1676ca4..9fc54341ed 100644
--- a/lib/ssh/doc/src/ssh_client_key_api.xml
+++ b/lib/ssh/doc/src/ssh_client_key_api.xml
@@ -41,7 +41,7 @@
see the <seealso marker="SSH_app"> ssh(6)</seealso> application manual.</p>
</description>
- <section>
+ <!-- section>
<title>DATA TYPES</title>
<p>Type definitions that are used more than once in this module,
@@ -68,23 +68,34 @@
| 'rsa-sha2-256' | 'rsa-sha2-384' | 'rsa-sha2-512'
| 'ecdsa-sha2-nistp256' | 'ecdsa-sha2-nistp384' | 'ecdsa-sha2-nistp521' </c></p></item>
</taglist>
- </section>
+ </section -->
+
+ <datatypes>
+ <datatype>
+ <name name="client_key_cb_options"/>
+ <desc>
+ <p>Options provided to <seealso marker="ssh#connect-3">ssh:connect/[3,4]</seealso>.
+ </p>
+ <p>The option list given in the
+ <seealso marker="ssh#type-key_cb_common_option"><c>key_cb</c></seealso>
+ option is available with the key <c>key_cb_private</c>.
+ </p>
+ </desc>
+ </datatype>
+ </datatypes>
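+  A hedged sketch of how a callback can pick up those private options; the
+  callback-module name, option keys and helper below are made up:
+
+      %% Assuming the connection was opened with
+      %%   ssh:connect(Host, 22, [{key_cb, {my_key_cb, [{keys_dir, "/some/dir"}]}}, ...])
+      is_host_key(Key, Host, Algorithm, ConnectOptions) ->
+          PrivOpts = proplists:get_value(key_cb_private, ConnectOptions, []),
+          Dir = proplists:get_value(keys_dir, PrivOpts, "."),
+          my_key_store:is_trusted(Key, Host, Algorithm, Dir).   %% made-up helper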
<funcs>
<func>
- <name>Module:add_host_key(HostNames, Key, ConnectOptions) -> ok | {error, Reason}</name>
+ <name>Module:add_host_key(HostNames, PublicHostKey, ConnectOptions) -> ok | {error, Reason}</name>
<fsummary>Adds a host key to the set of trusted host keys.</fsummary>
<type>
- <v>HostNames = string()</v>
- <d>Description of the host that owns the <c>PublicKey</c>.</d>
+ <v>HostNames = string()</v>
+ <d>Description of the host that owns the <c>PublicHostKey</c>.</d>
- <v>Key = public_key()</v>
- <d>Normally an RSA, DSA or ECDSA public key, but handling of other public keys can be added.</d>
+ <v>PublicHostKey = <seealso marker="public_key:public_key#type-public_key">public_key:public_key()</seealso></v>
+        <d>Normally an RSA, DSA or ECDSA public key, but handling of other public keys can be added.</d>
- <v>ConnectOptions = proplists:proplist()</v>
- <d>Options provided to <seealso marker="ssh#connect-3">ssh:connect/[3,4]</seealso>. The option list given in
- the <c>key_cb</c> option is available with the key <c>key_cb_private</c>.</d>
- <v>Reason = term().</v>
+ <v>ConnectOptions = <seealso marker="#type-client_key_cb_options">client_key_cb_options()</seealso></v>
</type>
<desc>
<p>Adds a host key to the set of trusted host keys.</p>
@@ -95,18 +106,16 @@
<name>Module:is_host_key(Key, Host, Algorithm, ConnectOptions) -> Result</name>
<fsummary>Checks if a host key is trusted.</fsummary>
<type>
- <v>Key = public_key() </v>
+ <v>Key = <seealso marker="public_key:public_key#type-public_key">public_key:public_key()</seealso></v>
<d>Normally an RSA, DSA or ECDSA public key, but handling of other public keys can be added.</d>
<v>Host = string()</v>
<d>Description of the host.</d>
- <v>Algorithm = public_key_algorithm()</v>
+ <v>Algorithm = <seealso marker="ssh#type-pubkey_alg">ssh:pubkey_alg()</seealso></v>
<d>Host key algorithm.</d>
- <v>ConnectOptions = proplists:proplist() </v>
- <d>Options provided to <seealso marker="ssh#connect-3">ssh:connect/[3,4]</seealso>. The option list given in
- the <c>key_cb</c> option is available with the key <c>key_cb_private</c>.</d>
+ <v>ConnectOptions = <seealso marker="#type-client_key_cb_options">client_key_cb_options()</seealso></v>
<v>Result = boolean()</v>
</type>
@@ -120,14 +129,12 @@
{ok, PrivateKey} | {error, Reason}</name>
<fsummary>Fetches the users <em>public key</em> matching the <c>Algorithm</c>.</fsummary>
<type>
- <v>Algorithm = public_key_algorithm()</v>
+ <v>Algorithm = <seealso marker="ssh#type-pubkey_alg">ssh:pubkey_alg()</seealso></v>
<d>Host key algorithm.</d>
- <v>ConnectOptions = proplists:proplist()</v>
- <d>Options provided to <seealso marker="ssh#connect-3">ssh:connect/[3,4]</seealso>. The option list given in
- the <c>key_cb</c> option is available with the key <c>key_cb_private</c>.</d>
+ <v>ConnectOptions = <seealso marker="#type-client_key_cb_options">client_key_cb_options()</seealso></v>
- <v>PrivateKey = private_key()</v>
+ <v>PrivateKey = <seealso marker="public_key:public_key#type-private_key">public_key:private_key()</seealso></v>
<d>Private key of the user matching the <c>Algorithm</c>.</d>
<v>Reason = term()</v>
diff --git a/lib/ssh/doc/src/ssh_connection.xml b/lib/ssh/doc/src/ssh_connection.xml
index 72830de04d..821dfef93d 100644
--- a/lib/ssh/doc/src/ssh_connection.xml
+++ b/lib/ssh/doc/src/ssh_connection.xml
@@ -43,10 +43,10 @@
which are received as messages by the remote channel.
If the receiving channel is an Erlang process, the
messages have the format
- <c><![CDATA[{ssh_cm, ssh_connection_ref(), ssh_event_msg()}]]></c>.
- If the <seealso marker="ssh_channel">ssh_channel</seealso> behavior is used to
+ <c><![CDATA[{ssh_cm, connection_ref(), ssh_event_msg()}]]></c>.
+ If the <seealso marker="ssh_client_channel">ssh_client_channel</seealso> behavior is used to
implement the channel process, these messages are handled by
- <seealso marker="ssh_channel#Module:handle_ssh_msg-2">handle_ssh_msg/2</seealso>.</p>
+ <seealso marker="ssh_client_channel#Module:handle_ssh_msg-2">handle_ssh_msg/2</seealso>.</p>
</description>
<section>
@@ -63,10 +63,10 @@
<item><p>list of ASCII characters</p></item>
<tag><c>timeout() =</c></tag>
<item><p><c>infinity | integer()</c> in milliseconds</p></item>
- <tag><c>ssh_connection_ref() =</c></tag>
+ <tag><c>connection_ref() =</c></tag>
<item><p>opaque() -as returned by
<c>ssh:connect/3</c> or sent to an SSH channel processes</p></item>
- <tag><c>ssh_channel_id() =</c></tag>
+ <tag><c>channel_id() =</c></tag>
<item><p><c>integer()</c></p></item>
<tag><c>ssh_data_type_code() =</c></tag>
<item><p><c>1</c> ("stderr") | <c>0</c> ("normal") are
@@ -75,7 +75,7 @@
<tag><c>ssh_request_status() =</c></tag>
<item><p> <c>success | failure</c></p></item>
<tag><c>event() =</c></tag>
- <item><p><c>{ssh_cm, ssh_connection_ref(), ssh_event_msg()}</c></p></item>
+ <item><p><c>{ssh_cm, connection_ref(), ssh_event_msg()}</c></p></item>
<tag><c>ssh_event_msg() =</c></tag>
<item><p><c>data_events() | status_events() | terminal_events()</c></p></item>
<tag><c>reason() =</c></tag>
@@ -86,12 +86,12 @@
<tag><em>data_events()</em></tag>
<item>
<taglist>
- <tag><c><![CDATA[{data, ssh_channel_id(), ssh_data_type_code(), Data :: binary()}]]></c></tag>
+ <tag><c><![CDATA[{data, channel_id(), ssh_data_type_code(), Data :: binary()}]]></c></tag>
<item><p>Data has arrived on the channel. This event is sent as a
result of calling <seealso marker="ssh_connection#send-3">
ssh_connection:send/[3,4,5]</seealso>.</p></item>
- <tag><c><![CDATA[{eof, ssh_channel_id()}]]></c></tag>
+ <tag><c><![CDATA[{eof, channel_id()}]]></c></tag>
<item><p>Indicates that the other side sends no more data.
This event is sent as a result of calling <seealso
marker="ssh_connection#send_eof-2"> ssh_connection:send_eof/2</seealso>.
@@ -103,7 +103,7 @@
<item>
<taglist>
- <tag><c><![CDATA[{signal, ssh_channel_id(), ssh_signal()}]]></c></tag>
+ <tag><c><![CDATA[{signal, channel_id(), ssh_signal()}]]></c></tag>
<item><p>A signal can be delivered to the remote process/service
using the following message. Some systems do not support
signals, in which case they are to ignore this message. There is
@@ -111,7 +111,7 @@
referred to are on OS-level and not something generated by an
Erlang program.</p></item>
- <tag><c><![CDATA[{exit_signal, ssh_channel_id(), ExitSignal :: string(), ErrorMsg ::string(),
+ <tag><c><![CDATA[{exit_signal, channel_id(), ExitSignal :: string(), ErrorMsg ::string(),
LanguageString :: string()}]]></c></tag>
<item><p>A remote execution can terminate violently because of a signal.
@@ -119,7 +119,7 @@
values, see <url href="http://www.ietf.org/rfc/rfc4254.txt">RFC 4254</url>
Section 6.10, which shows a special case of these signals.</p></item>
- <tag><c><![CDATA[{exit_status, ssh_channel_id(), ExitStatus :: integer()}]]></c></tag>
+ <tag><c><![CDATA[{exit_status, channel_id(), ExitStatus :: integer()}]]></c></tag>
<item><p>When the command running at the other end terminates, the
following message can be sent to return the exit status of the
command. A zero <c>exit_status</c> usually means that the command
@@ -127,11 +127,11 @@
<seealso marker="ssh_connection#exit_status-3">
ssh_connection:exit_status/3</seealso>.</p></item>
- <tag><c><![CDATA[{closed, ssh_channel_id()}]]></c></tag>
+ <tag><c><![CDATA[{closed, channel_id()}]]></c></tag>
<item><p>This event is sent as a result of calling
<seealso marker="ssh_connection#close-2">ssh_connection:close/2</seealso>.
Both the handling of this event and sending it are taken care of by the
- <seealso marker="ssh_channel">ssh_channel</seealso> behavior.</p></item>
+ <seealso marker="ssh_client_channel">ssh_client_channel</seealso> behavior.</p></item>
</taglist>
</item>
@@ -149,14 +149,14 @@
with the boolean value of <c>WantReply</c> as the second argument.</p>
<taglist>
- <tag><c><![CDATA[{env, ssh_channel_id(), WantReply :: boolean(),
+ <tag><c><![CDATA[{env, channel_id(), WantReply :: boolean(),
Var ::string(), Value :: string()}]]></c></tag>
<item><p>Environment variables can be passed to the shell/command
to be started later. This event is sent as a result of calling <seealso
marker="ssh_connection#setenv-5"> ssh_connection:setenv/5</seealso>.
</p></item>
- <tag><c><![CDATA[{pty, ssh_channel_id(),
+ <tag><c><![CDATA[{pty, channel_id(),
WantReply :: boolean(), {Terminal :: string(), CharWidth :: integer(),
RowHeight :: integer(), PixelWidth :: integer(), PixelHeight :: integer(),
TerminalModes :: [{Opcode :: atom() | integer(),
@@ -181,13 +181,13 @@
<seealso marker="ssh_connection#shell-2"> ssh_connection:shell/2</seealso>.
</p></item>
- <tag><c><![CDATA[{window_change, ssh_channel_id(), CharWidth() :: integer(),
+ <tag><c><![CDATA[{window_change, channel_id(), CharWidth() :: integer(),
RowHeight :: integer(), PixWidth :: integer(), PixHeight :: integer()}]]></c></tag>
<item><p>When the window (terminal) size changes on the client
side, it <em>can</em> send a message to the server side to inform it of
the new dimensions. No API function generates this event.</p></item>
- <tag><c><![CDATA[{exec, ssh_channel_id(),
+ <tag><c><![CDATA[{exec, channel_id(),
WantReply :: boolean(), Cmd :: string()}]]></c></tag>
<item><p>This message requests that the server starts
execution of the given command. This event is sent as a result of calling <seealso
@@ -204,18 +204,18 @@
<name>adjust_window(ConnectionRef, ChannelId, NumOfBytes) -> ok</name>
<fsummary>Adjusts the SSH flow control window.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ConnectionRef = connection_ref()</v>
+ <v>ChannelId = channel_id()</v>
<v>NumOfBytes = integer()</v>
</type>
<desc>
<p>Adjusts the SSH flow control window. This is to be done by both the
client- and server-side channel processes.</p>
- <note><p>Channels implemented with the <seealso marker="ssh_channel"> ssh_channel</seealso>
+ <note><p>Channels implemented with the <seealso marker="ssh_client_channel"> ssh_client_channel</seealso>
behavior do not normally need to call this function as flow control
is handled by the behavior. The behavior adjusts the window every time
- the callback <seealso marker="ssh_channel#Module:handle_ssh_msg-2">
+ the callback <seealso marker="ssh_client_channel#Module:handle_ssh_msg-2">
handle_ssh_msg/2</seealso> returns after processing channel data.</p></note>
</desc>
</func>
@@ -224,17 +224,17 @@
<name>close(ConnectionRef, ChannelId) -> ok</name>
<fsummary>Sends a close message on the channel <c>ChannelId</c>.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ConnectionRef = connection_ref()</v>
+ <v>ChannelId = channel_id()</v>
</type>
<desc>
<p>A server- or client-channel process can choose to close their session by
sending a close event.
</p>
- <note><p>This function is called by the <c>ssh_channel</c>
+ <note><p>This function is called by the <c>ssh_client_channel</c>
behavior when the channel is terminated, see <seealso
- marker="ssh_channel"> ssh_channel(3)</seealso>. Thus, channels implemented
+ marker="ssh_client_channel"> ssh_client_channel(3)</seealso>. Thus, channels implemented
with the behavior are not to call this function explicitly.</p></note>
</desc>
</func>
@@ -244,8 +244,8 @@
{error, reason()}</name>
<fsummary>Requests that the server starts the execution of the given command.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ConnectionRef = connection_ref()</v>
+ <v>ChannelId = channel_id()</v>
<v>Command = string()</v>
<v>Timeout = timeout()</v>
</type>
@@ -256,28 +256,28 @@
request is a one-time execution that closes the channel when it is done.</p>
<taglist>
- <tag><c>N x {ssh_cm, ssh_connection_ref(),
- {data, ssh_channel_id(), ssh_data_type_code(), Data :: binary()}}</c></tag>
+ <tag><c>N x {ssh_cm, connection_ref(),
+ {data, channel_id(), ssh_data_type_code(), Data :: binary()}}</c></tag>
<item><p>The result of executing the command can be only one line
or thousands of lines depending on the command.</p></item>
- <tag><c>0 or 1 x {ssh_cm, ssh_connection_ref(), {eof, ssh_channel_id()}}</c></tag>
+ <tag><c>0 or 1 x {ssh_cm, connection_ref(), {eof, channel_id()}}</c></tag>
<item><p>Indicates that no more data is to be sent.</p></item>
<tag><c>0 or 1 x {ssh_cm,
- ssh_connection_ref(), {exit_signal,
- ssh_channel_id(), ExitSignal :: string(), ErrorMsg :: string(), LanguageString :: string()}}</c></tag>
+ connection_ref(), {exit_signal,
+ channel_id(), ExitSignal :: string(), ErrorMsg :: string(), LanguageString :: string()}}</c></tag>
<item><p>Not all systems send signals. For details on valid string
values, see RFC 4254, Section 6.10</p></item>
- <tag><c>0 or 1 x {ssh_cm, ssh_connection_ref(), {exit_status,
- ssh_channel_id(), ExitStatus :: integer()}}</c></tag>
+ <tag><c>0 or 1 x {ssh_cm, connection_ref(), {exit_status,
+ channel_id(), ExitStatus :: integer()}}</c></tag>
<item><p>It is recommended by the SSH Connection Protocol to send this
message, but that is not always the case.</p></item>
- <tag><c>1 x {ssh_cm, ssh_connection_ref(),
- {closed, ssh_channel_id()}}</c></tag>
- <item><p>Indicates that the <c>ssh_channel</c> started for the
+ <tag><c>1 x {ssh_cm, connection_ref(),
+ {closed, channel_id()}}</c></tag>
+ <item><p>Indicates that the <c>ssh_client_channel</c> started for the
execution of the command has now been shut down.</p></item>
</taglist>
</desc>
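      A hedged sketch of a receive loop that collects these messages after a
      successful call to ssh_connection:exec/4 (the helper name is made up):

          collect_exec_output(ConnectionRef, ChannelId, Acc) ->
              receive
                  {ssh_cm, ConnectionRef, {data, ChannelId, _Type, Data}} ->
                      collect_exec_output(ConnectionRef, ChannelId, [Data | Acc]);
                  {ssh_cm, ConnectionRef, {eof, ChannelId}} ->
                      collect_exec_output(ConnectionRef, ChannelId, Acc);
                  {ssh_cm, ConnectionRef, {exit_status, ChannelId, Status}} ->
                      collect_exec_output(ConnectionRef, ChannelId, [{exit_status, Status} | Acc]);
                  {ssh_cm, ConnectionRef, {closed, ChannelId}} ->
                      lists:reverse(Acc)
              end.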
@@ -287,8 +287,8 @@
<name>exit_status(ConnectionRef, ChannelId, Status) -> ok</name>
<fsummary>Sends the exit status of a command to the client.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref() </v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ConnectionRef = connection_ref() </v>
+ <v>ChannelId = channel_id()</v>
<v>Status = integer()</v>
</type>
<desc>
@@ -304,8 +304,8 @@
<fsummary>Sends an SSH Connection Protocol <c>pty_req</c>,
to allocate a pseudo-terminal.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ConnectionRef = connection_ref()</v>
+ <v>ChannelId = channel_id()</v>
<v>Options = proplists:proplist()</v>
</type>
<desc>
@@ -342,10 +342,10 @@
<name>reply_request(ConnectionRef, WantReply, Status, ChannelId) -> ok</name>
<fsummary>Sends status replies to requests that want such replies.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
+ <v>ConnectionRef = connection_ref()</v>
<v>WantReply = boolean()</v>
<v>Status = ssh_request_status()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ChannelId = channel_id()</v>
</type>
<desc>
<p>Sends status replies to requests where the requester has
@@ -364,8 +364,8 @@
ok | {error, timeout} | {error, closed}</name>
<fsummary>Sends channel data.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ConnectionRef = connection_ref()</v>
+ <v>ChannelId = channel_id()</v>
<v>Data = binary()</v>
<v>Type = ssh_data_type_code()</v>
<v>Timeout = timeout()</v>
@@ -383,8 +383,8 @@
<name>send_eof(ConnectionRef, ChannelId) -> ok | {error, closed}</name>
<fsummary>Sends EOF on channel <c>ChannelId</c>.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ConnectionRef = connection_ref()</v>
+ <v>ChannelId = channel_id()</v>
</type>
<desc>
<p>Sends EOF on channel <c>ChannelId</c>.</p>
@@ -394,10 +394,10 @@
<func>
<name>session_channel(ConnectionRef, Timeout) -></name>
<name>session_channel(ConnectionRef, InitialWindowSize,
- MaxPacketSize, Timeout) -> {ok, ssh_channel_id()} | {error, reason()}</name>
+ MaxPacketSize, Timeout) -> {ok, channel_id()} | {error, reason()}</name>
<fsummary>Opens a channel for an SSH session.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
+ <v>ConnectionRef = connection_ref()</v>
<v>InitialWindowSize = integer()</v>
<v>MaxPacketSize = integer()</v>
<v>Timeout = timeout()</v>
@@ -415,8 +415,8 @@
<fsummary>Environment variables can be passed to the
shell/command to be started later.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ConnectionRef = connection_ref()</v>
+ <v>ChannelId = channel_id()</v>
<v>Var = string()</v>
<v>Value = string()</v>
<v>Timeout = timeout()</v>
@@ -433,8 +433,8 @@
<fsummary>Requests that the user default shell (typically defined in
/etc/passwd in Unix systems) is to be executed at the server end.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ConnectionRef = connection_ref()</v>
+ <v>ChannelId = channel_id()</v>
</type>
<desc>
<p>Is to be called by a client channel process to request that the user default
@@ -452,8 +452,8 @@
{error, reason()}</name>
<fsummary>Requests to execute a predefined subsystem on the server.</fsummary>
<type>
- <v>ConnectionRef = ssh_connection_ref()</v>
- <v>ChannelId = ssh_channel_id()</v>
+ <v>ConnectionRef = connection_ref()</v>
+ <v>ChannelId = channel_id()</v>
<v>Subsystem = string()</v>
<v>Timeout = timeout()</v>
</type>
diff --git a/lib/ssh/doc/src/ssh_protocol.xml b/lib/ssh/doc/src/ssh_protocol.xml
index a0032ab449..53f0524b97 100644
--- a/lib/ssh/doc/src/ssh_protocol.xml
+++ b/lib/ssh/doc/src/ssh_protocol.xml
@@ -87,8 +87,10 @@
connection, and all channels are flow-controlled. Typically an
SSH client will open a channel, send data/commands, receive
data/"control information" and when it is done close the
- channel. The <seealso
- marker="ssh_channel">ssh_channel</seealso> behaviour makes it easy to
+ channel. The
+ <seealso marker="ssh_client_channel">ssh_client_channel</seealso> /
+      <seealso marker="ssh_server_channel">ssh_server_channel</seealso> (replaces ssh_daemon_channel)
+      behaviours make it easy to
write your own SSH client/server processes that use flow
control. It handles generic parts of SSH channel management and
lets you focus on the application logic.
diff --git a/lib/ssh/doc/src/ssh_server_channel.xml b/lib/ssh/doc/src/ssh_server_channel.xml
new file mode 100644
index 0000000000..af51ec470b
--- /dev/null
+++ b/lib/ssh/doc/src/ssh_server_channel.xml
@@ -0,0 +1,176 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<!DOCTYPE erlref SYSTEM "erlref.dtd">
+
+<erlref>
+ <header>
+ <copyright>
+ <year>2009</year>
+ <year>2016</year>
+ <holder>Ericsson AB, All Rights Reserved</holder>
+ </copyright>
+ <legalnotice>
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+
+ The Initial Developer of the Original Code is Ericsson AB.
+ </legalnotice>
+ <title>ssh_server_channel</title>
+ <prepared></prepared>
+ <docno></docno>
+ <date></date>
+ <rev></rev>
+ </header>
+ <module>ssh_server_channel</module>
+ <modulesummary>-behaviour(ssh_server_channel). (Replaces ssh_daemon_channel)
+ </modulesummary>
+ <description>
+ <note>
+ <p>This module replaces ssh_daemon_channel.</p>
+ <p>The old module is still available for compatibility, but should not be used for new programs.
+    The old module will not be maintained except for some error corrections.
+ </p>
+ </note>
+
+ <p>SSH services (clients and servers) are implemented as channels
+    that are multiplexed over an SSH connection and communicate over
+ the <url href="http://www.ietf.org/rfc/rfc4254.txt"> SSH
+ Connection Protocol</url>. This module provides a callback API
+ that takes care of generic channel aspects for daemons, such as flow control
+ and close messages. It lets the callback functions take care of
+ the service (application) specific parts. This behavior also ensures
+    that the channel process honors the principles of an OTP process so
+ that it can be part of a supervisor tree. This is a requirement of
+ channel processes implementing a subsystem that will be added to
+    the <c>ssh</c> application's supervisor tree.
+ </p>
+
+ <note><p>When implementing a client subsystem handler, use
+ <seealso marker="ssh_client_channel">-behaviour(ssh_client_channel)</seealso> instead.
+ </p>
+ </note>
+
+ </description>
+
+ <section>
+ <title>Callback Functions</title>
+ <p>
+      The following functions are to be exported from an
+      <c>ssh_server_channel</c> callback module.
+ </p>
+ </section>
+
+ <funcs>
+ <func>
+ <name>Module:init(Args) -> {ok, State} | {ok, State, timeout()} |
+ {stop, Reason}</name>
+ <fsummary>Makes necessary initializations and returns the
+ initial channel state if the initializations succeed.</fsummary>
+ <type>
+ <v>Args = term()</v>
+ <d>Last argument to <c>start_link/4</c>.</d>
+ <v>State = term()</v>
+ <v>Reason = term()</v>
+ </type>
+ <desc>
+ <p>Makes necessary initializations and returns the initial channel
+ state if the initializations succeed.
+ </p>
+ <p>The time-out values that can be returned
+ have the same semantics as in a <seealso marker="stdlib:gen_server">gen_server</seealso>.
+ If the time-out occurs, <seealso marker="#Module:handle_msg-2">handle_msg/2</seealso>
+ is called as <c>handle_msg(timeout, State)</c>.
+ </p>
+ </desc>
+ </func>
+
+ <func>
+ <name>Module:handle_msg(Msg, State) -> {ok, State} |
+ {stop, ChannelId, State}</name>
+
+ <fsummary>Handles other messages than SSH connection protocol,
+ call, or cast messages sent to the channel.</fsummary>
+ <type>
+ <v>Msg = timeout | term()</v>
+ <v>ChannelId = <seealso marker="ssh#type-channel_id">ssh:channel_id()</seealso></v>
+ <v>State = term() </v>
+ </type>
+ <desc>
+ <p>Handles other messages than SSH Connection Protocol, call, or
+ cast messages sent to the channel.
+ </p>
+
+      <p>Possible Erlang 'EXIT' messages are to be handled by this
+ function and all channels are to handle the following message.</p>
+
+ <taglist>
+ <tag><c>{ssh_channel_up, ssh:channel_id(), ssh:connection_ref()}</c></tag>
+ <item><p>This is the first message that the channel receives.
+ This is especially useful if the
+ server wants to send a message to the client without first
+ receiving a message from it. If the message is not
+ useful for your particular scenario, ignore it by
+ immediately returning <c>{ok, State}</c>.
+ </p></item>
+ </taglist>
+ </desc>
+ </func>
+
+ <func>
+ <name>Module:handle_ssh_msg(Msg, State) -> {ok, State} | {stop,
+ ChannelId, State}</name>
+ <fsummary>Handles <c>ssh</c> connection protocol messages.</fsummary>
+ <type>
+ <v>Msg = ssh_connection:event()</v>
+ <v>ChannelId = <seealso marker="ssh#type-channel_id">ssh:channel_id()</seealso></v>
+ <v>State = term()</v>
+ </type>
+ <desc>
+ <p>Handles SSH Connection Protocol messages that may need
+ service-specific attention. For details,
+ see <seealso marker="ssh_connection"> ssh_connection:event()</seealso>.
+ </p>
+
+ <p>The following message is taken care of by the
+ <c>ssh_server_channel</c> behavior.</p>
+
+ <taglist>
+ <tag><c>{closed, ssh:channel_id()}</c></tag>
+ <item><p>The channel behavior sends a close message to the
+ other side, if such a message has not already been sent.
+ Then it terminates the channel with reason <c>normal</c>.</p></item>
+ </taglist>
+ </desc>
+ </func>
+
+ <func>
+ <name>Module:terminate(Reason, State) -> _</name>
+ <fsummary>Does cleaning up before channel process termination.
+ </fsummary>
+ <type>
+ <v>Reason = term()</v>
+ <v>State = term()</v>
+ </type>
+ <desc>
+ <p>This function is called by a channel process when it is
+ about to terminate. Before this function is called, <seealso
+ marker="ssh_connection#close-2"> ssh_connection:close/2
+ </seealso> is called, if it has not been called earlier.
+ This function does any necessary cleaning
+ up. When it returns, the channel process terminates with
+ reason <c>Reason</c>. The return value is ignored.
+ </p>
+ </desc>
+ </func>
+
+ </funcs>
+
+</erlref>
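+
+  For orientation, a minimal hypothetical callback module satisfying the four
+  callbacks above; it accepts incoming data silently and stops on eof (the
+  module name is a placeholder):
+
+      -module(my_subsystem).
+      -behaviour(ssh_server_channel).
+      -export([init/1, handle_msg/2, handle_ssh_msg/2, terminate/2]).
+
+      init(_Args) ->
+          {ok, #{}}.
+
+      handle_msg({ssh_channel_up, _ChannelId, _ConnectionRef}, State) ->
+          {ok, State}.
+
+      handle_ssh_msg({ssh_cm, _Cm, {data, _ChannelId, _Type, _Data}}, State) ->
+          {ok, State};
+      handle_ssh_msg({ssh_cm, _Cm, {eof, ChannelId}}, State) ->
+          {stop, ChannelId, State};
+      handle_ssh_msg(_Msg, State) ->
+          {ok, State}.
+
+      terminate(_Reason, _State) ->
+          ok.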
diff --git a/lib/ssh/doc/src/ssh_server_key_api.xml b/lib/ssh/doc/src/ssh_server_key_api.xml
index c6808b95d1..cf3b1d319f 100644
--- a/lib/ssh/doc/src/ssh_server_key_api.xml
+++ b/lib/ssh/doc/src/ssh_server_key_api.xml
@@ -41,7 +41,7 @@
see the <seealso marker="SSH_app"> ssh(6)</seealso> application manual.</p>
</description>
- <section>
+ <!-- section>
<title>DATA TYPES</title>
<p>Type definitions that are used more than once in this module,
@@ -69,22 +69,40 @@
| 'rsa-sha2-256' | 'rsa-sha2-384' | 'rsa-sha2-512'
| 'ecdsa-sha2-nistp256' | 'ecdsa-sha2-nistp384' | 'ecdsa-sha2-nistp521' </c></p></item>
</taglist>
- </section>
+ </section -->
+ <datatypes>
+ <datatype>
+ <name name="daemon_key_cb_options"/>
+ <desc>
+ <p>Options provided to <seealso marker="ssh#daemon-2">ssh:daemon/2,3</seealso>.
+ </p>
+ <p>The option list given in the
+ <seealso marker="ssh#type-key_cb_common_option"><c>key_cb</c></seealso>
+ option is available with the key <c>key_cb_private</c>.
+ </p>
+ </desc>
+ </datatype>
+ </datatypes>
+
<funcs>
<func>
<name>Module:host_key(Algorithm, DaemonOptions) ->
{ok, Key} | {error, Reason}</name>
<fsummary>Fetches the host’s private key.</fsummary>
<type>
- <v>Algorithm = public_key_algorithm()</v>
+ <v>Algorithm = <seealso marker="ssh#type-pubkey_alg">ssh:pubkey_alg()</seealso></v>
<d>Host key algorithm.</d>
- <v>DaemonOptions = proplists:proplist()</v>
- <d>Options provided to <seealso marker="ssh#daemon-2">ssh:daemon/[2,3]</seealso>. The option list given in
- the <c>key_cb</c> option is available with the key <c>key_cb_private</c>.</d>
- <v>Key = private_key() | crypto:engine_key_ref()</v>
+
+ <v>DaemonOptions = <seealso marker="#type-daemon_key_cb_options">daemon_key_cb_options()</seealso></v>
+
+	<v>Key = <seealso marker="public_key:public_key#type-private_key">public_key:private_key()</seealso>
+ | <seealso marker="crypto:crypto#type-engine_key_ref">crypto:engine_key_ref()</seealso>
+ </v>
+
<d>Private key of the host matching the <c>Algorithm</c>.
It may be a reference to a 'ssh-rsa', rsa-sha2-* or 'ssh-dss' (NOT ecdsa) key stored in a loaded Engine.</d>
+
<v>Reason = term()</v>
</type>
<desc>
@@ -93,16 +111,17 @@
</func>
<func>
- <name>Module:is_auth_key(Key, User, DaemonOptions) -> Result</name>
+ <name>Module:is_auth_key(PublicUserKey, User, DaemonOptions) -> Result</name>
<fsummary>Checks if the user key is authorized.</fsummary>
<type>
- <v>Key = public_key()</v>
+ <v>PublicUserKey = <seealso marker="public_key:public_key#type-public_key">public_key:public_key()</seealso></v>
<d>Normally an RSA, DSA or ECDSA public key, but handling of other public keys can be added</d>
+
<v>User = string()</v>
<d>User owning the public key.</d>
- <v>DaemonOptions = proplists:proplist()</v>
- <d>Options provided to <seealso marker="ssh#daemon-2">ssh:daemon/[2,3]</seealso>. The option list given in
- the <c>key_cb</c> option is available with the key <c>key_cb_private</c>.</d>
+
+ <v>DaemonOptions = <seealso marker="#type-daemon_key_cb_options">daemon_key_cb_options()</seealso></v>
+
<v>Result = boolean()</v>
</type>
<desc>
diff --git a/lib/ssh/doc/src/ssh_sftp.xml b/lib/ssh/doc/src/ssh_sftp.xml
index 129426a6d5..60f643d052 100644
--- a/lib/ssh/doc/src/ssh_sftp.xml
+++ b/lib/ssh/doc/src/ssh_sftp.xml
@@ -59,7 +59,7 @@
</p>
</item>
- <tag><c>ssh_connection_ref() =</c></tag>
+ <tag><c>connection_ref() =</c></tag>
<item><p><c>opaque()</c> - as returned by
<seealso marker="ssh#connect-3"><c>ssh:connect/3</c></seealso></p></item>
@@ -546,7 +546,7 @@
<fsummary>Starts an SFTP client.</fsummary>
<type>
<v>Host = string()</v>
- <v>ConnectionRef = ssh_connection_ref()</v>
+ <v>ConnectionRef = connection_ref()</v>
<v>Port = integer()</v>
<v>TcpSocket = port()</v>
<d>The socket is supposed to be from <seealso marker="kernel:gen_tcp#connect-3">gen_tcp:connect</seealso> or <seealso marker="kernel:gen_tcp#accept-1">gen_tcp:accept</seealso> with option <c>{active,false}</c></d>
diff --git a/lib/ssh/doc/src/ssh_sftpd.xml b/lib/ssh/doc/src/ssh_sftpd.xml
index 1be29b3b29..a25ce123b3 100644
--- a/lib/ssh/doc/src/ssh_sftpd.xml
+++ b/lib/ssh/doc/src/ssh_sftpd.xml
@@ -44,8 +44,7 @@
<item><p><c>"sftp"</c></p></item>
<tag><c>channel_callback() =</c></tag>
<item><p><c>atom()</c> - Name of the Erlang module implementing the subsystem using the
- <c>ssh_channel</c> behavior, see the
- <seealso marker="ssh_channel">ssh_channel(3)</seealso> manual page.</p></item>
+ <seealso marker="ssh_server_channel">ssh_server_channel</seealso> (replaces ssh_daemon_channel) behaviour.</p></item>
<tag><c>channel_init_args() =</c></tag>
<item><p><c>list()</c> - The one given as argument to function <c>subsystem_spec/1</c>.</p></item>
</taglist>
diff --git a/lib/ssh/doc/src/using_ssh.xml b/lib/ssh/doc/src/using_ssh.xml
index ab307624e6..efd2a997f5 100644
--- a/lib/ssh/doc/src/using_ssh.xml
+++ b/lib/ssh/doc/src/using_ssh.xml
@@ -298,6 +298,7 @@ ok = erl_tar:close(HandleRead),
</section>
<section>
+ <marker id="usersguide_creating_a_subsystem"/>
<title>Creating a Subsystem</title>
<p>A small <c>ssh</c> subsystem that echoes N bytes can be implemented as shown
@@ -305,7 +306,7 @@ ok = erl_tar:close(HandleRead),
<code type="erl" >
-module(ssh_echo_server).
--behaviour(ssh_daemon_channel).
+-behaviour(ssh_server_channel). % replaces ssh_daemon_channel
-record(state, {
n,
id,
@@ -383,7 +384,7 @@ terminate(_Reason, _State) ->
{ssh_msg, &lt;0.57.0>, {closed, 0}}
7> {error, closed} = ssh_connection:send(ConnectionRef, ChannelId, "10", infinity).
</code>
-<p>See also <seealso marker="ssh_channel"> ssh_channel(3)</seealso>.</p>
+<p>See also <seealso marker="ssh_client_channel">ssh_client_channel(3)</seealso> (replaces ssh_channel(3)).</p>
</section>
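A hedged sketch of plugging the subsystem above into a daemon; the port number,
system_dir path and subsystem name are placeholders:

    {ok, _DaemonRef} =
        ssh:daemon(8989, [{system_dir, "/tmp/ssh_daemon"},
                          {subsystems, [{"echo_n", {ssh_echo_server, [10]}}]}]).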
diff --git a/lib/ssh/src/Makefile b/lib/ssh/src/Makefile
index bcd13213b3..5e4efb6b99 100644
--- a/lib/ssh/src/Makefile
+++ b/lib/ssh/src/Makefile
@@ -40,42 +40,44 @@ RELSYSDIR = $(RELEASE_PATH)/lib/ssh-$(VSN)
# Behaviour (api) modules are first so they are compiled when
# the compiler reaches a callback module using them.
BEHAVIOUR_MODULES= \
- ssh_sftpd_file_api \
- ssh_channel \
- ssh_daemon_channel \
ssh_client_key_api \
- ssh_server_key_api
+ ssh_daemon_channel \
+ ssh_server_channel \
+ ssh_server_key_api \
+ ssh_sftpd_file_api \
+ ssh_channel \
+ ssh_client_channel
MODULES= \
ssh \
- ssh_sup \
- sshc_sup \
- sshd_sup \
- ssh_options \
- ssh_connection_sup \
- ssh_connection \
- ssh_connection_handler \
- ssh_dbg \
- ssh_shell \
- ssh_system_sup \
- ssh_subsystem_sup \
- ssh_channel_sup \
- ssh_acceptor_sup \
ssh_acceptor \
+ ssh_acceptor_sup \
ssh_app \
ssh_auth\
ssh_bits \
ssh_cli \
+ ssh_connection \
+ ssh_connection_handler \
+ ssh_connection_sup \
+ ssh_dbg \
ssh_file \
- ssh_io \
ssh_info \
+ ssh_io \
ssh_message \
ssh_no_io \
+ ssh_options \
+ ssh_server_channel_sup \
ssh_sftp \
ssh_sftpd \
ssh_sftpd_file\
+ ssh_shell \
+ ssh_subsystem_sup \
+ ssh_sup \
+ ssh_system_sup \
ssh_transport \
- ssh_xfer
+ ssh_xfer \
+ sshc_sup \
+ sshd_sup
HRL_FILES =
@@ -169,7 +171,7 @@ $(EBIN)/ssh_connection_handler.$(EMULATOR): ssh_connection_handler.erl ssh.hrl \
$(EBIN)/ssh_shell.$(EMULATOR): ssh_shell.erl ssh_connect.hrl
$(EBIN)/ssh_system_sup.$(EMULATOR): ssh_system_sup.erl ssh.hrl
$(EBIN)/ssh_subsystem_sup.$(EMULATOR): ssh_subsystem_sup.erl
-$(EBIN)/ssh_channel_sup.$(EMULATOR): ssh_channel_sup.erl
+$(EBIN)/ssh_server_channel_sup.$(EMULATOR): ssh_server_channel_sup.erl
$(EBIN)/ssh_acceptor_sup.$(EMULATOR): ssh_acceptor_sup.erl ssh.hrl
$(EBIN)/ssh_acceptor.$(EMULATOR): ssh_acceptor.erl ssh.hrl
$(EBIN)/ssh_app.$(EMULATOR): ssh_app.erl
@@ -208,8 +210,10 @@ $(EBIN)/ssh_transport.$(EMULATOR): ssh_transport.erl \
ssh_transport.hrl ssh.hrl
$(EBIN)/ssh_xfer.$(EMULATOR): ssh_xfer.erl ssh.hrl ssh_xfer.hrl
$(EBIN)/ssh_sftpd_file_api.$(EMULATOR): ssh_sftpd_file_api.erl
+$(EBIN)/ssh_client_channel.$(EMULATOR): ssh_client_channel.erl ssh_connect.hrl
$(EBIN)/ssh_channel.$(EMULATOR): ssh_channel.erl ssh_connect.hrl
$(EBIN)/ssh_daemon_channel.$(EMULATOR): ssh_daemon_channel.erl
+$(EBIN)/ssh_server_channel.$(EMULATOR): ssh_server_channel.erl
$(EBIN)/ssh_client_key_api.$(EMULATOR): ssh_client_key_api.erl \
../../public_key/include/public_key.hrl \
../../public_key/include/OTP-PUB-KEY.hrl \
diff --git a/lib/ssh/src/ssh.app.src b/lib/ssh/src/ssh.app.src
index 4a22322333..410061cded 100644
--- a/lib/ssh/src/ssh.app.src
+++ b/lib/ssh/src/ssh.app.src
@@ -12,9 +12,9 @@
ssh_message,
ssh_bits,
ssh_cli,
+ ssh_client_channel,
ssh_client_key_api,
ssh_channel,
- ssh_channel_sup,
ssh_connection,
ssh_connection_handler,
ssh_connection_sup,
@@ -27,6 +27,8 @@
ssh_io,
ssh_info,
ssh_no_io,
+ ssh_server_channel,
+ ssh_server_channel_sup,
ssh_server_key_api,
ssh_sftp,
ssh_sftpd,
diff --git a/lib/ssh/src/ssh.erl b/lib/ssh/src/ssh.erl
index 25d537c624..7ddb1ca5be 100644
--- a/lib/ssh/src/ssh.erl
+++ b/lib/ssh/src/ssh.erl
@@ -41,35 +41,51 @@
shell/1, shell/2, shell/3
]).
+%%% "Deprecated" types export:
+-export_type([ssh_daemon_ref/0, ssh_connection_ref/0, ssh_channel_id/0]).
+-opaque ssh_daemon_ref() :: daemon_ref().
+-opaque ssh_connection_ref() :: connection_ref().
+-opaque ssh_channel_id() :: channel_id().
+
+
%%% Type exports
--export_type([ssh_daemon_ref/0,
- ssh_connection_ref/0,
- ssh_channel_id/0,
+-export_type([daemon_ref/0,
+ connection_ref/0,
+ channel_id/0,
+ client_options/0, client_option/0,
+ daemon_options/0, daemon_option/0,
+ common_options/0,
role/0,
subsystem_spec/0,
- subsystem_name/0,
- channel_callback/0,
- channel_init_args/0,
algs_list/0,
+ double_algs/1,
+ modify_algs_list/0,
alg_entry/0,
- simple_algs/0,
- double_algs/0
+ kex_alg/0,
+ pubkey_alg/0,
+ cipher_alg/0,
+ mac_alg/0,
+ compression_alg/0,
+ ip_port/0
]).
--opaque ssh_daemon_ref() :: daemon_ref() .
--opaque ssh_connection_ref() :: connection_ref() .
--opaque ssh_channel_id() :: channel_id().
+
+-opaque daemon_ref() :: pid() .
+-opaque channel_id() :: non_neg_integer().
+-type connection_ref() :: pid(). % should be -opaque, but that gives problems
%%--------------------------------------------------------------------
--spec start() -> ok | {error, term()}.
--spec start(permanent | transient | temporary) -> ok | {error, term()}.
-%%
%% Description: Starts the ssh application. Default type
%% is temporary. see application(3)
%%--------------------------------------------------------------------
+-spec start() -> ok | {error, term()}.
+
start() ->
start(temporary).
+-spec start(Type) -> ok | {error, term()} when
+ Type :: permanent | transient | temporary .
+
start(Type) ->
case application:ensure_all_started(ssh, Type) of
{ok, _} ->
@@ -79,30 +95,32 @@ start(Type) ->
end.
%%--------------------------------------------------------------------
--spec stop() -> ok | {error, term()}.
-%%
%% Description: Stops the ssh application.
%%--------------------------------------------------------------------
+-spec stop() -> ok | {error, term()}.
+
stop() ->
application:stop(ssh).
%%--------------------------------------------------------------------
--spec connect(inet:socket(), proplists:proplist()) -> ok_error(connection_ref()).
+%% Description: Starts an ssh connection.
+%%--------------------------------------------------------------------
+-spec connect(OpenTcpSocket, Options) -> {ok,connection_ref()} | {error,term()} when
+ OpenTcpSocket :: open_socket(),
+ Options :: client_options().
--spec connect(inet:socket(), proplists:proplist(), timeout()) -> ok_error(connection_ref())
- ; (string(), inet:port_number(), proplists:proplist()) -> ok_error(connection_ref()).
+connect(OpenTcpSocket, Options) when is_port(OpenTcpSocket),
+ is_list(Options) ->
+ connect(OpenTcpSocket, Options, infinity).
--spec connect(string(), inet:port_number(), proplists:proplist(), timeout()) -> ok_error(connection_ref()).
-%%
-%% Description: Starts an ssh connection.
-%%--------------------------------------------------------------------
-connect(Socket, UserOptions) when is_port(Socket),
- is_list(UserOptions) ->
- connect(Socket, UserOptions, infinity).
+-spec connect(open_socket(), client_options(), timeout()) ->
+ {ok,connection_ref()} | {error,term()}
+ ; (host(), inet:port_number(), client_options()) ->
+ {ok,connection_ref()} | {error,term()}.
-connect(Socket, UserOptions, Timeout) when is_port(Socket),
- is_list(UserOptions) ->
+connect(Socket, UserOptions, NegotiationTimeout) when is_port(Socket),
+ is_list(UserOptions) ->
case ssh_options:handle_options(client, UserOptions) of
{error, Error} ->
{error, Error};
@@ -111,16 +129,23 @@ connect(Socket, UserOptions, Timeout) when is_port(Socket),
ok ->
{ok, {Host,_Port}} = inet:sockname(Socket),
Opts = ?PUT_INTERNAL_OPT([{user_pid,self()}, {host,Host}], Options),
- ssh_connection_handler:start_connection(client, Socket, Opts, Timeout);
+ ssh_connection_handler:start_connection(client, Socket, Opts, NegotiationTimeout);
{error,SockError} ->
{error,SockError}
end
end;
-connect(Host, Port, UserOptions) when is_integer(Port),
- Port>0,
- is_list(UserOptions) ->
- connect(Host, Port, UserOptions, infinity).
+connect(Host, Port, Options) when is_integer(Port),
+ Port>0,
+ is_list(Options) ->
+ connect(Host, Port, Options, infinity).
+
+
+-spec connect(Host, Port, Options, NegotiationTimeout) -> {ok,connection_ref()} | {error,term()} when
+ Host :: host(),
+ Port :: inet:port_number(),
+ Options :: client_options(),
+ NegotiationTimeout :: timeout().
connect(Host0, Port, UserOptions, Timeout) when is_integer(Port),
Port>0,
@@ -148,7 +173,8 @@ connect(Host0, Port, UserOptions, Timeout) when is_integer(Port),
end.
%%--------------------------------------------------------------------
--spec close(pid()) -> ok.
+-spec close(ConnectionRef) -> ok | {error,term()} when
+ ConnectionRef :: connection_ref() .
%%
%% Description: Closes an ssh connection.
%%--------------------------------------------------------------------
@@ -156,15 +182,25 @@ close(ConnectionRef) ->
ssh_connection_handler:stop(ConnectionRef).
%%--------------------------------------------------------------------
--spec connection_info(pid(), [atom()]) -> [{atom(), term()}].
-%%
%% Description: Retrieves information about a connection.
%%--------------------------------------------------------------------
-connection_info(ConnectionRef, Options) ->
- ssh_connection_handler:connection_info(ConnectionRef, Options).
+-spec connection_info(ConnectionRef, Keys) -> ConnectionInfo when
+ ConnectionRef :: connection_ref(),
+ Keys :: [client_version | server_version | user | peer | sockname],
+ ConnectionInfo :: [{client_version, Version}
+ | {server_version, Version}
+ | {user,string()}
+ | {peer, {inet:hostname(), ip_port()}}
+ | {sockname, ip_port()}
+ ],
+ Version :: {ProtocolVersion, VersionString::string()},
+ ProtocolVersion :: {Major::pos_integer(), Minor::non_neg_integer()} .
+
+connection_info(Connection, Options) ->
+ ssh_connection_handler:connection_info(Connection, Options).
%%--------------------------------------------------------------------
--spec channel_info(pid(), channel_id(), [atom()]) -> [{atom(), term()}].
+-spec channel_info(connection_ref(), channel_id(), [atom()]) -> proplists:proplist().
%%
%% Description: Retrieves information about a connection.
%%--------------------------------------------------------------------
@@ -172,18 +208,17 @@ channel_info(ConnectionRef, ChannelId, Options) ->
ssh_connection_handler:channel_info(ConnectionRef, ChannelId, Options).
%%--------------------------------------------------------------------
--spec daemon(inet:port_number()) -> ok_error(daemon_ref()).
--spec daemon(inet:port_number()|inet:socket(), proplists:proplist()) -> ok_error(daemon_ref()).
--spec daemon(any | inet:ip_address(), inet:port_number(), proplists:proplist()) -> ok_error(daemon_ref())
- ;(socket, inet:socket(), proplists:proplist()) -> ok_error(daemon_ref())
- .
-
%% Description: Starts a server listening for SSH connections
%% on the given port.
%%--------------------------------------------------------------------
+-spec daemon(inet:port_number()) -> {ok,daemon_ref()} | {error,term()}.
+
daemon(Port) ->
daemon(Port, []).
+
+-spec daemon(inet:port_number()|open_socket(), daemon_options()) -> {ok,daemon_ref()} | {error,term()}.
+
daemon(Socket, UserOptions) when is_port(Socket) ->
try
#{} = Options = ssh_options:handle_options(server, UserOptions),
@@ -226,6 +261,10 @@ daemon(Port, UserOptions) when 0 =< Port, Port =< 65535 ->
daemon(any, Port, UserOptions).
+-spec daemon(any | inet:ip_address(), inet:port_number(), daemon_options()) -> {ok,daemon_ref()} | {error,term()}
+ ;(socket, open_socket(), daemon_options()) -> {ok,daemon_ref()} | {error,term()}
+ .
+
daemon(Host0, Port0, UserOptions0) when 0 =< Port0, Port0 =< 65535,
Host0 == any ; Host0 == loopback ; is_tuple(Host0) ->
try
@@ -267,7 +306,12 @@ daemon(_, _, _) ->
{error, badarg}.
%%--------------------------------------------------------------------
--spec daemon_info(daemon_ref()) -> ok_error( [{atom(), term()}] ).
+-spec daemon_info(Daemon) -> {ok, DaemonInfo} | {error,term()} when
+ Daemon :: daemon_ref(),
+ DaemonInfo :: [ {ip, inet:ip_address()}
+ | {port, inet:port_number()}
+ | {profile, term()}
+ ].
daemon_info(Pid) ->
case catch ssh_system_sup:acceptor_supervisor(Pid) of
@@ -290,16 +334,23 @@ daemon_info(Pid) ->
end.
%%--------------------------------------------------------------------
--spec stop_listener(daemon_ref()) -> ok.
--spec stop_listener(inet:ip_address(), inet:port_number()) -> ok.
-%%
%% Description: Stops the listener, but leaves
%% existing connections started by the listener up and running.
%%--------------------------------------------------------------------
+-spec stop_listener(daemon_ref()) -> ok.
+
stop_listener(SysSup) ->
ssh_system_sup:stop_listener(SysSup).
+
+
+-spec stop_listener(inet:ip_address(), inet:port_number()) -> ok.
+
stop_listener(Address, Port) ->
stop_listener(Address, Port, ?DEFAULT_PROFILE).
+
+
+-spec stop_listener(any|inet:ip_address(), inet:port_number(), term()) -> ok.
+
stop_listener(any, Port, Profile) ->
map_ip(fun(IP) ->
ssh_system_sup:stop_listener(IP, Port, Profile)
@@ -310,17 +361,23 @@ stop_listener(Address, Port, Profile) ->
end, {address,Address}).
%%--------------------------------------------------------------------
--spec stop_daemon(daemon_ref()) -> ok.
--spec stop_daemon(inet:ip_address(), inet:port_number()) -> ok.
--spec stop_daemon(inet:ip_address(), inet:port_number(), atom()) -> ok.
-%%
%% Description: Stops the listener and all connections started by
%% the listener.
%%--------------------------------------------------------------------
+-spec stop_daemon(DaemonRef::daemon_ref()) -> ok.
+
stop_daemon(SysSup) ->
ssh_system_sup:stop_system(SysSup).
+
+
+-spec stop_daemon(inet:ip_address(), inet:port_number()) -> ok.
+
stop_daemon(Address, Port) ->
stop_daemon(Address, Port, ?DEFAULT_PROFILE).
+
+
+-spec stop_daemon(any|inet:ip_address(), inet:port_number(), atom()) -> ok.
+
stop_daemon(any, Port, Profile) ->
map_ip(fun(IP) ->
ssh_system_sup:stop_system(IP, Port, Profile)
@@ -331,33 +388,37 @@ stop_daemon(Address, Port, Profile) ->
end, {address,Address}).
%%--------------------------------------------------------------------
--spec shell(inet:socket() | string()) -> _.
--spec shell(inet:socket() | string(), proplists:proplist()) -> _.
--spec shell(string(), inet:port_number(), proplists:proplist()) -> _.
-
-%% Host = string()
-%% Port = integer()
-%% Options = [{Option, Value}]
-%%
%% Description: Starts an interactive shell to an SSH server on the
%% given <Host>. The function waits for user input,
%% and will not return until the remote shell is ended.(e.g. on
%% exit from the shell)
%%--------------------------------------------------------------------
+-spec shell(open_socket() | host()) -> _.
+
shell(Socket) when is_port(Socket) ->
shell(Socket, []);
shell(Host) ->
shell(Host, ?SSH_DEFAULT_PORT, []).
+
+-spec shell(open_socket() | host(), client_options()) -> _.
+
shell(Socket, Options) when is_port(Socket) ->
start_shell( connect(Socket, Options) );
shell(Host, Options) ->
shell(Host, ?SSH_DEFAULT_PORT, Options).
+
+-spec shell(Host, Port, Options) -> _ when
+ Host :: host(),
+ Port :: inet:port_number(),
+ Options :: client_options() .
+
shell(Host, Port, Options) ->
start_shell( connect(Host, Port, Options) ).
+
start_shell({ok, ConnectionRef}) ->
case ssh_connection:session_channel(ConnectionRef, infinity) of
{ok,ChannelId} ->
@@ -365,11 +426,17 @@ start_shell({ok, ConnectionRef}) ->
Args = [{channel_cb, ssh_shell},
{init_args,[ConnectionRef, ChannelId]},
{cm, ConnectionRef}, {channel_id, ChannelId}],
- {ok, State} = ssh_channel:init([Args]),
- ssh_channel:enter_loop(State);
+ {ok, State} = ssh_client_channel:init([Args]),
+ try
+ ssh_client_channel:enter_loop(State)
+ catch
+ exit:normal ->
+ ok
+ end;
Error ->
Error
end;
+
start_shell(Error) ->
Error.
@@ -380,7 +447,7 @@ default_algorithms() ->
ssh_transport:default_algorithms().
%%--------------------------------------------------------------------
--spec chk_algos_opts(list(any())) -> algs_list() .
+-spec chk_algos_opts(client_options()|daemon_options()) -> internal_options() | {error,term()}.
%%--------------------------------------------------------------------
chk_algos_opts(Opts) ->
case lists:foldl(
diff --git a/lib/ssh/src/ssh.hrl b/lib/ssh/src/ssh.hrl
index 0e118ac13f..a3d9a1b1cb 100644
--- a/lib/ssh/src/ssh.hrl
+++ b/lib/ssh/src/ssh.hrl
@@ -98,35 +98,267 @@
%% Types
--type role() :: client | server .
--type ok_error(SuccessType) :: {ok, SuccessType} | {error, any()} .
--type daemon_ref() :: pid() .
+-type role() :: client | server .
+
+-type host() :: string() | inet:ip_address() | loopback .
+-type open_socket() :: gen_tcp:socket().
+
+-type subsystem_spec() :: {Name::string(), mod_args()} .
+
+-type algs_list() :: list( alg_entry() ).
+-type alg_entry() :: {kex, [kex_alg()]}
+ | {public_key, [pubkey_alg()]}
+ | {cipher, double_algs(cipher_alg())}
+ | {mac, double_algs(mac_alg())}
+ | {compression, double_algs(compression_alg())} .
+
+-type kex_alg() :: 'diffie-hellman-group-exchange-sha1' |
+ 'diffie-hellman-group-exchange-sha256' |
+ 'diffie-hellman-group1-sha1' |
+ 'diffie-hellman-group14-sha1' |
+ 'diffie-hellman-group14-sha256' |
+ 'diffie-hellman-group16-sha512' |
+ 'diffie-hellman-group18-sha512' |
+ 'ecdh-sha2-nistp256' |
+ 'ecdh-sha2-nistp384' |
+ 'ecdh-sha2-nistp521'
+ .
+
+-type pubkey_alg() :: 'ecdsa-sha2-nistp256' |
+ 'ecdsa-sha2-nistp384' |
+ 'ecdsa-sha2-nistp521' |
+ 'rsa-sha2-256' |
+ 'rsa-sha2-512' |
+ 'ssh-dss' |
+ 'ssh-rsa'
+ .
+
+-type cipher_alg() :: '3des-cbc' |
+ 'AEAD_AES_128_GCM' |
+ 'AEAD_AES_256_GCM' |
+ 'aes128-cbc' |
+ 'aes128-ctr' |
+ 'aes192-ctr' |
+                       'aes256-ctr'
+ .
+
+-type mac_alg() :: 'AEAD_AES_128_GCM' |
+ 'AEAD_AES_256_GCM' |
+ 'hmac-sha1' |
+ 'hmac-sha2-256' |
+ 'hmac-sha2-512'
+ .
+
+-type compression_alg() :: 'none' |
+                           'zlib'
+ .
+
+-type double_algs(AlgType) :: list( {client2server,[AlgType]} | {server2client,[AlgType]} )
+ | [AlgType].
+
+-type modify_algs_list() :: list( {append,algs_list()} | {prepend,algs_list()} | {rm,algs_list()} ) .
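+
+%% For orientation, a hedged example of a value matching algs_list()/double_algs()
+%% above, used as a preferred_algorithms option; the chosen algorithms and host
+%% are arbitrary:
+%%
+%%     Algs = [{kex,    ['ecdh-sha2-nistp256', 'diffie-hellman-group14-sha256']},
+%%             {cipher, [{client2server, ['aes256-ctr']},
+%%                       {server2client, ['aes256-ctr', 'aes192-ctr']}]},
+%%             {mac,    ['hmac-sha2-256']}],
+%%     {ok, Conn} = ssh:connect("host.example.com", 22, [{preferred_algorithms, Algs}]).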
+
+-type internal_options() :: ssh_options:private_options().
+-type socket_options() :: [gen_tcp:connect_option() | gen_tcp:listen_option()].
+
+-type client_options() :: [ client_option() ] .
+-type daemon_options() :: [ daemon_option() ].
+
+
+-type common_options() :: [ common_option() ].
+-type common_option() ::
+ user_dir_common_option()
+ | profile_common_option()
+ | max_idle_time_common_option()
+ | key_cb_common_option()
+ | disconnectfun_common_option()
+ | unexpectedfun_common_option()
+ | ssh_msg_debug_fun_common_option()
+ | rekey_limit_common_option()
+ | id_string_common_option()
+ | preferred_algorithms_common_option()
+ | modify_algorithms_common_option()
+ | auth_methods_common_option()
+ | inet_common_option()
+ | fd_common_option()
+ .
+
+-define(COMMON_OPTION, common_option()).
+
+
+-type user_dir_common_option() :: {user_dir, false | string()}.
+-type profile_common_option() :: {profile, atom() }.
+-type max_idle_time_common_option() :: {idle_time, timeout()}.
+-type rekey_limit_common_option() :: {rekey_limit, non_neg_integer() }.
+
+-type key_cb_common_option() :: {key_cb, Module::atom() | {Module::atom(),Opts::[term()]} } .
+-type disconnectfun_common_option() ::
+ {disconnectfun, fun((Reason::term()) -> void | any()) }.
+-type unexpectedfun_common_option() ::
+ {unexpectedfun, fun((Message::term(),{Host::term(),Port::term()}) -> report | skip ) }.
+-type ssh_msg_debug_fun_common_option() ::
+ {ssh_msg_debug_fun, fun((ssh:connection_ref(),AlwaysDisplay::boolean(),Msg::binary(),LanguageTag::binary()) -> any()) } .
+
+-type id_string_common_option() :: {id_string, string() | random | {random,Nmin::pos_integer(),Nmax::pos_integer()} }.
+-type preferred_algorithms_common_option():: {preferred_algorithms, algs_list()}.
+-type modify_algorithms_common_option() :: {modify_algorithms, modify_algs_list()}.
+-type auth_methods_common_option() :: {auth_methods, string() }.
+
+-type inet_common_option() :: {inet, inet | inet6} .
+-type fd_common_option() :: {fd, gen_tcp:socket()} .
+
+
+-type opaque_common_options() ::
+ {transport, {atom(),atom(),atom()} }
+ | {vsn, {non_neg_integer(),non_neg_integer()} }
+ | {tstflg, list(term())}
+ | {user_dir_fun, fun()}
+ | {max_random_length_padding, non_neg_integer()} .
+
+
+
+-type client_option() ::
+ pref_public_key_algs_client_option()
+ | pubkey_passphrase_client_options()
+ | host_accepting_client_options()
+ | authentication_client_options()
+ | diffie_hellman_group_exchange_client_option()
+ | connect_timeout_client_option()
+ | recv_ext_info_client_option()
+ | opaque_client_options()
+ | gen_tcp:connect_option()
+ | ?COMMON_OPTION .
+
+-type opaque_client_options() ::
+ {keyboard_interact_fun, fun((term(),term(),term()) -> term())}
+ | opaque_common_options().
+
+-type pref_public_key_algs_client_option() :: {pref_public_key_algs, [pubkey_alg()] } .
+
+-type pubkey_passphrase_client_options() :: {dsa_pass_phrase, string()}
+ | {rsa_pass_phrase, string()}
+ | {ecdsa_pass_phrase, string()} .
+
+-type host_accepting_client_options() ::
+ {silently_accept_hosts, accept_hosts()}
+ | {user_interaction, boolean()}
+ | {save_accepted_host, boolean()}
+ | {quiet_mode, boolean()} .
+
+-type accept_hosts() :: boolean()
+ | accept_callback()
+ | {HashAlgoSpec::fp_digest_alg(), accept_callback()}.
+
+-type fp_digest_alg() :: 'md5' |
+ 'sha' |
+ 'sha224' |
+ 'sha256' |
+ 'sha384' |
+ 'sha512'
+ .
+
+-type accept_callback() :: fun((PeerName::string(), fingerprint() ) -> boolean()) .
+-type fingerprint() :: string() | [string()].
+
+-type authentication_client_options() ::
+ {user, string()}
+ | {password, string()} .
+
+-type diffie_hellman_group_exchange_client_option() ::
+ {dh_gex_limits, {Min::pos_integer(), I::pos_integer(), Max::pos_integer()} } .
+
+-type connect_timeout_client_option() :: {connect_timeout, timeout()} .
+
+-type recv_ext_info_client_option() :: {recv_ext_info, boolean()} .
+
+
+
+-type daemon_option() ::
+ subsystem_daemon_option()
+ | shell_daemon_option()
+ | exec_daemon_option()
+ | ssh_cli_daemon_option()
+ | authentication_daemon_options()
+ | diffie_hellman_group_exchange_daemon_option()
+ | negotiation_timeout_daemon_option()
+ | hardening_daemon_options()
+ | callbacks_daemon_options()
+ | send_ext_info_daemon_option()
+ | opaque_daemon_options()
+ | gen_tcp:listen_option()
+ | ?COMMON_OPTION .
--type subsystem_spec() :: {subsystem_name(), {channel_callback(), channel_init_args()}} .
--type subsystem_name() :: string() .
--type channel_callback() :: atom() .
--type channel_init_args() :: list() .
+-type subsystem_daemon_option() :: {subsystems, subsystem_spec()}.
--type algs_list() :: list( alg_entry() ).
--type alg_entry() :: {kex, simple_algs()}
- | {public_key, simple_algs()}
- | {cipher, double_algs()}
- | {mac, double_algs()}
- | {compression, double_algs()} .
--type simple_algs() :: list( atom() ) .
--type double_algs() :: list( {client2server,simple_algs()} | {server2client,simple_algs()} )
- | simple_algs() .
+-type shell_daemon_option() :: {shell, mod_fun_args() | 'shell_fun/1'() | 'shell_fun/2'() }.
+-type 'shell_fun/1'() :: fun((User::string()) -> pid()) .
+-type 'shell_fun/2'() :: fun((User::string(), PeerAddr::inet:ip_address()) -> pid()).
--type options() :: #{socket_options := socket_options(),
- internal_options := internal_options(),
- option_key() => any()
- }.
+-type exec_daemon_option() :: {exec, 'exec_fun/1'() | 'exec_fun/2'() | 'exec_fun/3'() }.
--type socket_options() :: proplists:proplist().
--type internal_options() :: #{option_key() => any()}.
+-type 'exec_fun/1'() :: fun((Cmd::string()) -> exec_result()) .
+-type 'exec_fun/2'() :: fun((Cmd::string(), User::string()) -> exec_result()) .
+-type 'exec_fun/3'() :: fun((Cmd::string(), User::string(), ClientAddr::ip_port()) -> exec_result()) .
+-type exec_result() :: {ok,Result::term()} | {error,Reason::term()} .
--type option_key() :: atom().
+-type ssh_cli_daemon_option() :: {ssh_cli, mod_args() | no_cli }.
+-type send_ext_info_daemon_option() :: {send_ext_info, boolean()} .
+
+-type authentication_daemon_options() ::
+ {system_dir, string()}
+ | {auth_method_kb_interactive_data, prompt_texts() }
+ | {user_passwords, [{UserName::string(),Pwd::string()}]}
+ | {password, string()}
+ | {pwdfun, pwdfun_2() | pwdfun_4()} .
+
+-type prompt_texts() ::
+ kb_int_tuple()
+ | kb_int_fun_3()
+ .
+
+-type kb_int_fun_3() :: fun((Peer::ip_port(), User::string(), Service::string()) -> kb_int_tuple()).
+-type kb_int_tuple() :: {Name::string(), Instruction::string(), Prompt::string(), Echo::boolean()}.
+
+-type pwdfun_2() :: fun((User::string(), Password::string()) -> boolean()) .
+-type pwdfun_4() :: fun((User::string(),
+ Password::string(),
+ PeerAddress::ip_port(),
+ State::any()) ->
+ boolean() | disconnect | {boolean(),NewState::any()}
+ ) .
+
+-type diffie_hellman_group_exchange_daemon_option() ::
+ {dh_gex_groups, [explicit_group()] | explicit_group_file() | ssh_moduli_file()}
+ | {dh_gex_limits, {Min::pos_integer(), Max::pos_integer()} } .
+
+-type explicit_group() :: {Size::pos_integer(),G::pos_integer(),P::pos_integer()} .
+-type explicit_group_file() :: {file,string()} .
+-type ssh_moduli_file() :: {ssh_moduli_file,string()}.
+
+-type negotiation_timeout_daemon_option() :: {negotiation_timeout, timeout()} .
+
+-type hardening_daemon_options() ::
+ {max_sessions, pos_integer()}
+ | {max_channels, pos_integer()}
+ | {parallel_login, boolean()}
+ | {minimal_remote_max_packet_size, pos_integer()}.
+
+-type callbacks_daemon_options() ::
+ {failfun, fun((User::string(), PeerAddress::inet:ip_address(), Reason::term()) -> _)}
+ | {connectfun, fun((User::string(), PeerAddress::inet:ip_address(), Method::string()) ->_)} .
+
+-type opaque_daemon_options() ::
+ {infofun, fun()}
+ | opaque_common_options().
+
+-type ip_port() :: {inet:ip_address(), inet:port_number()} .
+
+-type mod_args() :: {Module::atom(), Args::list()} .
+-type mod_fun_args() :: {Module::atom(), Function::atom(), Args::list()} .
%% Records
@@ -134,8 +366,7 @@
{
role :: client | role(),
peer :: undefined |
- {inet:hostname(),
- {inet:ip_address(),inet:port_number()}}, %% string version of peer address
+ {inet:hostname(),ip_port()}, %% string version of peer address
local, %% Local sockname. Need this AFTER a socket is closed by i.e. a crash
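For illustration only (not part of the patch): a minimal sketch of how the client_option() and daemon_option() lists typed above might look in practice. The host, port, directories, passwords and the particular algorithm picks are invented placeholders; only option names and algorithm atoms that appear in the type declarations are used.

    %% Hypothetical client: authentication, host-accepting and algorithm options.
    {ok, Conn} = ssh:connect("host.example", 22,
                             [{user, "alice"},
                              {silently_accept_hosts, true},
                              {connect_timeout, 5000},
                              {preferred_algorithms,
                               [{public_key, ['ecdsa-sha2-nistp256', 'rsa-sha2-256']},
                                {cipher, [{client2server, ['aes128-ctr']},
                                          {server2client, ['aes256-ctr']}]}]}]).

    %% Hypothetical daemon: authentication, hardening and algorithm-tweaking options.
    {ok, Daemon} = ssh:daemon(2222,
                              [{system_dir, "/tmp/ssh_daemon"},
                               {user_passwords, [{"alice", "secret"}]},
                               {max_sessions, 10},
                               {negotiation_timeout, 30000},
                               {modify_algorithms, [{rm, [{cipher, ['3des-cbc']}]}]}]).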
diff --git a/lib/ssh/src/ssh_acceptor_sup.erl b/lib/ssh/src/ssh_acceptor_sup.erl
index fc564a359b..10fd4452bf 100644
--- a/lib/ssh/src/ssh_acceptor_sup.erl
+++ b/lib/ssh/src/ssh_acceptor_sup.erl
@@ -36,8 +36,6 @@
-define(DEFAULT_TIMEOUT, 50000).
--spec init( [term()] ) -> {ok,{supervisor:sup_flags(),[supervisor:child_spec()]}} | ignore .
-
%%%=========================================================================
%%% API
%%%=========================================================================
diff --git a/lib/ssh/src/ssh_channel.erl b/lib/ssh/src/ssh_channel.erl
index b90e571448..81c495a815 100644
--- a/lib/ssh/src/ssh_channel.erl
+++ b/lib/ssh/src/ssh_channel.erl
@@ -50,407 +50,43 @@
{ok, NewState :: term()} | {error, Reason :: term()}.
-callback handle_msg(Msg ::term(), State :: term()) ->
- {ok, State::term()} | {stop, ChannelId::integer(), State::term()}.
+ {ok, State::term()} | {stop, ChannelId::ssh:channel_id(), State::term()}.
--callback handle_ssh_msg({ssh_cm, ConnectionRef::term(), SshMsg::term()},
+-callback handle_ssh_msg({ssh_cm, ConnectionRef::ssh:connection_ref(), SshMsg::term()},
State::term()) -> {ok, State::term()} |
- {stop, ChannelId::integer(),
+ {stop, ChannelId::ssh:channel_id(),
State::term()}.
--behaviour(gen_server).
-
%%% API
-export([start/4, start/5, start_link/4, start_link/5, call/2, call/3,
cast/2, reply/2, enter_loop/1]).
-%% gen_server callbacks
--export([init/1, handle_call/3, handle_cast/2, handle_info/2,
- terminate/2, code_change/3]).
-
-%% Internal application API
--export([cache_create/0, cache_lookup/2, cache_update/2,
- cache_delete/1, cache_delete/2, cache_foldl/3,
- cache_info/2, cache_find/2,
- get_print_info/1]).
-
--export([dbg_trace/3]).
-
--record(state, {
- cm,
- channel_cb,
- channel_state,
- channel_id,
- close_sent = false
- }).
-
%%====================================================================
%% API
%%====================================================================
call(ChannelPid, Msg) ->
- call(ChannelPid, Msg, infinity).
+ ssh_client_channel:call(ChannelPid, Msg).
call(ChannelPid, Msg, TimeOute) ->
- try gen_server:call(ChannelPid, Msg, TimeOute) of
- Result ->
- Result
- catch
- exit:{noproc, _} ->
- {error, closed};
- exit:{normal, _} ->
- {error, closed};
- exit:{shutdown, _} ->
- {error, closed};
- exit:{{shutdown, _}, _} ->
- {error, closed};
- exit:{timeout, _} ->
- {error, timeout}
- end.
+ ssh_client_channel:call(ChannelPid, Msg, TimeOute).
cast(ChannelPid, Msg) ->
- gen_server:cast(ChannelPid, Msg).
-
+ ssh_client_channel:cast(ChannelPid, Msg).
reply(From, Msg) ->
- gen_server:reply(From, Msg).
+ ssh_client_channel:reply(From, Msg).
-%%====================================================================
-%% Internal application API
-%%====================================================================
-
-%%--------------------------------------------------------------------
-%% Function: start_link() -> {ok,Pid} | ignore | {error,Error}
-%% Description: Starts the server
-%%--------------------------------------------------------------------
start(ConnectionManager, ChannelId, CallBack, CbInitArgs) ->
- start(ConnectionManager, ChannelId, CallBack, CbInitArgs, undefined).
+ ssh_client_channel:start(ConnectionManager, ChannelId, CallBack, CbInitArgs).
start(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec) ->
- Options = [{channel_cb, CallBack},
- {channel_id, ChannelId},
- {init_args, CbInitArgs},
- {cm, ConnectionManager},
- {exec, Exec}],
- gen_server:start(?MODULE, [Options], []).
+ ssh_client_channel:start(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec).
start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs) ->
- start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, undefined).
+ ssh_client_channel:start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs).
start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec) ->
- Options = [{channel_cb, CallBack},
- {channel_id, ChannelId},
- {init_args, CbInitArgs},
- {cm, ConnectionManager},
- {exec, Exec}],
- gen_server:start_link(?MODULE, [Options], []).
+ ssh_client_channel:start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec).
enter_loop(State) ->
- gen_server:enter_loop(?MODULE, [], State).
-
-%%====================================================================
-%% gen_server callbacks
-%%====================================================================
-
-%%--------------------------------------------------------------------
-%% Function: init(Args) -> {ok, State} |
-%% {ok, State, Timeout} |
-%% ignore |
-%% {stop, Reason}
-%% Description: Initiates the server
-%%--------------------------------------------------------------------
-init([Options]) ->
- Cb = proplists:get_value(channel_cb, Options),
- ConnectionManager = proplists:get_value(cm, Options),
- ChannelId = proplists:get_value(channel_id, Options),
- process_flag(trap_exit, true),
- try Cb:init(channel_cb_init_args(Options)) of
- {ok, ChannelState} ->
- State = #state{cm = ConnectionManager,
- channel_cb = Cb,
- channel_id = ChannelId,
- channel_state = ChannelState},
- self() ! {ssh_channel_up, ChannelId, ConnectionManager},
- {ok, State};
- {ok, ChannelState, Timeout} ->
- State = #state{cm = ConnectionManager,
- channel_cb = Cb,
- channel_id = ChannelId,
- channel_state = ChannelState},
- self() ! {ssh_channel_up, ChannelId, ConnectionManager},
- {ok, State, Timeout};
- {stop, Why} ->
- {stop, Why}
- catch
- _:Reason ->
- {stop, Reason}
- end.
-
-channel_cb_init_args(Options) ->
- case proplists:get_value(exec, Options) of
- undefined ->
- proplists:get_value(init_args, Options);
- Exec ->
- proplists:get_value(init_args, Options) ++ [Exec]
- end.
-
-%%--------------------------------------------------------------------
-%% Function: %% handle_call(Request, From, State) -> {reply, Reply, State} |
-%% {reply, Reply, State, Timeout} |
-%% {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, Reply, State} |
-%% {stop, Reason, State}
-%% Description: Handling call messages
-%%--------------------------------------------------------------------
-handle_call(get_print_info, _From, State) ->
- Reply =
- {{State#state.cm,
- State#state.channel_id},
- io_lib:format('CB=~p',[State#state.channel_cb])
- },
- {reply, Reply, State};
-
-handle_call(Request, From, #state{channel_cb = Module,
- channel_state = ChannelState} = State) ->
- try Module:handle_call(Request, From, ChannelState) of
- Result ->
- handle_cb_result(Result, State)
- catch
- error:{undef, _} ->
- {noreply, State}
- end.
-
-
-%%--------------------------------------------------------------------
-%% Function: handle_cast(Msg, State) -> {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, State}
-%% Description: Handling cast messages
-%%--------------------------------------------------------------------
-handle_cast(Msg, #state{channel_cb = Module,
- channel_state = ChannelState} = State) ->
-
- try Module:handle_cast(Msg, ChannelState) of
- Result ->
- handle_cb_result(Result, State)
- catch
- error:{undef, _} ->
- {noreply, State}
- end.
-
-%%--------------------------------------------------------------------
-%% Function: handle_info(Info, State) -> {noreply, State} |
-%% {noreply, State, Timeout} |
-%% {stop, Reason, State}
-%% Description: Handling all non call/cast messages
-%%--------------------------------------------------------------------
-handle_info({ssh_cm, ConnectionManager, {closed, _ChannelId}},
- #state{cm = ConnectionManager,
- close_sent = true} = State) ->
- {stop, normal, State};
-handle_info({ssh_cm, ConnectionManager, {closed, ChannelId}},
- #state{cm = ConnectionManager,
- close_sent = false} = State) ->
- %% To be on the safe side, i.e. the manager has already been terminated.
- (catch ssh_connection:close(ConnectionManager, ChannelId)),
- {stop, normal, State#state{close_sent = true}};
-
-handle_info({ssh_cm, _, _} = Msg, #state{cm = ConnectionManager,
- channel_cb = Module,
- channel_state = ChannelState0} = State) ->
- case Module:handle_ssh_msg(Msg, ChannelState0) of
- {ok, ChannelState} ->
- adjust_window(Msg),
- {noreply, State#state{channel_state = ChannelState}};
- {ok, ChannelState, Timeout} ->
- adjust_window(Msg),
- {noreply, State#state{channel_state = ChannelState}, Timeout};
- {stop, ChannelId, ChannelState} ->
- catch ssh_connection:close(ConnectionManager, ChannelId),
- {stop, normal, State#state{close_sent = true,
- channel_state = ChannelState}}
- end;
-
-handle_info(Msg, #state{cm = ConnectionManager, channel_cb = Module,
- channel_state = ChannelState0} = State) ->
- case Module:handle_msg(Msg, ChannelState0) of
- {ok, ChannelState} ->
- {noreply, State#state{channel_state = ChannelState}};
- {ok, ChannelState, Timeout} ->
- {noreply, State#state{channel_state = ChannelState}, Timeout};
- {stop, Reason, ChannelState} when is_atom(Reason)->
- {stop, Reason, State#state{close_sent = true,
- channel_state = ChannelState}};
- {stop, ChannelId, ChannelState} ->
- Reason =
- case Msg of
- {'EXIT', _Pid, shutdown} ->
- shutdown;
- _ ->
- normal
- end,
- (catch ssh_connection:close(ConnectionManager, ChannelId)),
- {stop, Reason, State#state{close_sent = true,
- channel_state = ChannelState}}
- end.
-
-%%--------------------------------------------------------------------
-%% Function: terminate(Reason, State) -> void()
-%% Description: This function is called by a gen_server when it is about to
-%% terminate. It should be the opposite of Module:init/1 and do any necessary
-%% cleaning up. When it returns, the gen_server terminates with Reason.
-%% The return value is ignored.
-%%--------------------------------------------------------------------
-terminate(Reason, #state{cm = ConnectionManager,
- channel_id = ChannelId,
- close_sent = false} = State) ->
- catch ssh_connection:close(ConnectionManager, ChannelId),
- terminate(Reason, State#state{close_sent = true});
-terminate(_, #state{channel_cb = Cb, channel_state = ChannelState}) ->
- catch Cb:terminate(Cb, ChannelState),
- ok.
-
-%%--------------------------------------------------------------------
-%% Func: code_change(OldVsn, State, Extra) -> {ok, NewState}
-%% Description: Convert process state when code is changed
-%%--------------------------------------------------------------------
-code_change(OldVsn, #state{channel_cb = Module,
- channel_state = ChannelState0} = State, Extra) ->
- {ok, ChannelState} = Module:code_change(OldVsn, ChannelState0, Extra),
- {ok, State#state{channel_state = ChannelState}}.
-
-%%====================================================================
-%% Internal application API
-%%====================================================================
-cache_create() ->
- ets:new(cm_tab, [set,{keypos, #channel.local_id}]).
-
-cache_lookup(Cache, Key) ->
- case ets:lookup(Cache, Key) of
- [Channel] ->
- Channel;
- [] ->
- undefined
- end.
-
-cache_update(Cache, #channel{local_id = Id} = Entry) when Id =/= undefined ->
- ets:insert(Cache, Entry).
-
-cache_delete(Cache, Key) ->
- ets:delete(Cache, Key).
-
-cache_delete(Cache) ->
- ets:delete(Cache).
-
-cache_foldl(Fun, Acc, Cache) ->
- ets:foldl(Fun, Acc, Cache).
-
-cache_info(num_entries, Cache) ->
- proplists:get_value(size, ets:info(Cache)).
-
-cache_find(ChannelPid, Cache) ->
- case ets:match_object(Cache, #channel{user = ChannelPid}) of
- [] ->
- undefined;
- [Channel] ->
- Channel
- end.
-
-get_print_info(Pid) ->
- call(Pid, get_print_info, 1000).
-
-%%--------------------------------------------------------------------
-%%% Internal functions
-%%--------------------------------------------------------------------
-handle_cb_result({reply, Reply, ChannelState}, State) ->
- {reply, Reply, State#state{channel_state = ChannelState}};
-handle_cb_result({reply, Reply, ChannelState, Timeout}, State) ->
- {reply, Reply,State#state{channel_state = ChannelState}, Timeout};
-handle_cb_result({noreply, ChannelState}, State) ->
- {noreply, State#state{channel_state = ChannelState}};
-handle_cb_result({noreply, ChannelState, Timeout}, State) ->
- {noreply, State#state{channel_state = ChannelState}, Timeout};
-handle_cb_result({stop, Reason, Reply, ChannelState}, State) ->
- {stop, Reason, Reply, State#state{channel_state = ChannelState}};
-handle_cb_result({stop, Reason, ChannelState}, State) ->
- {stop, Reason, State#state{channel_state = ChannelState}}.
-
-adjust_window({ssh_cm, ConnectionManager,
- {data, ChannelId, _, Data}}) ->
- ssh_connection:adjust_window(ConnectionManager, ChannelId, size(Data));
-adjust_window(_) ->
- ok.
-
-
-%%%################################################################
-%%%#
-%%%# Tracing
-%%%#
-
-dbg_trace(points, _, _) -> [terminate, channels, channel_events];
-
-
-dbg_trace(flags, channels, A) -> [c] ++ dbg_trace(flags, terminate, A);
-dbg_trace(on, channels, A) -> dbg:tp(?MODULE, init, 1, x),
- dbg_trace(on, terminate, A);
-dbg_trace(off, channels, A) -> dbg:ctpg(?MODULE, init, 1),
- dbg_trace(off, terminate, A);
-dbg_trace(format, channels, {call, {?MODULE,init, [[KVs]]}}) ->
- ["Server Channel Starting:\n",
- io_lib:format("Connection: ~p, ChannelId: ~p, CallBack: ~p\nCallBack init args = ~p",
- [proplists:get_value(K,KVs) || K <- [cm, channel_id, channel_cb]]
- ++ [channel_cb_init_args(KVs)])
- ];
-dbg_trace(format, channels, {return_from, {?MODULE,init,1}, {stop,Reason}}) ->
- ["Server Channel Start FAILED!\n",
- io_lib:format("Reason = ~p", [Reason])
- ];
-dbg_trace(format, channels, F) ->
- dbg_trace(format, terminate, F);
-
-
-dbg_trace(flags, terminate, _) -> [c];
-dbg_trace(on, terminate, _) -> dbg:tp(?MODULE, terminate, 2, x);
-dbg_trace(off, terminate, _) -> dbg:ctpg(?MODULE, terminate, 2);
-dbg_trace(format, terminate, {call, {?MODULE,terminate, [Reason, State]}}) ->
- ["Server Channel Terminating:\n",
- io_lib:format("Reason: ~p,~nState:~n~s", [Reason, wr_record(State)])
- ];
-
-dbg_trace(flags, channel_events, _) -> [c];
-dbg_trace(on, channel_events, _) -> dbg:tp(?MODULE, handle_call, 3, x),
- dbg:tp(?MODULE, handle_cast, 2, x),
- dbg:tp(?MODULE, handle_info, 2, x);
-dbg_trace(off, channel_events, _) -> dbg:ctpg(?MODULE, handle_call, 3),
- dbg:ctpg(?MODULE, handle_cast, 2),
- dbg:ctpg(?MODULE, handle_info, 2);
-dbg_trace(format, channel_events, {call, {?MODULE,handle_call, [Call,From,State]}}) ->
- [hdr("is called", State),
- io_lib:format("From: ~p~nCall: ~p~n", [From, Call])
- ];
-dbg_trace(format, channel_events, {return_from, {?MODULE,handle_call,3}, Ret}) ->
- ["Server Channel call returned:\n",
- io_lib:format("~p~n", [ssh_dbg:reduce_state(Ret)])
- ];
-dbg_trace(format, channel_events, {call, {?MODULE,handle_cast, [Cast,State]}}) ->
- [hdr("got cast", State),
- io_lib:format("Cast: ~p~n", [Cast])
- ];
-dbg_trace(format, channel_events, {return_from, {?MODULE,handle_cast,2}, Ret}) ->
- ["Server Channel cast returned:\n",
- io_lib:format("~p~n", [ssh_dbg:reduce_state(Ret)])
- ];
-dbg_trace(format, channel_events, {call, {?MODULE,handle_info, [Info,State]}}) ->
- [hdr("got info", State),
- io_lib:format("Info: ~p~n", [Info])
- ];
-dbg_trace(format, channel_events, {return_from, {?MODULE,handle_info,2}, Ret}) ->
- ["Server Channel info returned:\n",
- io_lib:format("~p~n", [ssh_dbg:reduce_state(Ret)])
- ].
-
-hdr(Title, S) ->
- io_lib:format("Server Channel (Id=~p, CB=~p) ~s:\n", [S#state.channel_id, S#state.channel_cb, Title]).
-
-?wr_record(state).
-
-
+ ssh_client_channel:enter_loop(State).
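After this change ssh_channel is only a thin compatibility wrapper that delegates to the new ssh_client_channel module. Below is a minimal, hypothetical callback module for that behaviour, for illustration only (the module name and echo logic are invented); handle_call/3 and handle_cast/2 are omitted for brevity since, as the wrapper code shows, the resulting undef errors are caught and ignored.

    -module(my_echo_channel).                 %% invented example module
    -behaviour(ssh_client_channel).

    -export([init/1, handle_msg/2, handle_ssh_msg/2, terminate/2]).

    init(_Args) ->
        {ok, #{}}.

    %% Sent by the wrapper's init/1 once the channel process is up.
    handle_msg({ssh_channel_up, _ChannelId, _ConnectionRef}, State) ->
        {ok, State};
    handle_msg(_Other, State) ->
        {ok, State}.

    %% Echo incoming data back to the peer; stop when the peer sends eof.
    handle_ssh_msg({ssh_cm, Conn, {data, ChannelId, _Type, Data}}, State) ->
        ssh_connection:send(Conn, ChannelId, Data),
        {ok, State};
    handle_ssh_msg({ssh_cm, _Conn, {eof, ChannelId}}, State) ->
        {stop, ChannelId, State};
    handle_ssh_msg({ssh_cm, _Conn, _Msg}, State) ->
        {ok, State}.

    terminate(_Reason, _State) ->
        ok.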
diff --git a/lib/ssh/src/ssh_cli.erl b/lib/ssh/src/ssh_cli.erl
index 26c7cb45aa..fcc1d3d59f 100644
--- a/lib/ssh/src/ssh_cli.erl
+++ b/lib/ssh/src/ssh_cli.erl
@@ -25,12 +25,12 @@
-module(ssh_cli).
--behaviour(ssh_daemon_channel).
+-behaviour(ssh_server_channel).
-include("ssh.hrl").
-include("ssh_connect.hrl").
-%% ssh_channel callbacks
+%% ssh_server_channel callbacks
-export([init/1, handle_ssh_msg/2, handle_msg/2, terminate/2]).
-export([dbg_trace/3]).
@@ -47,23 +47,8 @@
}).
%%====================================================================
-%% ssh_channel callbacks
+%% ssh_server_channel callbacks
%%====================================================================
--spec init(Args :: term()) ->
- {ok, State :: term()} | {ok, State :: term(), timeout() | hibernate} |
- {stop, Reason :: term()} | ignore.
-
--spec terminate(Reason :: (normal | shutdown | {shutdown, term()} |
- term()),
- State :: term()) ->
- term().
-
--spec handle_msg(Msg ::term(), State :: term()) ->
- {ok, State::term()} | {stop, ChannelId::integer(), State::term()}.
--spec handle_ssh_msg({ssh_cm, ConnectionRef::term(), SshMsg::term()},
- State::term()) -> {ok, State::term()} |
- {stop, ChannelId::integer(),
- State::term()}.
%%--------------------------------------------------------------------
%% Function: init(Args) -> {ok, State}
diff --git a/lib/ssh/src/ssh_client_channel.erl b/lib/ssh/src/ssh_client_channel.erl
new file mode 100644
index 0000000000..f20007baaf
--- /dev/null
+++ b/lib/ssh/src/ssh_client_channel.erl
@@ -0,0 +1,456 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2008-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+
+-module(ssh_client_channel).
+
+-include("ssh.hrl").
+-include("ssh_connect.hrl").
+
+-callback init(Args :: term()) ->
+ {ok, State :: term()} | {ok, State :: term(), timeout() | hibernate} |
+ {stop, Reason :: term()} | ignore.
+-callback handle_call(Request :: term(), From :: {pid(), Tag :: term()},
+ State :: term()) ->
+ {reply, Reply :: term(), NewState :: term()} |
+ {reply, Reply :: term(), NewState :: term(), timeout() | hibernate} |
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(), Reply :: term(), NewState :: term()} |
+ {stop, Reason :: term(), NewState :: term()}.
+-callback handle_cast(Request :: term(), State :: term()) ->
+ {noreply, NewState :: term()} |
+ {noreply, NewState :: term(), timeout() | hibernate} |
+ {stop, Reason :: term(), NewState :: term()}.
+
+-callback terminate(Reason :: (normal | shutdown | {shutdown, term()} |
+ term()),
+ State :: term()) ->
+ term().
+-callback code_change(OldVsn :: (term() | {down, term()}), State :: term(),
+ Extra :: term()) ->
+ {ok, NewState :: term()} | {error, Reason :: term()}.
+
+-callback handle_msg(Msg ::term(), State :: term()) ->
+ {ok, State::term()} | {stop, ChannelId::ssh:channel_id(), State::term()}.
+
+-callback handle_ssh_msg({ssh_cm, ConnectionRef::ssh:connection_ref(), SshMsg::term()},
+ State::term()) -> {ok, State::term()} |
+ {stop, ChannelId::ssh:channel_id(),
+ State::term()}.
+-behaviour(gen_server).
+
+%%% API
+-export([start/4, start/5, start_link/4, start_link/5, call/2, call/3,
+ cast/2, reply/2, enter_loop/1]).
+
+%% gen_server callbacks
+-export([init/1, handle_call/3, handle_cast/2, handle_info/2,
+ terminate/2, code_change/3]).
+
+%% Internal application API
+-export([cache_create/0, cache_lookup/2, cache_update/2,
+ cache_delete/1, cache_delete/2, cache_foldl/3,
+ cache_info/2, cache_find/2,
+ get_print_info/1]).
+
+-export([dbg_trace/3]).
+
+-record(state, {
+ cm,
+ channel_cb,
+ channel_state,
+ channel_id,
+ close_sent = false
+ }).
+
+%%====================================================================
+%% API
+%%====================================================================
+
+call(ChannelPid, Msg) ->
+ call(ChannelPid, Msg, infinity).
+
+call(ChannelPid, Msg, TimeOute) ->
+ try gen_server:call(ChannelPid, Msg, TimeOute) of
+ Result ->
+ Result
+ catch
+ exit:{noproc, _} ->
+ {error, closed};
+ exit:{normal, _} ->
+ {error, closed};
+ exit:{shutdown, _} ->
+ {error, closed};
+ exit:{{shutdown, _}, _} ->
+ {error, closed};
+ exit:{timeout, _} ->
+ {error, timeout}
+ end.
+
+cast(ChannelPid, Msg) ->
+ gen_server:cast(ChannelPid, Msg).
+
+
+reply(From, Msg) ->
+ gen_server:reply(From, Msg).
+
+%%====================================================================
+%% Internal application API
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: start_link() -> {ok,Pid} | ignore | {error,Error}
+%% Description: Starts the server
+%%--------------------------------------------------------------------
+start(ConnectionManager, ChannelId, CallBack, CbInitArgs) ->
+ start(ConnectionManager, ChannelId, CallBack, CbInitArgs, undefined).
+
+start(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec) ->
+ Options = [{channel_cb, CallBack},
+ {channel_id, ChannelId},
+ {init_args, CbInitArgs},
+ {cm, ConnectionManager},
+ {exec, Exec}],
+ gen_server:start(?MODULE, [Options], []).
+
+start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs) ->
+ start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, undefined).
+
+start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec) ->
+ Options = [{channel_cb, CallBack},
+ {channel_id, ChannelId},
+ {init_args, CbInitArgs},
+ {cm, ConnectionManager},
+ {exec, Exec}],
+ gen_server:start_link(?MODULE, [Options], []).
+
+enter_loop(State) ->
+ gen_server:enter_loop(?MODULE, [], State).
+
+%%====================================================================
+%% gen_server callbacks
+%%====================================================================
+
+%%--------------------------------------------------------------------
+%% Function: init(Args) -> {ok, State} |
+%% {ok, State, Timeout} |
+%% ignore |
+%% {stop, Reason}
+%% Description: Initiates the server
+%%--------------------------------------------------------------------
+init([Options]) ->
+ Cb = proplists:get_value(channel_cb, Options),
+ ConnectionManager = proplists:get_value(cm, Options),
+ ChannelId = proplists:get_value(channel_id, Options),
+ process_flag(trap_exit, true),
+ try Cb:init(channel_cb_init_args(Options)) of
+ {ok, ChannelState} ->
+ State = #state{cm = ConnectionManager,
+ channel_cb = Cb,
+ channel_id = ChannelId,
+ channel_state = ChannelState},
+ self() ! {ssh_channel_up, ChannelId, ConnectionManager},
+ {ok, State};
+ {ok, ChannelState, Timeout} ->
+ State = #state{cm = ConnectionManager,
+ channel_cb = Cb,
+ channel_id = ChannelId,
+ channel_state = ChannelState},
+ self() ! {ssh_channel_up, ChannelId, ConnectionManager},
+ {ok, State, Timeout};
+ {stop, Why} ->
+ {stop, Why}
+ catch
+ _:Reason ->
+ {stop, Reason}
+ end.
+
+channel_cb_init_args(Options) ->
+ case proplists:get_value(exec, Options) of
+ undefined ->
+ proplists:get_value(init_args, Options);
+ Exec ->
+ proplists:get_value(init_args, Options) ++ [Exec]
+ end.
+
+%%--------------------------------------------------------------------
+%% Function: %% handle_call(Request, From, State) -> {reply, Reply, State} |
+%% {reply, Reply, State, Timeout} |
+%% {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, Reply, State} |
+%% {stop, Reason, State}
+%% Description: Handling call messages
+%%--------------------------------------------------------------------
+handle_call(get_print_info, _From, State) ->
+ Reply =
+ {{State#state.cm,
+ State#state.channel_id},
+ io_lib:format('CB=~p',[State#state.channel_cb])
+ },
+ {reply, Reply, State};
+
+handle_call(Request, From, #state{channel_cb = Module,
+ channel_state = ChannelState} = State) ->
+ try Module:handle_call(Request, From, ChannelState) of
+ Result ->
+ handle_cb_result(Result, State)
+ catch
+ error:{undef, _} ->
+ {noreply, State}
+ end.
+
+
+%%--------------------------------------------------------------------
+%% Function: handle_cast(Msg, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% Description: Handling cast messages
+%%--------------------------------------------------------------------
+handle_cast(Msg, #state{channel_cb = Module,
+ channel_state = ChannelState} = State) ->
+
+ try Module:handle_cast(Msg, ChannelState) of
+ Result ->
+ handle_cb_result(Result, State)
+ catch
+ error:{undef, _} ->
+ {noreply, State}
+ end.
+
+%%--------------------------------------------------------------------
+%% Function: handle_info(Info, State) -> {noreply, State} |
+%% {noreply, State, Timeout} |
+%% {stop, Reason, State}
+%% Description: Handling all non call/cast messages
+%%--------------------------------------------------------------------
+handle_info({ssh_cm, ConnectionManager, {closed, _ChannelId}},
+ #state{cm = ConnectionManager,
+ close_sent = true} = State) ->
+ {stop, normal, State};
+handle_info({ssh_cm, ConnectionManager, {closed, ChannelId}},
+ #state{cm = ConnectionManager,
+ close_sent = false} = State) ->
+ %% To be on the safe side, i.e. the manager has already been terminated.
+ (catch ssh_connection:close(ConnectionManager, ChannelId)),
+ {stop, normal, State#state{close_sent = true}};
+
+handle_info({ssh_cm, _, _} = Msg, #state{cm = ConnectionManager,
+ channel_cb = Module,
+ channel_state = ChannelState0} = State) ->
+ case Module:handle_ssh_msg(Msg, ChannelState0) of
+ {ok, ChannelState} ->
+ adjust_window(Msg),
+ {noreply, State#state{channel_state = ChannelState}};
+ {ok, ChannelState, Timeout} ->
+ adjust_window(Msg),
+ {noreply, State#state{channel_state = ChannelState}, Timeout};
+ {stop, ChannelId, ChannelState} ->
+ catch ssh_connection:close(ConnectionManager, ChannelId),
+ {stop, normal, State#state{close_sent = true,
+ channel_state = ChannelState}}
+ end;
+
+handle_info(Msg, #state{cm = ConnectionManager, channel_cb = Module,
+ channel_state = ChannelState0} = State) ->
+ case Module:handle_msg(Msg, ChannelState0) of
+ {ok, ChannelState} ->
+ {noreply, State#state{channel_state = ChannelState}};
+ {ok, ChannelState, Timeout} ->
+ {noreply, State#state{channel_state = ChannelState}, Timeout};
+ {stop, Reason, ChannelState} when is_atom(Reason)->
+ {stop, Reason, State#state{close_sent = true,
+ channel_state = ChannelState}};
+ {stop, ChannelId, ChannelState} ->
+ Reason =
+ case Msg of
+ {'EXIT', _Pid, shutdown} ->
+ shutdown;
+ _ ->
+ normal
+ end,
+ (catch ssh_connection:close(ConnectionManager, ChannelId)),
+ {stop, Reason, State#state{close_sent = true,
+ channel_state = ChannelState}}
+ end.
+
+%%--------------------------------------------------------------------
+%% Function: terminate(Reason, State) -> void()
+%% Description: This function is called by a gen_server when it is about to
+%% terminate. It should be the opposite of Module:init/1 and do any necessary
+%% cleaning up. When it returns, the gen_server terminates with Reason.
+%% The return value is ignored.
+%%--------------------------------------------------------------------
+terminate(Reason, #state{cm = ConnectionManager,
+ channel_id = ChannelId,
+ close_sent = false} = State) ->
+ catch ssh_connection:close(ConnectionManager, ChannelId),
+ terminate(Reason, State#state{close_sent = true});
+terminate(_, #state{channel_cb = Cb, channel_state = ChannelState}) ->
+ catch Cb:terminate(Cb, ChannelState),
+ ok.
+
+%%--------------------------------------------------------------------
+%% Func: code_change(OldVsn, State, Extra) -> {ok, NewState}
+%% Description: Convert process state when code is changed
+%%--------------------------------------------------------------------
+code_change(OldVsn, #state{channel_cb = Module,
+ channel_state = ChannelState0} = State, Extra) ->
+ {ok, ChannelState} = Module:code_change(OldVsn, ChannelState0, Extra),
+ {ok, State#state{channel_state = ChannelState}}.
+
+%%====================================================================
+%% Internal application API
+%%====================================================================
+cache_create() ->
+ ets:new(cm_tab, [set,{keypos, #channel.local_id}]).
+
+cache_lookup(Cache, Key) ->
+ case ets:lookup(Cache, Key) of
+ [Channel] ->
+ Channel;
+ [] ->
+ undefined
+ end.
+
+cache_update(Cache, #channel{local_id = Id} = Entry) when Id =/= undefined ->
+ ets:insert(Cache, Entry).
+
+cache_delete(Cache, Key) ->
+ ets:delete(Cache, Key).
+
+cache_delete(Cache) ->
+ ets:delete(Cache).
+
+cache_foldl(Fun, Acc, Cache) ->
+ ets:foldl(Fun, Acc, Cache).
+
+cache_info(num_entries, Cache) ->
+ proplists:get_value(size, ets:info(Cache)).
+
+cache_find(ChannelPid, Cache) ->
+ case ets:match_object(Cache, #channel{user = ChannelPid}) of
+ [] ->
+ undefined;
+ [Channel] ->
+ Channel
+ end.
+
+get_print_info(Pid) ->
+ call(Pid, get_print_info, 1000).
+
+%%--------------------------------------------------------------------
+%%% Internal functions
+%%--------------------------------------------------------------------
+handle_cb_result({reply, Reply, ChannelState}, State) ->
+ {reply, Reply, State#state{channel_state = ChannelState}};
+handle_cb_result({reply, Reply, ChannelState, Timeout}, State) ->
+ {reply, Reply,State#state{channel_state = ChannelState}, Timeout};
+handle_cb_result({noreply, ChannelState}, State) ->
+ {noreply, State#state{channel_state = ChannelState}};
+handle_cb_result({noreply, ChannelState, Timeout}, State) ->
+ {noreply, State#state{channel_state = ChannelState}, Timeout};
+handle_cb_result({stop, Reason, Reply, ChannelState}, State) ->
+ {stop, Reason, Reply, State#state{channel_state = ChannelState}};
+handle_cb_result({stop, Reason, ChannelState}, State) ->
+ {stop, Reason, State#state{channel_state = ChannelState}}.
+
+adjust_window({ssh_cm, ConnectionManager,
+ {data, ChannelId, _, Data}}) ->
+ ssh_connection:adjust_window(ConnectionManager, ChannelId, size(Data));
+adjust_window(_) ->
+ ok.
+
+
+%%%################################################################
+%%%#
+%%%# Tracing
+%%%#
+
+dbg_trace(points, _, _) -> [terminate, channels, channel_events];
+
+
+dbg_trace(flags, channels, A) -> [c] ++ dbg_trace(flags, terminate, A);
+dbg_trace(on, channels, A) -> dbg:tp(?MODULE, init, 1, x),
+ dbg_trace(on, terminate, A);
+dbg_trace(off, channels, A) -> dbg:ctpg(?MODULE, init, 1),
+ dbg_trace(off, terminate, A);
+dbg_trace(format, channels, {call, {?MODULE,init, [[KVs]]}}) ->
+ ["Server Channel Starting:\n",
+ io_lib:format("Connection: ~p, ChannelId: ~p, CallBack: ~p\nCallBack init args = ~p",
+ [proplists:get_value(K,KVs) || K <- [cm, channel_id, channel_cb]]
+ ++ [channel_cb_init_args(KVs)])
+ ];
+dbg_trace(format, channels, {return_from, {?MODULE,init,1}, {stop,Reason}}) ->
+ ["Server Channel Start FAILED!\n",
+ io_lib:format("Reason = ~p", [Reason])
+ ];
+dbg_trace(format, channels, F) ->
+ dbg_trace(format, terminate, F);
+
+
+dbg_trace(flags, terminate, _) -> [c];
+dbg_trace(on, terminate, _) -> dbg:tp(?MODULE, terminate, 2, x);
+dbg_trace(off, terminate, _) -> dbg:ctpg(?MODULE, terminate, 2);
+dbg_trace(format, terminate, {call, {?MODULE,terminate, [Reason, State]}}) ->
+ ["Server Channel Terminating:\n",
+ io_lib:format("Reason: ~p,~nState:~n~s", [Reason, wr_record(State)])
+ ];
+
+dbg_trace(flags, channel_events, _) -> [c];
+dbg_trace(on, channel_events, _) -> dbg:tp(?MODULE, handle_call, 3, x),
+ dbg:tp(?MODULE, handle_cast, 2, x),
+ dbg:tp(?MODULE, handle_info, 2, x);
+dbg_trace(off, channel_events, _) -> dbg:ctpg(?MODULE, handle_call, 3),
+ dbg:ctpg(?MODULE, handle_cast, 2),
+ dbg:ctpg(?MODULE, handle_info, 2);
+dbg_trace(format, channel_events, {call, {?MODULE,handle_call, [Call,From,State]}}) ->
+ [hdr("is called", State),
+ io_lib:format("From: ~p~nCall: ~p~n", [From, Call])
+ ];
+dbg_trace(format, channel_events, {return_from, {?MODULE,handle_call,3}, Ret}) ->
+ ["Server Channel call returned:\n",
+ io_lib:format("~p~n", [ssh_dbg:reduce_state(Ret)])
+ ];
+dbg_trace(format, channel_events, {call, {?MODULE,handle_cast, [Cast,State]}}) ->
+ [hdr("got cast", State),
+ io_lib:format("Cast: ~p~n", [Cast])
+ ];
+dbg_trace(format, channel_events, {return_from, {?MODULE,handle_cast,2}, Ret}) ->
+ ["Server Channel cast returned:\n",
+ io_lib:format("~p~n", [ssh_dbg:reduce_state(Ret)])
+ ];
+dbg_trace(format, channel_events, {call, {?MODULE,handle_info, [Info,State]}}) ->
+ [hdr("got info", State),
+ io_lib:format("Info: ~p~n", [Info])
+ ];
+dbg_trace(format, channel_events, {return_from, {?MODULE,handle_info,2}, Ret}) ->
+ ["Server Channel info returned:\n",
+ io_lib:format("~p~n", [ssh_dbg:reduce_state(Ret)])
+ ].
+
+hdr(Title, S) ->
+ io_lib:format("Server Channel (Id=~p, CB=~p) ~s:\n", [S#state.channel_id, S#state.channel_cb, Title]).
+
+?wr_record(state).
+
+
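The cache_* functions above form an internal application API around an ETS table keyed on #channel.local_id (record from ssh_connect.hrl). A rough sketch, for illustration only, of how other parts of this patch (ssh_connection, ssh_connection_handler) drive it:

    %% Assumes -include("ssh_connect.hrl") for the #channel{} record.
    cache_sketch() ->
        Cache = ssh_client_channel:cache_create(),
        true  = ssh_client_channel:cache_update(Cache, #channel{local_id = 1,
                                                                user = self()}),
        #channel{local_id = 1} = ssh_client_channel:cache_lookup(Cache, 1),
        1    = ssh_client_channel:cache_info(num_entries, Cache),
        true = ssh_client_channel:cache_delete(Cache, 1),
        ssh_client_channel:cache_delete(Cache).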
diff --git a/lib/ssh/src/ssh_client_key_api.erl b/lib/ssh/src/ssh_client_key_api.erl
index 6e994ff292..d0d8ab25d6 100644
--- a/lib/ssh/src/ssh_client_key_api.erl
+++ b/lib/ssh/src/ssh_client_key_api.erl
@@ -23,26 +23,25 @@
-include_lib("public_key/include/public_key.hrl").
-include("ssh.hrl").
--export_type([algorithm/0]).
-
--type algorithm() :: 'ssh-rsa'
- | 'ssh-dss'
- | 'ecdsa-sha2-nistp256'
- | 'ecdsa-sha2-nistp384'
- | 'ecdsa-sha2-nistp521'
- .
-
--callback is_host_key(PublicKey :: public_key:public_key(),
- Host :: string(),
- Algorithm :: algorithm(),
- ConnectOptions :: proplists:proplist()) ->
+-export_type([client_key_cb_options/0]).
+
+-type client_key_cb_options() :: [{key_cb_private,term()} | ssh:client_option()].
+
+-callback is_host_key(Key :: public_key:public_key(),
+ Host :: string(),
+ Algorithm :: ssh:pubkey_alg(),
+ Options :: client_key_cb_options()
+ ) ->
boolean().
--callback user_key(Algorithm :: algorithm(),
- ConnectOptions :: proplists:proplist()) ->
- {ok, PrivateKey::public_key:private_key()} | {error, term()}.
+-callback user_key(Algorithm :: ssh:pubkey_alg(),
+ Options :: client_key_cb_options()
+ ) ->
+ {ok, PrivateKey :: public_key:private_key()} | {error, string()}.
--callback add_host_key(Host :: string(), PublicKey :: public_key:public_key(),
- Options :: proplists:proplist()) ->
+-callback add_host_key(Host :: string(),
+ PublicKey :: public_key:public_key(),
+ Options :: client_key_cb_options()
+ ) ->
ok | {error, Error::term()}.
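The callbacks now take ssh:pubkey_alg() and the narrower client_key_cb_options() list instead of a generic proplist. A hypothetical callback module is sketched below (names and key-lookup scheme are invented; it assumes the private data supplied via the {key_cb, {Module, Opts}} option is reachable under key_cb_private, as the type above indicates).

    -module(my_key_cb).                       %% invented example module
    -behaviour(ssh_client_key_api).

    -export([is_host_key/4, user_key/2, add_host_key/3]).

    %% Accept every host key; a real callback would consult a known-hosts store.
    is_host_key(_PublicKey, _Host, _Alg, _Options) ->
        true.

    %% Look the private key up in the data passed as
    %% {key_cb, {my_key_cb, [#{'rsa-sha2-256' => PrivKey}]}}.
    user_key(Alg, Options) ->
        [KeyMap] = proplists:get_value(key_cb_private, Options, [#{}]),
        case maps:find(Alg, KeyMap) of
            {ok, Key} -> {ok, Key};
            error     -> {error, "no key configured for " ++ atom_to_list(Alg)}
        end.

    %% Do not persist accepted host keys in this sketch.
    add_host_key(_Host, _PublicKey, _Options) ->
        ok.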
diff --git a/lib/ssh/src/ssh_connect.hrl b/lib/ssh/src/ssh_connect.hrl
index a8de5f9a2f..3c61638285 100644
--- a/lib/ssh/src/ssh_connect.hrl
+++ b/lib/ssh/src/ssh_connect.hrl
@@ -22,10 +22,6 @@
%%% Description : SSH connection protocol
--type channel_id() :: pos_integer().
--type connection_ref() :: pid().
-
-
-define(DEFAULT_PACKET_SIZE, 65536).
-define(DEFAULT_WINDOW_SIZE, 10*?DEFAULT_PACKET_SIZE).
diff --git a/lib/ssh/src/ssh_connection.erl b/lib/ssh/src/ssh_connection.erl
index 2b8780a991..ed03b4e2ed 100644
--- a/lib/ssh/src/ssh_connection.erl
+++ b/lib/ssh/src/ssh_connection.erl
@@ -64,29 +64,32 @@
bound_channel/3, encode_ip/1
]).
+-type connection_ref() :: ssh:connection_ref().
+-type channel_id() :: ssh:channel_id().
+
%%--------------------------------------------------------------------
%%% API
%%--------------------------------------------------------------------
%%--------------------------------------------------------------------
--spec session_channel(connection_ref(), timeout()) -> {ok, channel_id()} | {error, timeout | closed}.
--spec session_channel(connection_ref(), integer(), integer(), timeout()) -> {ok, channel_id()} | {error, timeout | closed}.
-
%% Description: Opens a channel for a ssh session. A session is a
%% remote execution of a program. The program may be a shell, an
%% application, a system command, or some built-in subsystem.
%% --------------------------------------------------------------------
+-spec session_channel(connection_ref(), timeout()) ->
+ {ok, channel_id()} | {error, timeout | closed}.
+
session_channel(ConnectionHandler, Timeout) ->
- session_channel(ConnectionHandler,
- ?DEFAULT_WINDOW_SIZE, ?DEFAULT_PACKET_SIZE,
- Timeout).
+ session_channel(ConnectionHandler, ?DEFAULT_WINDOW_SIZE, ?DEFAULT_PACKET_SIZE, Timeout).
-session_channel(ConnectionHandler, InitialWindowSize,
- MaxPacketSize, Timeout) ->
+-spec session_channel(connection_ref(), integer(), integer(), timeout()) ->
+ {ok, channel_id()} | {error, timeout | closed}.
+
+session_channel(ConnectionHandler, InitialWindowSize, MaxPacketSize, Timeout) ->
case ssh_connection_handler:open_channel(ConnectionHandler, "session", <<>>,
- InitialWindowSize,
- MaxPacketSize, Timeout) of
+ InitialWindowSize,
+ MaxPacketSize, Timeout) of
{open, Channel} ->
{ok, Channel};
Error ->
@@ -94,55 +97,63 @@ session_channel(ConnectionHandler, InitialWindowSize,
end.
%%--------------------------------------------------------------------
--spec exec(connection_ref(), channel_id(), string(), timeout()) ->
- success | failure | {error, timeout | closed}.
-
%% Description: Will request that the server start the
%% execution of the given command.
%%--------------------------------------------------------------------
+-spec exec(connection_ref(), channel_id(), string(), timeout()) ->
+ success | failure | {error, timeout | closed}.
+
exec(ConnectionHandler, ChannelId, Command, TimeOut) ->
ssh_connection_handler:request(ConnectionHandler, self(), ChannelId, "exec",
true, [?string(Command)], TimeOut).
%%--------------------------------------------------------------------
--spec shell(connection_ref(), channel_id()) -> _.
-
%% Description: Will request that the user's default shell (typically
%% defined in /etc/passwd in UNIX systems) be started at the other
%% end.
%%--------------------------------------------------------------------
+-spec shell(connection_ref(), channel_id()) ->
+ ok | success | failure | {error, timeout}.
+
shell(ConnectionHandler, ChannelId) ->
ssh_connection_handler:request(ConnectionHandler, self(), ChannelId,
"shell", false, <<>>, 0).
%%--------------------------------------------------------------------
--spec subsystem(connection_ref(), channel_id(), string(), timeout()) ->
- success | failure | {error, timeout | closed}.
%%
%% Description: Executes a predefined subsystem.
%%--------------------------------------------------------------------
+-spec subsystem(connection_ref(), channel_id(), string(), timeout()) ->
+ success | failure | {error, timeout | closed}.
+
subsystem(ConnectionHandler, ChannelId, SubSystem, TimeOut) ->
ssh_connection_handler:request(ConnectionHandler, self(),
ChannelId, "subsystem",
true, [?string(SubSystem)], TimeOut).
%%--------------------------------------------------------------------
--spec send(connection_ref(), channel_id(), iodata()) ->
- ok | {error, closed}.
--spec send(connection_ref(), channel_id(), integer()| iodata(), timeout() | iodata()) ->
- ok | {error, timeout} | {error, closed}.
--spec send(connection_ref(), channel_id(), integer(), iodata(), timeout()) ->
- ok | {error, timeout} | {error, closed}.
-%%
-%%
%% Description: Sends channel data.
%%--------------------------------------------------------------------
+-spec send(connection_ref(), channel_id(), iodata()) ->
+ ok | {error, timeout | closed}.
send(ConnectionHandler, ChannelId, Data) ->
send(ConnectionHandler, ChannelId, 0, Data, infinity).
+
+
+-spec send(connection_ref(), channel_id(), integer()| iodata(), timeout() | iodata()) ->
+ ok | {error, timeout | closed}.
+
send(ConnectionHandler, ChannelId, Data, TimeOut) when is_integer(TimeOut) ->
send(ConnectionHandler, ChannelId, 0, Data, TimeOut);
+
send(ConnectionHandler, ChannelId, Data, infinity) ->
send(ConnectionHandler, ChannelId, 0, Data, infinity);
+
send(ConnectionHandler, ChannelId, Type, Data) ->
send(ConnectionHandler, ChannelId, Type, Data, infinity).
+
+
+-spec send(connection_ref(), channel_id(), integer(), iodata(), timeout()) ->
+ ok | {error, timeout | closed}.
+
send(ConnectionHandler, ChannelId, Type, Data, TimeOut) ->
ssh_connection_handler:send(ConnectionHandler, ChannelId,
Type, Data, TimeOut).
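With the specs now placed next to their clauses, the three send arities read as follows in use. Conn and ChId are placeholders obtained from ssh:connect and ssh_connection:session_channel; data type 1 is the SSH extended-data (stderr) stream.

    ok = ssh_connection:send(Conn, ChId, <<"hello\n">>),          %% type 0, no timeout
    ok = ssh_connection:send(Conn, ChId, <<"hello\n">>, 5000),    %% type 0, 5 s timeout
    ok = ssh_connection:send(Conn, ChId, 1, <<"oops\n">>, 5000).  %% stderr, 5 s timeout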
@@ -156,7 +167,7 @@ send_eof(ConnectionHandler, Channel) ->
ssh_connection_handler:send_eof(ConnectionHandler, Channel).
%%--------------------------------------------------------------------
--spec adjust_window(connection_ref(), channel_id(), integer()) -> ok | {error, closed}.
+-spec adjust_window(connection_ref(), channel_id(), integer()) -> ok.
%%
%%
%% Description: Adjusts the ssh flowcontrol window.
@@ -198,17 +209,18 @@ reply_request(_,false, _, _) ->
ok.
%%--------------------------------------------------------------------
--spec ptty_alloc(connection_ref(), channel_id(), proplists:proplist()) ->
- success | failiure | {error, closed}.
--spec ptty_alloc(connection_ref(), channel_id(), proplists:proplist(), timeout()) ->
- success | failiure | {error, timeout} | {error, closed}.
-
-%%
-%%
%% Description: Sends a ssh connection protocol pty_req.
%%--------------------------------------------------------------------
+-spec ptty_alloc(connection_ref(), channel_id(), proplists:proplist()) ->
+ success | failure | {error, timeout}.
+
ptty_alloc(ConnectionHandler, Channel, Options) ->
ptty_alloc(ConnectionHandler, Channel, Options, infinity).
+
+
+-spec ptty_alloc(connection_ref(), channel_id(), proplists:proplist(), timeout()) ->
+ success | failure | {error, timeout | closed}.
+
ptty_alloc(ConnectionHandler, Channel, Options0, TimeOut) ->
TermData = backwards_compatible(Options0, []), % FIXME
{Width, PixWidth} = pty_default_dimensions(width, TermData),
@@ -259,7 +271,7 @@ channel_data(ChannelId, DataType, Data, Connection, From) when is_list(Data)->
channel_data(ChannelId, DataType, Data,
#connection{channel_cache = Cache} = Connection,
From) ->
- case ssh_channel:cache_lookup(Cache, ChannelId) of
+ case ssh_client_channel:cache_lookup(Cache, ChannelId) of
#channel{remote_id = Id, sent_close = false} = Channel0 ->
{SendList, Channel} =
update_send_window(Channel0#channel{flow_control = From}, DataType,
@@ -291,9 +303,9 @@ handle_msg(#ssh_msg_channel_open_confirmation{recipient_channel = ChannelId,
#connection{channel_cache = Cache} = Connection0, _) ->
#channel{remote_id = undefined} = Channel =
- ssh_channel:cache_lookup(Cache, ChannelId),
+ ssh_client_channel:cache_lookup(Cache, ChannelId),
- ssh_channel:cache_update(Cache, Channel#channel{
+ ssh_client_channel:cache_update(Cache, Channel#channel{
remote_id = RemoteId,
recv_packet_size = max(32768, % rfc4254/5.2
min(PacketSz, Channel#channel.recv_packet_size)
@@ -307,8 +319,8 @@ handle_msg(#ssh_msg_channel_open_failure{recipient_channel = ChannelId,
description = Descr,
lang = Lang},
#connection{channel_cache = Cache} = Connection0, _) ->
- Channel = ssh_channel:cache_lookup(Cache, ChannelId),
- ssh_channel:cache_delete(Cache, ChannelId),
+ Channel = ssh_client_channel:cache_lookup(Cache, ChannelId),
+ ssh_client_channel:cache_delete(Cache, ChannelId),
reply_msg(Channel, Connection0, {open_error, Reason, Descr, Lang});
handle_msg(#ssh_msg_channel_success{recipient_channel = ChannelId}, Connection, _) ->
@@ -323,10 +335,10 @@ handle_msg(#ssh_msg_channel_eof{recipient_channel = ChannelId}, Connection, _) -
handle_msg(#ssh_msg_channel_close{recipient_channel = ChannelId},
#connection{channel_cache = Cache} = Connection0, _) ->
- case ssh_channel:cache_lookup(Cache, ChannelId) of
+ case ssh_client_channel:cache_lookup(Cache, ChannelId) of
#channel{sent_close = Closed, remote_id = RemoteId,
flow_control = FlowControl} = Channel ->
- ssh_channel:cache_delete(Cache, ChannelId),
+ ssh_client_channel:cache_delete(Cache, ChannelId),
{CloseMsg, Connection} =
reply_msg(Channel, Connection0, {closed, ChannelId}),
ConnReplyMsgs =
@@ -367,7 +379,7 @@ handle_msg(#ssh_msg_channel_window_adjust{recipient_channel = ChannelId,
bytes_to_add = Add},
#connection{channel_cache = Cache} = Connection, _) ->
#channel{send_window_size = Size, remote_id = RemoteId} =
- Channel0 = ssh_channel:cache_lookup(Cache, ChannelId),
+ Channel0 = ssh_client_channel:cache_lookup(Cache, ChannelId),
{SendList, Channel} = %% TODO: Datatype 0 ?
update_send_window(Channel0#channel{send_window_size = Size + Add},
@@ -443,7 +455,7 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
?BOOLEAN(_Core),
?DEC_BIN(Err, _ErrLen),
?DEC_BIN(Lang, _LangLen)>> = Data,
- Channel = ssh_channel:cache_lookup(Cache, ChannelId),
+ Channel = ssh_client_channel:cache_lookup(Cache, ChannelId),
RemoteId = Channel#channel.remote_id,
{Reply, Connection} = reply_msg(Channel, Connection0,
{exit_signal, ChannelId,
@@ -488,7 +500,7 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
<<?DEC_BIN(SsName,_SsLen)>> = Data,
#channel{remote_id = RemoteId} = Channel0 =
- ssh_channel:cache_lookup(Cache, ChannelId),
+ ssh_client_channel:cache_lookup(Cache, ChannelId),
ReplyMsg = {subsystem, ChannelId, WantReply, binary_to_list(SsName)},
@@ -496,7 +508,7 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
{ok, Pid} = start_subsystem(SsName, Connection, Channel0, ReplyMsg),
erlang:monitor(process, Pid),
Channel = Channel0#channel{user = Pid},
- ssh_channel:cache_update(Cache, Channel),
+ ssh_client_channel:cache_update(Cache, Channel),
Reply = {connection_reply,
channel_success_msg(RemoteId)},
{[Reply], Connection}
@@ -576,7 +588,7 @@ handle_msg(#ssh_msg_channel_request{recipient_channel = ChannelId,
want_reply = WantReply},
#connection{channel_cache = Cache} = Connection, _) ->
if WantReply == true ->
- case ssh_channel:cache_lookup(Cache, ChannelId) of
+ case ssh_client_channel:cache_lookup(Cache, ChannelId) of
#channel{remote_id = RemoteId} ->
FailMsg = channel_failure_msg(RemoteId),
{[{connection_reply, FailMsg}], Connection};
@@ -619,14 +631,14 @@ handle_msg(#ssh_msg_disconnect{code = Code,
%%%
handle_stop(#connection{channel_cache = Cache} = Connection0) ->
{Connection, Replies} =
- ssh_channel:cache_foldl(
+ ssh_client_channel:cache_foldl(
fun(Channel, {Connection1, Acc}) ->
{Reply, Connection2} =
reply_msg(Channel, Connection1,
{closed, Channel#channel.local_id}),
{Connection2, Reply ++ Acc}
end, {Connection0, []}, Cache),
- ssh_channel:cache_delete(Cache),
+ ssh_client_channel:cache_delete(Cache),
{Replies, Connection}.
%%%----------------------------------------------------------------
@@ -767,7 +779,7 @@ setup_session(#connection{channel_cache = Cache,
send_buf = queue:new(),
remote_id = RemoteId
},
- ssh_channel:cache_update(Cache, Channel),
+ ssh_client_channel:cache_update(Cache, Channel),
OpenConfMsg = channel_open_confirmation_msg(RemoteId, NewChannelID,
?DEFAULT_WINDOW_SIZE,
?DEFAULT_PACKET_SIZE),
@@ -810,14 +822,14 @@ start_channel(Cb, Id, Args, SubSysSup, Exec, Opts) ->
ChannelSup = ssh_subsystem_sup:channel_supervisor(SubSysSup),
case max_num_channels_not_exceeded(ChannelSup, Opts) of
true ->
- ssh_channel_sup:start_child(ChannelSup, Cb, Id, Args, Exec);
+ ssh_server_channel_sup:start_child(ChannelSup, Cb, Id, Args, Exec);
false ->
throw(max_num_channels_exceeded)
end.
max_num_channels_not_exceeded(ChannelSup, Opts) ->
MaxNumChannels = ?GET_OPT(max_channels, Opts),
- NumChannels = length([x || {_,_,worker,[ssh_channel]} <-
+ NumChannels = length([x || {_,_,worker,[ssh_server_channel]} <-
supervisor:which_children(ChannelSup)]),
%% Note that NumChannels is BEFORE starting a new one
NumChannels < MaxNumChannels.
@@ -856,7 +868,7 @@ update_send_window(#channel{send_buf = SendBuffer} = Channel, DataType, Data,
do_update_send_window(Channel0, Cache) ->
{SendMsgs, Channel} = get_window(Channel0, []),
- ssh_channel:cache_update(Cache, Channel),
+ ssh_client_channel:cache_update(Cache, Channel),
{SendMsgs, Channel}.
get_window(#channel{send_window_size = 0
@@ -907,13 +919,13 @@ flow_control(Channel, Cache) ->
flow_control([window_adjusted], Channel, Cache).
flow_control([], Channel, Cache) ->
- ssh_channel:cache_update(Cache, Channel),
+ ssh_client_channel:cache_update(Cache, Channel),
[];
flow_control([_|_], #channel{flow_control = From,
send_buf = Buffer} = Channel, Cache) when From =/= undefined ->
case queue:is_empty(Buffer) of
true ->
- ssh_channel:cache_update(Cache, Channel#channel{flow_control = undefined}),
+ ssh_client_channel:cache_update(Cache, Channel#channel{flow_control = undefined}),
[{flow_control, Cache, Channel, From, ok}];
false ->
[]
@@ -1157,14 +1169,14 @@ backwards_compatible([Value| Rest], Acc) ->
handle_cli_msg(C0, ChId, Reply0) ->
Cache = C0#connection.channel_cache,
- Ch0 = ssh_channel:cache_lookup(Cache, ChId),
+ Ch0 = ssh_client_channel:cache_lookup(Cache, ChId),
case Ch0#channel.user of
undefined ->
case (catch start_cli(C0, ChId)) of
{ok, Pid} ->
erlang:monitor(process, Pid),
Ch = Ch0#channel{user = Pid},
- ssh_channel:cache_update(Cache, Ch),
+ ssh_client_channel:cache_update(Cache, Ch),
reply_msg(Ch, C0, Reply0);
_Other ->
Reply = {connection_reply, channel_failure_msg(Ch0#channel.remote_id)},
@@ -1182,10 +1194,10 @@ handle_cli_msg(C0, ChId, Reply0) ->
%%%
channel_data_reply_msg(ChannelId, Connection, DataType, Data) ->
- case ssh_channel:cache_lookup(Connection#connection.channel_cache, ChannelId) of
+ case ssh_client_channel:cache_lookup(Connection#connection.channel_cache, ChannelId) of
#channel{recv_window_size = Size} = Channel ->
WantedSize = Size - size(Data),
- ssh_channel:cache_update(Connection#connection.channel_cache,
+ ssh_client_channel:cache_update(Connection#connection.channel_cache,
Channel#channel{recv_window_size = WantedSize}),
reply_msg(Channel, Connection, {data, ChannelId, DataType, Data});
undefined ->
@@ -1194,7 +1206,7 @@ channel_data_reply_msg(ChannelId, Connection, DataType, Data) ->
reply_msg(ChId, C, Reply) when is_integer(ChId) ->
- reply_msg(ssh_channel:cache_lookup(C#connection.channel_cache, ChId), C, Reply);
+ reply_msg(ssh_client_channel:cache_lookup(C#connection.channel_cache, ChId), C, Reply);
reply_msg(Channel, Connection, {open, _} = Reply) ->
request_reply_or_data(Channel, Connection, Reply);
diff --git a/lib/ssh/src/ssh_connection_handler.erl b/lib/ssh/src/ssh_connection_handler.erl
index 033f11f4a1..57641cf74c 100644
--- a/lib/ssh/src/ssh_connection_handler.erl
+++ b/lib/ssh/src/ssh_connection_handler.erl
@@ -60,6 +60,9 @@
get_print_info/1
]).
+-type connection_ref() :: ssh:connection_ref().
+-type channel_id() :: ssh:channel_id().
+
%%% Behaviour callbacks
-export([init/1, callback_mode/0, handle_event/4, terminate/3,
format_status/2, code_change/4]).
@@ -88,8 +91,8 @@
%%====================================================================
%%--------------------------------------------------------------------
-spec start_link(role(),
- inet:socket(),
- ssh_options:options()
+ gen_tcp:socket(),
+ internal_options()
) -> {ok, pid()}.
%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
start_link(Role, Socket, Options) ->
@@ -118,8 +121,8 @@ stop(ConnectionHandler)->
%%--------------------------------------------------------------------
-spec start_connection(role(),
- inet:socket(),
- ssh_options:options(),
+ gen_tcp:socket(),
+ internal_options(),
timeout()
) -> {ok, connection_ref()} | {error, term()}.
%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
@@ -359,7 +362,7 @@ alg(ConnectionHandler) ->
| undefined, % ex: tcp_closed
ssh_params :: #ssh{}
| undefined,
- socket :: inet:socket()
+ socket :: gen_tcp:socket()
| undefined,
decrypted_data_buffer = <<>> :: binary()
| undefined,
@@ -370,7 +373,6 @@ alg(ConnectionHandler) ->
| undefined,
last_size_rekey = 0 :: non_neg_integer(),
event_queue = [] :: list(),
-% opts :: ssh_options:options(),
inet_initial_recbuf_size :: pos_integer()
| undefined
}).
@@ -380,8 +382,8 @@ alg(ConnectionHandler) ->
%%====================================================================
%%--------------------------------------------------------------------
-spec init_connection_handler(role(),
- inet:socket(),
- ssh_options:options()
+ gen_tcp:socket(),
+ internal_options()
) -> no_return().
%% . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . .
init_connection_handler(Role, Socket, Opts) ->
@@ -412,7 +414,7 @@ init([Role,Socket,Opts]) ->
case inet:peername(Socket) of
{ok, PeerAddr} ->
{Protocol, Callback, CloseTag} = ?GET_OPT(transport, Opts),
- C = #connection{channel_cache = ssh_channel:cache_create(),
+ C = #connection{channel_cache = ssh_client_channel:cache_create(),
channel_id_seed = 0,
port_bindings = [],
requests = [],
@@ -1107,13 +1109,13 @@ handle_event(cast, _, StateName, _) when not ?CONNECTED(StateName) ->
{keep_state_and_data, [postpone]};
handle_event(cast, {adjust_window,ChannelId,Bytes}, StateName, D) when ?CONNECTED(StateName) ->
- case ssh_channel:cache_lookup(cache(D), ChannelId) of
+ case ssh_client_channel:cache_lookup(cache(D), ChannelId) of
#channel{recv_window_size = WinSize,
recv_window_pending = Pending,
recv_packet_size = PktSize} = Channel
when (WinSize-Bytes) >= 2*PktSize ->
%% The peer can send at least two more *full* packets, no hurry.
- ssh_channel:cache_update(cache(D),
+ ssh_client_channel:cache_update(cache(D),
Channel#channel{recv_window_pending = Pending + Bytes}),
keep_state_and_data;
@@ -1121,7 +1123,7 @@ handle_event(cast, {adjust_window,ChannelId,Bytes}, StateName, D) when ?CONNECTE
recv_window_pending = Pending,
remote_id = Id} = Channel ->
%% Now we have to update the window - we can't receive so many more pkts
- ssh_channel:cache_update(cache(D),
+ ssh_client_channel:cache_update(cache(D),
Channel#channel{recv_window_size =
WinSize + Bytes + Pending,
recv_window_pending = 0}),
@@ -1133,7 +1135,7 @@ handle_event(cast, {adjust_window,ChannelId,Bytes}, StateName, D) when ?CONNECTE
end;
handle_event(cast, {reply_request,success,ChannelId}, StateName, D) when ?CONNECTED(StateName) ->
- case ssh_channel:cache_lookup(cache(D), ChannelId) of
+ case ssh_client_channel:cache_lookup(cache(D), ChannelId) of
#channel{remote_id = RemoteId} ->
Msg = ssh_connection:channel_success_msg(RemoteId),
update_inet_buffers(D#data.socket),
@@ -1176,7 +1178,7 @@ handle_event({call,From}, {connection_info, Options}, _, D) ->
{keep_state_and_data, [{reply,From,Info}]};
handle_event({call,From}, {channel_info,ChannelId,Options}, _, D) ->
- case ssh_channel:cache_lookup(cache(D), ChannelId) of
+ case ssh_client_channel:cache_lookup(cache(D), ChannelId) of
#channel{} = Channel ->
Info = fold_keys(Options, fun chann_info/2, Channel),
{keep_state_and_data, [{reply,From,Info}]};
@@ -1186,14 +1188,14 @@ handle_event({call,From}, {channel_info,ChannelId,Options}, _, D) ->
handle_event({call,From}, {info, all}, _, D) ->
- Result = ssh_channel:cache_foldl(fun(Channel, Acc) ->
+ Result = ssh_client_channel:cache_foldl(fun(Channel, Acc) ->
[Channel | Acc]
end,
[], cache(D)),
{keep_state_and_data, [{reply, From, {ok,Result}}]};
handle_event({call,From}, {info, ChannelPid}, _, D) ->
- Result = ssh_channel:cache_foldl(
+ Result = ssh_client_channel:cache_foldl(
fun(Channel, Acc) when Channel#channel.user == ChannelPid ->
[Channel | Acc];
(_, Acc) ->
@@ -1239,7 +1241,7 @@ handle_event({call,From}, {data, ChannelId, Type, Data, Timeout}, StateName, D0)
handle_event({call,From}, {eof, ChannelId}, StateName, D0)
when ?CONNECTED(StateName) ->
- case ssh_channel:cache_lookup(cache(D0), ChannelId) of
+ case ssh_client_channel:cache_lookup(cache(D0), ChannelId) of
#channel{remote_id = Id, sent_close = false} ->
D = send_msg(ssh_connection:channel_eof_msg(Id), D0),
{keep_state, D, [{reply,From,ok}]};
@@ -1257,7 +1259,7 @@ handle_event({call,From},
InitialWindowSize,
MaxPacketSize, Data),
D1),
- ssh_channel:cache_update(cache(D2),
+ ssh_client_channel:cache_update(cache(D2),
#channel{type = Type,
sys = "none",
user = ChannelPid,
@@ -1272,7 +1274,7 @@ handle_event({call,From},
handle_event({call,From}, {send_window, ChannelId}, StateName, D)
when ?CONNECTED(StateName) ->
- Reply = case ssh_channel:cache_lookup(cache(D), ChannelId) of
+ Reply = case ssh_client_channel:cache_lookup(cache(D), ChannelId) of
#channel{send_window_size = WinSize,
send_packet_size = Packsize} ->
{ok, {WinSize, Packsize}};
@@ -1283,7 +1285,7 @@ handle_event({call,From}, {send_window, ChannelId}, StateName, D)
handle_event({call,From}, {recv_window, ChannelId}, StateName, D)
when ?CONNECTED(StateName) ->
- Reply = case ssh_channel:cache_lookup(cache(D), ChannelId) of
+ Reply = case ssh_client_channel:cache_lookup(cache(D), ChannelId) of
#channel{recv_window_size = WinSize,
recv_packet_size = Packsize} ->
{ok, {WinSize, Packsize}};
@@ -1294,10 +1296,10 @@ handle_event({call,From}, {recv_window, ChannelId}, StateName, D)
handle_event({call,From}, {close, ChannelId}, StateName, D0)
when ?CONNECTED(StateName) ->
- case ssh_channel:cache_lookup(cache(D0), ChannelId) of
+ case ssh_client_channel:cache_lookup(cache(D0), ChannelId) of
#channel{remote_id = Id} = Channel ->
D1 = send_msg(ssh_connection:channel_close_msg(Id), D0),
- ssh_channel:cache_update(cache(D1), Channel#channel{sent_close = true}),
+ ssh_client_channel:cache_update(cache(D1), Channel#channel{sent_close = true}),
{keep_state, cache_request_idle_timer_check(D1), [{reply,From,ok}]};
undefined ->
{keep_state_and_data, [{reply,From,ok}]}
@@ -1535,7 +1537,6 @@ terminate(shutdown, _StateName, D0) ->
D = send_msg(#ssh_msg_disconnect{code = ?SSH_DISCONNECT_BY_APPLICATION,
description = "Terminated (shutdown) by supervisor"},
D0),
- stop_subsystem(D),
close_transport(D);
terminate(kill, _StateName, D) ->
@@ -1858,7 +1859,7 @@ is_usable_user_pubkey(A, Ssh) ->
%%%----------------------------------------------------------------
handle_request(ChannelPid, ChannelId, Type, Data, WantReply, From, D) ->
- case ssh_channel:cache_lookup(cache(D), ChannelId) of
+ case ssh_client_channel:cache_lookup(cache(D), ChannelId) of
#channel{remote_id = Id,
sent_close = false} = Channel ->
update_sys(cache(D), Channel, Type, ChannelPid),
@@ -1873,7 +1874,7 @@ handle_request(ChannelPid, ChannelId, Type, Data, WantReply, From, D) ->
end.
handle_request(ChannelId, Type, Data, WantReply, From, D) ->
- case ssh_channel:cache_lookup(cache(D), ChannelId) of
+ case ssh_client_channel:cache_lookup(cache(D), ChannelId) of
#channel{remote_id = Id,
sent_close = false} ->
send_msg(ssh_connection:channel_request_msg(Id, Type, WantReply, Data),
@@ -1889,10 +1890,10 @@ handle_request(ChannelId, Type, Data, WantReply, From, D) ->
%%%----------------------------------------------------------------
handle_channel_down(ChannelPid, D) ->
Cache = cache(D),
- ssh_channel:cache_foldl(
+ ssh_client_channel:cache_foldl(
fun(#channel{user=U,
local_id=Id}, Acc) when U == ChannelPid ->
- ssh_channel:cache_delete(Cache, Id),
+ ssh_client_channel:cache_delete(Cache, Id),
Acc;
(_,Acc) ->
Acc
@@ -1901,7 +1902,7 @@ handle_channel_down(ChannelPid, D) ->
update_sys(Cache, Channel, Type, ChannelPid) ->
- ssh_channel:cache_update(Cache,
+ ssh_client_channel:cache_update(Cache,
Channel#channel{sys = Type, user = ChannelPid}).
add_request(false, _ChannelId, _From, State) ->
@@ -1978,7 +1979,7 @@ conn_info(sockname, #data{ssh_params=S}) -> S#ssh.local;
%% dbg options ( = not documented):
conn_info(socket, D) -> D#data.socket;
conn_info(chan_ids, D) ->
- ssh_channel:cache_foldl(fun(#channel{local_id=Id}, Acc) ->
+ ssh_client_channel:cache_foldl(fun(#channel{local_id=Id}, Acc) ->
[Id | Acc]
end, [], cache(D)).
@@ -2069,7 +2070,7 @@ get_repl({channel_data,Pid,Data}, Acc) ->
get_repl({channel_request_reply,From,Data}, {CallRepls,S}) ->
{[{reply,From,Data}|CallRepls], S};
get_repl({flow_control,Cache,Channel,From,Msg}, {CallRepls,S}) ->
- ssh_channel:cache_update(Cache, Channel#channel{flow_control = undefined}),
+ ssh_client_channel:cache_update(Cache, Channel#channel{flow_control = undefined}),
{[{reply,From,Msg}|CallRepls], S};
get_repl({flow_control,From,Msg}, {CallRepls,S}) ->
{[{reply,From,Msg}|CallRepls], S};
@@ -2145,7 +2146,7 @@ cache_init_idle_timer(D) ->
cache_check_set_idle_timer(D = #data{idle_timer_ref = undefined,
idle_timer_value = IdleTime}) ->
%% No timer set - shall we set one?
- case ssh_channel:cache_info(num_entries, cache(D)) of
+ case ssh_client_channel:cache_info(num_entries, cache(D)) of
0 when IdleTime == infinity ->
%% No. Meaningless to set a timer that fires in an infinite time...
D;
diff --git a/lib/ssh/src/ssh_daemon_channel.erl b/lib/ssh/src/ssh_daemon_channel.erl
index 6ca93eff44..fdb6c10971 100644
--- a/lib/ssh/src/ssh_daemon_channel.erl
+++ b/lib/ssh/src/ssh_daemon_channel.erl
@@ -25,7 +25,7 @@
-module(ssh_daemon_channel).
-%% API to special server side channel that can be pluged into the erlang ssh daemeon
+%% API to a server-side channel that can be plugged into the Erlang ssh daemon
-callback init(Args :: term()) ->
{ok, State :: term()} | {ok, State :: term(), timeout() | hibernate} |
{stop, Reason :: term()} | ignore.
@@ -36,34 +36,20 @@
term().
-callback handle_msg(Msg ::term(), State :: term()) ->
- {ok, State::term()} | {stop, ChannelId::integer(), State::term()}.
--callback handle_ssh_msg({ssh_cm, ConnectionRef::term(), SshMsg::term()},
+ {ok, State::term()} | {stop, ChannelId::ssh:channel_id(), State::term()}.
+-callback handle_ssh_msg({ssh_cm, ConnectionRef::ssh:connection_ref(), SshMsg::term()},
State::term()) -> {ok, State::term()} |
- {stop, ChannelId::integer(),
+ {stop, ChannelId::ssh:channel_id(),
State::term()}.
-%%% API
--export([start/4, start/5, start_link/4, start_link/5, enter_loop/1]).
-
-%% gen_server callbacks
--export([init/1, terminate/2]).
-
-start(ConnectionManager, ChannelId, CallBack, CbInitArgs) ->
- ssh_channel:start(ConnectionManager, ChannelId, CallBack, CbInitArgs, undefined).
-
-start(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec) ->
- ssh_channel:start(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec).
-
-start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs) ->
- ssh_channel:start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, undefined).
+%%% Internal API
+-export([start_link/5,
+ get_print_info/1
+ ]).
start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec) ->
- ssh_channel:start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec).
+ ssh_server_channel:start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec).
-enter_loop(State) ->
- ssh_channel:enter_loop(State).
-init(Args) ->
- ssh_channel:init(Args).
-terminate(Reason, State) ->
- ssh_channel:terminate(Reason, State).
+get_print_info(Pid) ->
+ ssh_server_channel:get_print_info(Pid).
diff --git a/lib/ssh/src/ssh_file.erl b/lib/ssh/src/ssh_file.erl
index 33792da38f..9cab2fe0bd 100644
--- a/lib/ssh/src/ssh_file.erl
+++ b/lib/ssh/src/ssh_file.erl
@@ -45,27 +45,6 @@
%%% API
-%%% client
--spec add_host_key(string(),
- public_key:public_key(),
- proplists:proplist()) -> ok | {error,term()}.
-
--spec is_host_key(public_key:public_key(),
- string(),
- ssh_client_key_api:algorithm(),
- proplists:proplist()) -> boolean().
-
--spec user_key(ssh_client_key_api:algorithm(),
- proplists:proplist()) -> {ok, public_key:private_key()} | {error,term()}.
-
-%%% server
--spec host_key(ssh_server_key_api:algorithm(),
- proplists:proplist()) -> {ok, public_key:private_key()} | {error,term()}.
-
--spec is_auth_key(public_key:public_key(),
- string(), proplists:proplist()) -> boolean().
-
-
%% Used by server
host_key(Algorithm, Opts) ->
File = file_name(system, file_base_name(Algorithm), Opts),
diff --git a/lib/ssh/src/ssh_info.erl b/lib/ssh/src/ssh_info.erl
index d464def6fa..ee244f1432 100644
--- a/lib/ssh/src/ssh_info.erl
+++ b/lib/ssh/src/ssh_info.erl
@@ -140,15 +140,15 @@ print_system_sup({{ssh_acceptor_sup,_LocalHost,_LocalPort,_Profile}, Pid, superv
-print_channels({{server,ssh_channel_sup,_,_},Pid,supervisor,[ssh_channel_sup]}) when is_pid(Pid) ->
+print_channels({{server,ssh_server_channel_sup,_,_},Pid,supervisor,[ssh_server_channel_sup]}) when is_pid(Pid) ->
Children = supervisor:which_children(Pid),
- ChannelPids = [P || {R,P,worker,[ssh_channel]} <- Children,
+ ChannelPids = [P || {R,P,worker,[ssh_server_channel]} <- Children,
is_pid(P),
is_reference(R)],
case ChannelPids of
[] -> io_lib:format(?INDENT?INDENT"No channels~n",[]);
[Ch1Pid|_] ->
- {{ConnManager,_}, _Str} = ssh_channel:get_print_info(Ch1Pid),
+ {{ConnManager,_}, _Str} = ssh_server_channel:get_print_info(Ch1Pid),
{{_,Remote},_} = ssh_connection_handler:get_print_info(ConnManager),
[io_lib:format(?INDENT?INDENT"Remote: ~s ConnectionRef = ~p~n",[fmt_host_port(Remote),ConnManager]),
lists:map(fun print_ch/1, ChannelPids)
@@ -159,7 +159,7 @@ print_channels({{server,ssh_connection_sup,_,_},Pid,supervisor,[ssh_connection_s
print_ch(Pid) ->
try
- {{ConnManager,ChannelID}, Str} = ssh_channel:get_print_info(Pid),
+ {{ConnManager,ChannelID}, Str} = ssh_server_channel:get_print_info(Pid),
{_LocalRemote,StrM} = ssh_connection_handler:get_print_info(ConnManager),
io_lib:format(?INDENT?INDENT?INDENT"ch ~p ~p: ~s ~s~n",[ChannelID, Pid, StrM, Str])
catch
diff --git a/lib/ssh/src/ssh_options.erl b/lib/ssh/src/ssh_options.erl
index c05293d1ae..4dd9082250 100644
--- a/lib/ssh/src/ssh_options.erl
+++ b/lib/ssh/src/ssh_options.erl
@@ -32,7 +32,7 @@
handle_options/2
]).
--export_type([options/0
+-export_type([private_options/0
]).
%%%================================================================
@@ -47,16 +47,23 @@
default => any()
}.
+-type option_key() :: atom().
+
-type option_declarations() :: #{ {option_key(),def} := option_declaration() }.
-type error() :: {error,{eoptions,any()}} .
+-type private_options() :: #{socket_options := socket_options(),
+ internal_options := internal_options(),
+ option_key() => any()
+ }.
+
%%%================================================================
%%%
%%% Get an option
%%%
--spec get_value(option_class(), option_key(), options(),
+-spec get_value(option_class(), option_key(), private_options(),
atom(), non_neg_integer()) -> any() | no_return().
get_value(Class, Key, Opts, _CallerMod, _CallerLine) when is_map(Opts) ->
@@ -69,7 +76,7 @@ get_value(Class, Key, Opts, _CallerMod, _CallerLine) ->
error({bad_options,Class, Key, Opts, _CallerMod, _CallerLine}).
--spec get_value(option_class(), option_key(), options(), fun(() -> any()),
+-spec get_value(option_class(), option_key(), private_options(), fun(() -> any()),
atom(), non_neg_integer()) -> any() | no_return().
get_value(socket_options, Key, Opts, DefFun, _CallerMod, _CallerLine) when is_map(Opts) ->
@@ -91,8 +98,8 @@ get_value(Class, Key, Opts, _DefFun, _CallerMod, _CallerLine) ->
%%% Put an option
%%%
--spec put_value(option_class(), option_in(), options(),
- atom(), non_neg_integer()) -> options().
+-spec put_value(option_class(), option_in(), private_options(),
+ atom(), non_neg_integer()) -> private_options().
put_value(user_options, KeyVal, Opts, _CallerMod, _CallerLine) when is_map(Opts) ->
put_user_value(KeyVal, Opts);
@@ -131,8 +138,8 @@ put_socket_value(A, SockOpts) when is_atom(A) ->
%%% Delete an option
%%%
--spec delete_key(option_class(), option_key(), options(),
- atom(), non_neg_integer()) -> options().
+-spec delete_key(option_class(), option_key(), private_options(),
+ atom(), non_neg_integer()) -> private_options().
delete_key(internal_options, Key, Opts, _CallerMod, _CallerLine) when is_map(Opts) ->
InternalOpts = maps:get(internal_options,Opts),
@@ -144,9 +151,7 @@ delete_key(internal_options, Key, Opts, _CallerMod, _CallerLine) when is_map(Opt
%%% Initialize the options
%%%
--spec handle_options(role(), proplists:proplist()) -> options() | error() .
-
--spec handle_options(role(), proplists:proplist(), options()) -> options() | error() .
+-spec handle_options(role(), client_options()|daemon_options()) -> private_options() | error() .
handle_options(Role, PropList0) ->
handle_options(Role, PropList0, #{socket_options => [],
@@ -155,7 +160,7 @@ handle_options(Role, PropList0) ->
}).
handle_options(Role, PropList0, Opts0) when is_map(Opts0),
- is_list(PropList0) ->
+ is_list(PropList0) ->
PropList1 = proplists:unfold(PropList0),
try
OptionDefinitions = default(Role),
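For orientation, a minimal sketch of a value of the new private_options() type above, as it might look after handle_options/2 has normalised a user proplist; the user_dir and silently_accept_hosts keys are illustrative placeholders, not taken from this patch.

%% Illustrative only: socket_options and internal_options are the two
%% mandatory keys of private_options(); every other entry is an arbitrary
%% user option stored under its own key.
#{socket_options        => [inet, {active, false}],
  internal_options      => #{},
  user_dir              => "/home/alice/.ssh",
  silently_accept_hosts => true}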
diff --git a/lib/ssh/src/ssh_server_channel.erl b/lib/ssh/src/ssh_server_channel.erl
new file mode 100644
index 0000000000..f1c9a85639
--- /dev/null
+++ b/lib/ssh/src/ssh_server_channel.erl
@@ -0,0 +1,55 @@
+%%
+%% %CopyrightBegin%
+%%
+%% Copyright Ericsson AB 2013-2016. All Rights Reserved.
+%%
+%% Licensed under the Apache License, Version 2.0 (the "License");
+%% you may not use this file except in compliance with the License.
+%% You may obtain a copy of the License at
+%%
+%% http://www.apache.org/licenses/LICENSE-2.0
+%%
+%% Unless required by applicable law or agreed to in writing, software
+%% distributed under the License is distributed on an "AS IS" BASIS,
+%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+%% See the License for the specific language governing permissions and
+%% limitations under the License.
+%%
+%% %CopyrightEnd%
+%%
+
+%%
+%% Description: a gen_server implementing a simple
+%% terminal (using the group module) for a CLI
+%% over SSH
+
+-module(ssh_server_channel).
+
+%% API to a server-side channel that can be plugged into the Erlang ssh daemon
+-callback init(Args :: term()) ->
+ {ok, State :: term()} | {ok, State :: term(), timeout() | hibernate} |
+ {stop, Reason :: term()} | ignore.
+
+-callback terminate(Reason :: (normal | shutdown | {shutdown, term()} |
+ term()),
+ State :: term()) ->
+ term().
+
+-callback handle_msg(Msg ::term(), State :: term()) ->
+ {ok, State::term()} | {stop, ChannelId::ssh:channel_id(), State::term()}.
+-callback handle_ssh_msg({ssh_cm, ConnectionRef::ssh:connection_ref(), SshMsg::term()},
+ State::term()) -> {ok, State::term()} |
+ {stop, ChannelId::ssh:channel_id(),
+ State::term()}.
+
+%%% Internal API
+-export([start_link/5,
+ get_print_info/1
+ ]).
+
+start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec) ->
+ ssh_client_channel:start_link(ConnectionManager, ChannelId, CallBack, CbInitArgs, Exec).
+
+
+get_print_info(Pid) ->
+ ssh_client_channel:get_print_info(Pid).
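A minimal sketch of a callback module for the ssh_server_channel behaviour defined above; the module name my_echo_channel is hypothetical, and the clauses simply mirror the -callback specs (echoing data and stopping on eof, much like the test servers further down in this patch).

-module(my_echo_channel).   %% hypothetical example, not part of the patch
-behaviour(ssh_server_channel).
-export([init/1, handle_msg/2, handle_ssh_msg/2, terminate/2]).

init(_Args) ->
    {ok, #{}}.

%% Non-SSH messages, e.g. the channel-up notification, arrive here.
handle_msg({ssh_channel_up, _ChannelId, _ConnectionRef}, State) ->
    {ok, State};
handle_msg(_Msg, State) ->
    {ok, State}.

%% SSH connection protocol messages arrive as {ssh_cm, ConnectionRef, Msg}.
handle_ssh_msg({ssh_cm, Conn, {data, ChannelId, 0, Data}}, State) ->
    ssh_connection:send(Conn, ChannelId, Data),   %% echo the payload back
    {ok, State};
handle_ssh_msg({ssh_cm, _Conn, {eof, ChannelId}}, State) ->
    {stop, ChannelId, State};
handle_ssh_msg({ssh_cm, _Conn, _Msg}, State) ->
    {ok, State}.

terminate(_Reason, _State) ->
    ok.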
diff --git a/lib/ssh/src/ssh_channel_sup.erl b/lib/ssh/src/ssh_server_channel_sup.erl
index 8444533fd1..70799db714 100644
--- a/lib/ssh/src/ssh_channel_sup.erl
+++ b/lib/ssh/src/ssh_server_channel_sup.erl
@@ -22,7 +22,7 @@
%%----------------------------------------------------------------------
%% Purpose: Ssh channel supervisor.
%%----------------------------------------------------------------------
--module(ssh_channel_sup).
+-module(ssh_server_channel_sup).
-behaviour(supervisor).
@@ -40,18 +40,16 @@ start_link(Args) ->
start_child(Sup, Callback, Id, Args, Exec) ->
ChildSpec =
#{id => make_ref(),
- start => {ssh_channel, start_link, [self(), Id, Callback, Args, Exec]},
+ start => {ssh_server_channel, start_link, [self(), Id, Callback, Args, Exec]},
restart => temporary,
type => worker,
- modules => [ssh_channel]
+ modules => [ssh_server_channel]
},
supervisor:start_child(Sup, ChildSpec).
%%%=========================================================================
%%% Supervisor callback
%%%=========================================================================
--spec init( [term()] ) -> {ok,{supervisor:sup_flags(),[supervisor:child_spec()]}} | ignore .
-
init(_Args) ->
RestartStrategy = one_for_one,
MaxR = 10,
diff --git a/lib/ssh/src/ssh_server_key_api.erl b/lib/ssh/src/ssh_server_key_api.erl
index 3f1b886fa7..a285bf9475 100644
--- a/lib/ssh/src/ssh_server_key_api.erl
+++ b/lib/ssh/src/ssh_server_key_api.erl
@@ -23,16 +23,18 @@
-include_lib("public_key/include/public_key.hrl").
-include("ssh.hrl").
--export_type([algorithm/0]).
+-export_type([daemon_key_cb_options/0]).
--type algorithm() :: ssh_client_key_api:algorithm().
+-type daemon_key_cb_options() :: [{key_cb_private,term()} | ssh:daemon_option()].
--callback host_key(Algorithm :: algorithm(),
- DaemonOptions :: proplists:proplist()) ->
+-callback host_key(Algorithm :: ssh:pubkey_alg(),
+ DaemonOptions :: daemon_key_cb_options()
+ ) ->
{ok, PrivateKey :: public_key:private_key()} | {error, term()}.
-callback is_auth_key(PublicKey :: public_key:public_key(),
User :: string(),
- DaemonOptions :: proplists:proplist()) ->
+ DaemonOptions :: daemon_key_cb_options()
+ ) ->
boolean().
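Since the callbacks now take the narrower daemon_key_cb_options() type, here is a small sketch of a custom key callback that simply delegates to the default ssh_file implementation; the module name my_key_cb is hypothetical.

-module(my_key_cb).   %% hypothetical example, not part of the patch
-behaviour(ssh_server_key_api).
-export([host_key/2, is_auth_key/3]).

host_key(Algorithm, DaemonOptions) ->
    ssh_file:host_key(Algorithm, DaemonOptions).

is_auth_key(PublicUserKey, User, DaemonOptions) ->
    ssh_file:is_auth_key(PublicUserKey, User, DaemonOptions).

%% Selected at daemon start with, for example:
%%   ssh:daemon(Port, [{key_cb, my_key_cb}, ...]).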
diff --git a/lib/ssh/src/ssh_sftp.erl b/lib/ssh/src/ssh_sftp.erl
index f00c0aed1f..5984713ec9 100644
--- a/lib/ssh/src/ssh_sftp.erl
+++ b/lib/ssh/src/ssh_sftp.erl
@@ -24,7 +24,7 @@
-module(ssh_sftp).
--behaviour(ssh_channel).
+-behaviour(ssh_client_channel).
-include_lib("kernel/include/file.hrl").
-include("ssh.hrl").
@@ -47,7 +47,7 @@
recv_window/1, list_dir/2, read_file/2, write_file/3,
recv_window/2, list_dir/3, read_file/3, write_file/4]).
-%% ssh_channel callbacks
+%% ssh_client_channel callbacks
-export([init/1, handle_call/3, handle_cast/2, code_change/3, handle_msg/2, handle_ssh_msg/2, terminate/2]).
%% TODO: Should be placed elsewhere ssh_sftpd should not call functions in ssh_sftp!
-export([info_to_attr/1, attr_to_info/1]).
@@ -123,7 +123,7 @@ start_channel(Cm, UserOptions) when is_pid(Cm) ->
{_SshOpts, ChanOpts, SftpOpts} = handle_options(UserOptions),
case ssh_xfer:attach(Cm, [], ChanOpts) of
{ok, ChannelId, Cm} ->
- case ssh_channel:start(Cm, ChannelId,
+ case ssh_client_channel:start(Cm, ChannelId,
?MODULE, [Cm, ChannelId, SftpOpts]) of
{ok, Pid} ->
case wait_for_version_negotiation(Pid, Timeout) of
@@ -151,7 +151,7 @@ start_channel(Host, Port, UserOptions) ->
proplists:get_value(timeout, SftpOpts, infinity)),
case ssh_xfer:connect(Host, Port, SshOpts, ChanOpts, Timeout) of
{ok, ChannelId, Cm} ->
- case ssh_channel:start(Cm, ChannelId, ?MODULE, [Cm,ChannelId,SftpOpts]) of
+ case ssh_client_channel:start(Cm, ChannelId, ?MODULE, [Cm,ChannelId,SftpOpts]) of
{ok, Pid} ->
case wait_for_version_negotiation(Pid, Timeout) of
ok ->
@@ -825,7 +825,7 @@ handle_msg({ssh_channel_up, _, _}, #state{opts = Options, xf = Xf} = State) ->
%% Version negotiation timed out
handle_msg({timeout, undefined, From},
#state{xf = #ssh_xfer{channel = ChannelId}} = State) ->
- ssh_channel:reply(From, {error, timeout}),
+ ssh_client_channel:reply(From, {error, timeout}),
{stop, ChannelId, State};
handle_msg({timeout, Id, From}, #state{req_list = ReqList0} = State) ->
@@ -834,7 +834,7 @@ handle_msg({timeout, Id, From}, #state{req_list = ReqList0} = State) ->
{ok, State};
_ ->
ReqList = lists:keydelete(Id, 1, ReqList0),
- ssh_channel:reply(From, {error, timeout}),
+ ssh_client_channel:reply(From, {error, timeout}),
{ok, State#state{req_list = ReqList}}
end;
@@ -882,7 +882,7 @@ handle_options([Opt|Rest], Sftp, Chan, Ssh) ->
handle_options(Rest, Sftp, Chan, [Opt|Ssh]).
call(Pid, Msg, TimeOut) ->
- ssh_channel:call(Pid, {{timeout, TimeOut}, Msg}, infinity).
+ ssh_client_channel:call(Pid, {{timeout, TimeOut}, Msg}, infinity).
handle_reply(State, <<?UINT32(Len),Reply:Len/binary,Rest/binary>>) ->
do_handle_reply(State, Reply, Rest);
@@ -901,7 +901,7 @@ do_handle_reply(#state{xf = Xf} = State,
true ->
ok
end,
- ssh_channel:reply(From, ok)
+ ssh_client_channel:reply(From, ok)
end,
State#state{xf = Xf#ssh_xfer{vsn = Version, ext = Ext}, rep_buf = Rest};
@@ -949,7 +949,7 @@ async_reply(ReqID, Reply, _From={To,_}, State) ->
State.
sync_reply(Reply, From, State) ->
- catch (ssh_channel:reply(From, Reply)),
+ catch (ssh_client_channel:reply(From, Reply)),
State.
open2(OrigReqID,FileName,Handle,Mode,Async,From,State) ->
diff --git a/lib/ssh/src/ssh_sftpd.erl b/lib/ssh/src/ssh_sftpd.erl
index 945e9f457b..cb2eab1fec 100644
--- a/lib/ssh/src/ssh_sftpd.erl
+++ b/lib/ssh/src/ssh_sftpd.erl
@@ -24,7 +24,7 @@
-module(ssh_sftpd).
--behaviour(ssh_daemon_channel).
+-behaviour(ssh_server_channel).
-include_lib("kernel/include/file.hrl").
@@ -58,21 +58,7 @@
%%====================================================================
%% API
%%====================================================================
--spec init(Args :: term()) ->
- {ok, State :: term()} | {ok, State :: term(), timeout() | hibernate} |
- {stop, Reason :: term()} | ignore.
-
--spec terminate(Reason :: (normal | shutdown | {shutdown, term()} |
- term()),
- State :: term()) ->
- term().
-
--spec handle_msg(Msg ::term(), State :: term()) ->
- {ok, State::term()} | {stop, ChannelId::integer(), State::term()}.
--spec handle_ssh_msg({ssh_cm, ConnectionRef::term(), SshMsg::term()},
- State::term()) -> {ok, State::term()} |
- {stop, ChannelId::integer(),
- State::term()}.
+-spec subsystem_spec(list()) -> subsystem_spec().
subsystem_spec(Options) ->
{"sftp", {?MODULE, Options}}.
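As a usage note (a sketch, not part of the patch): the subsystem_spec/1 result specced above is normally handed to ssh:daemon via the subsystems option; the port number and directory paths below are placeholders.

{ok, _DaemonRef} =
    ssh:daemon(8989, [{system_dir, "/etc/ssh"},
                      {subsystems, [ssh_sftpd:subsystem_spec([{cwd, "/tmp"}])]}]).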
diff --git a/lib/ssh/src/ssh_shell.erl b/lib/ssh/src/ssh_shell.erl
index 085534592d..084daa6821 100644
--- a/lib/ssh/src/ssh_shell.erl
+++ b/lib/ssh/src/ssh_shell.erl
@@ -27,9 +27,9 @@
%%% As this is a user-interactive client it behaves like a daemon
%%% channel in spite of it being a client.
--behaviour(ssh_daemon_channel).
+-behaviour(ssh_server_channel).
-%% ssh_channel callbacks
+%% ssh_server_channel callbacks
-export([init/1, handle_msg/2, handle_ssh_msg/2, terminate/2]).
%% Spawn export
@@ -46,23 +46,8 @@
).
%%====================================================================
-%% ssh_channel callbacks
+%% ssh_server_channel callbacks
%%====================================================================
--spec init(Args :: term()) ->
- {ok, State :: term()} | {ok, State :: term(), timeout() | hibernate} |
- {stop, Reason :: term()} | ignore.
-
--spec terminate(Reason :: (normal | shutdown | {shutdown, term()} |
- term()),
- State :: term()) ->
- term().
-
--spec handle_msg(Msg ::term(), State :: term()) ->
- {ok, State::term()} | {stop, ChannelId::integer(), State::term()}.
--spec handle_ssh_msg({ssh_cm, ConnectionRef::term(), SshMsg::term()},
- State::term()) -> {ok, State::term()} |
- {stop, ChannelId::integer(),
- State::term()}.
%%--------------------------------------------------------------------
%% Function: init(Args) -> {ok, State}
diff --git a/lib/ssh/src/ssh_subsystem_sup.erl b/lib/ssh/src/ssh_subsystem_sup.erl
index 77da240a66..f80be7f20b 100644
--- a/lib/ssh/src/ssh_subsystem_sup.erl
+++ b/lib/ssh/src/ssh_subsystem_sup.erl
@@ -48,7 +48,7 @@ connection_supervisor(SupPid) ->
channel_supervisor(SupPid) ->
Children = supervisor:which_children(SupPid),
- ssh_channel_sup(Children).
+ ssh_server_channel_sup(Children).
%%%=========================================================================
%%% Supervisor callback
@@ -78,8 +78,8 @@ ssh_connection_child_spec(Role, Address, Port, _Profile, Options) ->
}.
ssh_channel_child_spec(Role, Address, Port, _Profile, Options) ->
- #{id => id(Role, ssh_channel_sup, Address, Port),
- start => {ssh_channel_sup, start_link, [Options]},
+ #{id => id(Role, ssh_server_channel_sup, Address, Port),
+ start => {ssh_server_channel_sup, start_link, [Options]},
restart => temporary,
type => supervisor
}.
@@ -92,10 +92,10 @@ ssh_connection_sup([{_, Child, _, [ssh_connection_sup]} | _]) ->
ssh_connection_sup([_ | Rest]) ->
ssh_connection_sup(Rest).
-ssh_channel_sup([{_, Child, _, [ssh_channel_sup]} | _]) ->
+ssh_server_channel_sup([{_, Child, _, [ssh_server_channel_sup]} | _]) ->
Child;
-ssh_channel_sup([_ | Rest]) ->
- ssh_channel_sup(Rest).
+ssh_server_channel_sup([_ | Rest]) ->
+ ssh_server_channel_sup(Rest).
diff --git a/lib/ssh/test/property_test/ssh_eqc_subsys.erl b/lib/ssh/test/property_test/ssh_eqc_subsys.erl
index 30b254b9c0..e7de3ea068 100644
--- a/lib/ssh/test/property_test/ssh_eqc_subsys.erl
+++ b/lib/ssh/test/property_test/ssh_eqc_subsys.erl
@@ -21,7 +21,7 @@
-module(ssh_eqc_subsys).
--behaviour(ssh_daemon_channel).
+-behaviour(ssh_server_channel).
-export([init/1, handle_msg/2, handle_ssh_msg/2, terminate/2]).
diff --git a/lib/ssh/test/ssh_bench_dev_null.erl b/lib/ssh/test/ssh_bench_dev_null.erl
index 5166247714..f9da80b6d7 100644
--- a/lib/ssh/test/ssh_bench_dev_null.erl
+++ b/lib/ssh/test/ssh_bench_dev_null.erl
@@ -22,7 +22,7 @@
%%% Description: Example ssh server
-module(ssh_bench_dev_null).
--behaviour(ssh_daemon_channel).
+-behaviour(ssh_server_channel).
-record(state, {
cm,
diff --git a/lib/ssh/test/ssh_echo_server.erl b/lib/ssh/test/ssh_echo_server.erl
index 5387d21efd..d03fe9543e 100644
--- a/lib/ssh/test/ssh_echo_server.erl
+++ b/lib/ssh/test/ssh_echo_server.erl
@@ -22,7 +22,7 @@
%%% Description: Example ssh server
-module(ssh_echo_server).
--behaviour(ssh_daemon_channel).
+-behaviour(ssh_server_channel).
-record(state, {
n,
id,
diff --git a/lib/ssh/test/ssh_options_SUITE.erl b/lib/ssh/test/ssh_options_SUITE.erl
index 12a85c40aa..86a8ac5aa8 100644
--- a/lib/ssh/test/ssh_options_SUITE.erl
+++ b/lib/ssh/test/ssh_options_SUITE.erl
@@ -1227,7 +1227,7 @@ max_sessions(Config, ParallelLogin, Connect0) when is_function(Connect0,2) ->
[_|_] = Connections,
%% Now try one more than allowed:
- ct:log("Info Report might come here...",[]),
+ ct:pal("Info Report expected here (if not disabled) ...",[]),
try Connect(Host,Port)
of
_ConnectionRef1 ->
@@ -1235,8 +1235,7 @@ max_sessions(Config, ParallelLogin, Connect0) when is_function(Connect0,2) ->
{fail,"Too many connections accepted"}
catch
error:{badmatch,{error,"Connection closed"}} ->
- %% Step 2 ok: could not set up max_sessions+1 connections
- %% This is expected
+ ct:log("Step 2 ok: could not set up too many connections. Good.",[]),
%% Now stop one connection and try to open one more
ok = ssh:close(hd(Connections)),
try_to_connect(Connect, Host, Port, Pid)
@@ -1249,16 +1248,15 @@ max_sessions(Config, ParallelLogin, Connect0) when is_function(Connect0,2) ->
try_to_connect(Connect, Host, Port, Pid) ->
- {ok,Tref} = timer:send_after(3000, timeout_no_connection), % give the supervisors some time...
+ {ok,Tref} = timer:send_after(30000, timeout_no_connection), % give the supervisors some time...
try_to_connect(Connect, Host, Port, Pid, Tref, 1). % will take max 3300 ms after 11 tries
try_to_connect(Connect, Host, Port, Pid, Tref, N) ->
try Connect(Host,Port)
of
_ConnectionRef1 ->
- %% Step 3 ok: could set up one more connection after killing one
- %% Thats good.
timer:cancel(Tref),
+ ct:log("Step 3 ok: could set up one more connection after killing one. That's good.",[]),
ssh:stop_daemon(Pid),
receive % flush.
timeout_no_connection -> ok
diff --git a/lib/ssh/test/ssh_peername_sockname_server.erl b/lib/ssh/test/ssh_peername_sockname_server.erl
index 8731d80f62..5e35fd6612 100644
--- a/lib/ssh/test/ssh_peername_sockname_server.erl
+++ b/lib/ssh/test/ssh_peername_sockname_server.erl
@@ -26,7 +26,7 @@
%% ssh connection.
--behaviour(ssh_daemon_channel).
+-behaviour(ssh_server_channel).
-record(state, {}).
-export([init/1, handle_msg/2, handle_ssh_msg/2, terminate/2]).
diff --git a/lib/ssh/test/ssh_sup_SUITE.erl b/lib/ssh/test/ssh_sup_SUITE.erl
index b145066c36..b81f66948d 100644
--- a/lib/ssh/test/ssh_sup_SUITE.erl
+++ b/lib/ssh/test/ssh_sup_SUITE.erl
@@ -256,8 +256,8 @@ killed_acceptor_restarts(Config) ->
ok = ssh:stop_daemon(DaemonPid),
?wait_match(undefined, process_info(DaemonPid), 1000, 30),
- {error,closed} = ssh:connection_info(C1,[client_version]),
- {error,closed} = ssh:connection_info(C2,[client_version]).
+ ?wait_match({error,closed}, ssh:connection_info(C1,[client_version]), 1000, 5),
+ ?wait_match({error,closed}, ssh:connection_info(C2,[client_version]), 1000, 5).
%%-------------------------------------------------------------------------
shell_channel_tree(Config) ->
@@ -290,7 +290,7 @@ shell_channel_tree(Config) ->
{ok, ChannelId0} = ssh_connection:session_channel(ConnectionRef, infinity),
ok = ssh_connection:shell(ConnectionRef,ChannelId0),
- ?wait_match([{_, GroupPid,worker,[ssh_channel]}],
+ ?wait_match([{_, GroupPid,worker,[ssh_server_channel]}],
supervisor:which_children(ChannelSup),
[GroupPid]),
{links,GroupLinks} = erlang:process_info(GroupPid, links),
@@ -339,9 +339,9 @@ chk_empty_con_daemon(Daemon) ->
?wait_match([{{server,ssh_connection_sup, _,_},
ConnectionSup, supervisor,
[ssh_connection_sup]},
- {{server,ssh_channel_sup,_ ,_},
+ {{server,ssh_server_channel_sup,_ ,_},
ChannelSup,supervisor,
- [ssh_channel_sup]}],
+ [ssh_server_channel_sup]}],
supervisor:which_children(SubSysSup),
[ConnectionSup,ChannelSup]),
?wait_match([{{ssh_acceptor_sup,_,_,_},_,worker,[ssh_acceptor]}],
@@ -372,9 +372,9 @@ check_sshd_system_tree(Daemon, Config) ->
?wait_match([{{server,ssh_connection_sup, _,_},
ConnectionSup, supervisor,
[ssh_connection_sup]},
- {{server,ssh_channel_sup,_ ,_},
+ {{server,ssh_server_channel_sup,_ ,_},
ChannelSup,supervisor,
- [ssh_channel_sup]}],
+ [ssh_server_channel_sup]}],
supervisor:which_children(SubSysSup),
[ConnectionSup,ChannelSup]),
@@ -388,7 +388,7 @@ check_sshd_system_tree(Daemon, Config) ->
ssh_sftp:start_channel(Client),
- ?wait_match([{_, _,worker,[ssh_channel]}],
+ ?wait_match([{_, _,worker,[ssh_server_channel]}],
supervisor:which_children(ChannelSup)),
ssh:close(Client).
diff --git a/lib/ssl/src/ssl_connection.erl b/lib/ssl/src/ssl_connection.erl
index f816979b9f..3f8c1f97f9 100644
--- a/lib/ssl/src/ssl_connection.erl
+++ b/lib/ssl/src/ssl_connection.erl
@@ -827,6 +827,14 @@ certify(internal, #certificate_request{},
Alg == srp_dss; Alg == srp_rsa; Alg == srp_anon ->
handle_own_alert(?ALERT_REC(?FATAL, ?HANDSHAKE_FAILURE),
Version, ?FUNCTION_NAME, State);
+certify(internal, #certificate_request{},
+ #state{session = #session{own_certificate = undefined},
+ role = client} = State0, Connection) ->
+ %% The client does not have a certificate and will send an empty reply; the server may fail
+ %% or accept the connection by its own preference. No signature algorithms are needed as there is
+ %% no certificate to verify.
+ {Record, State} = Connection:next_record(State0),
+ Connection:next_event(?FUNCTION_NAME, Record, State#state{client_certificate_requested = true});
certify(internal, #certificate_request{} = CertRequest,
#state{session = #session{own_certificate = Cert},
role = client,
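The new certify clause above handles a client that receives a certificate_request but has no certificate configured. A rough sketch of the option sets involved, mirroring the server_require_peer_cert_empty_ok test case added later in this patch (certificate file paths are placeholders):

%% Server: request a client certificate but tolerate an empty one.
ServerOpts = [{certfile, "server-cert.pem"}, {keyfile, "server-key.pem"},
              {verify, verify_peer},
              {fail_if_no_peer_cert, false}],
%% Client: no certfile/keyfile, so the certificate_request is answered
%% with an empty certificate message and the handshake still succeeds.
ClientOpts = [{cacertfile, "ca-cert.pem"}].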
diff --git a/lib/ssl/src/ssl_connection.hrl b/lib/ssl/src/ssl_connection.hrl
index d315c393b4..a61ff747ae 100644
--- a/lib/ssl/src/ssl_connection.hrl
+++ b/lib/ssl/src/ssl_connection.hrl
@@ -61,7 +61,7 @@
client_certificate_requested = false :: boolean(),
key_algorithm :: ssl_cipher:key_algo(),
hashsign_algorithm = {undefined, undefined},
- cert_hashsign_algorithm,
+ cert_hashsign_algorithm = {undefined, undefined},
public_key_info :: ssl_handshake:public_key_info() | 'undefined',
private_key :: public_key:private_key() | secret_printout() | 'undefined',
diffie_hellman_params:: #'DHParameter'{} | undefined | secret_printout(),
diff --git a/lib/ssl/src/ssl_handshake.erl b/lib/ssl/src/ssl_handshake.erl
index 54eb920bda..8ddd4623c1 100644
--- a/lib/ssl/src/ssl_handshake.erl
+++ b/lib/ssl/src/ssl_handshake.erl
@@ -1091,12 +1091,6 @@ select_hashsign(_, Cert, _, _, Version) ->
%%
%% Description: Handles signature algorithms selection for certificate requests (client)
%%--------------------------------------------------------------------
-select_hashsign(#certificate_request{}, undefined, _, {Major, Minor}) when Major >= 3 andalso Minor >= 3->
- %% There client does not have a certificate and will send an empty reply, the server may fail
- %% or accept the connection by its own preference. No signature algorihms needed as there is
- %% no certificate to verify.
- {undefined, undefined};
-
select_hashsign(#certificate_request{hashsign_algorithms = #hash_sign_algos{hash_sign_algos = HashSigns},
certificate_types = Types}, Cert, SupportedHashSigns,
{Major, Minor}) when Major >= 3 andalso Minor >= 3->
diff --git a/lib/ssl/test/ssl_certificate_verify_SUITE.erl b/lib/ssl/test/ssl_certificate_verify_SUITE.erl
index 0bc265fa10..1de4c89d7f 100644
--- a/lib/ssl/test/ssl_certificate_verify_SUITE.erl
+++ b/lib/ssl/test/ssl_certificate_verify_SUITE.erl
@@ -40,14 +40,22 @@
%%--------------------------------------------------------------------
all() ->
[
- {group, tls},
- {group, dtls}
+ {group, 'tlsv1.2'},
+ {group, 'tlsv1.1'},
+ {group, 'tlsv1'},
+ {group, 'sslv3'},
+ {group, 'dtlsv1.2'},
+ {group, 'dtlsv1'}
].
groups() ->
[
- {tls, [], all_protocol_groups()},
- {dtls, [], all_protocol_groups()},
+ {'tlsv1.2', [], all_protocol_groups()},
+ {'tlsv1.1', [], all_protocol_groups()},
+ {'tlsv1', [], all_protocol_groups()},
+ {'sslv3', [], all_protocol_groups()},
+ {'dtlsv1.2', [], all_protocol_groups()},
+ {'dtlsv1', [], all_protocol_groups()},
{active, [], tests()},
{active_once, [], tests()},
{passive, [], tests()},
@@ -65,6 +73,7 @@ tests() ->
verify_none,
server_require_peer_cert_ok,
server_require_peer_cert_fail,
+ server_require_peer_cert_empty_ok,
server_require_peer_cert_partial_chain,
server_require_peer_cert_allow_partial_chain,
server_require_peer_cert_do_not_allow_partial_chain,
@@ -104,24 +113,6 @@ end_per_suite(_Config) ->
ssl:stop(),
application:stop(crypto).
-init_per_group(tls, Config0) ->
- Version = tls_record:protocol_version(tls_record:highest_protocol_version([])),
- ssl:stop(),
- application:load(ssl),
- application:set_env(ssl, protocol_version, Version),
- ssl:start(),
- Config = ssl_test_lib:init_tls_version(Version, Config0),
- [{version, tls_record:protocol_version(Version)} | Config];
-
-init_per_group(dtls, Config0) ->
- Version = dtls_record:protocol_version(dtls_record:highest_protocol_version([])),
- ssl:stop(),
- application:load(ssl),
- application:set_env(ssl, protocol_version, Version),
- ssl:start(),
- Config = ssl_test_lib:init_tls_version(Version, Config0),
- [{version, dtls_record:protocol_version(Version)} | Config];
-
init_per_group(active, Config) ->
[{active, true}, {receive_function, send_recv_result_active} | Config];
init_per_group(active_once, Config) ->
@@ -130,15 +121,24 @@ init_per_group(passive, Config) ->
[{active, false}, {receive_function, send_recv_result} | Config];
init_per_group(error_handling, Config) ->
[{active, false}, {receive_function, send_recv_result} | Config];
+init_per_group(GroupName, Config) ->
+ case ssl_test_lib:is_tls_version(GroupName) of
+ true ->
+ case ssl_test_lib:sufficient_crypto_support(GroupName) of
+ true ->
+ [{version, GroupName} | ssl_test_lib:init_tls_version(GroupName, Config)];
+ false ->
+ {skip, "Missing crypto support"}
+ end
+ end.
-init_per_group(_, Config) ->
- Config.
-
-end_per_group(GroupName, Config) when GroupName == tls;
- GroupName == dtls ->
- ssl_test_lib:clean_tls_version(Config);
-end_per_group(_GroupName, Config) ->
- Config.
+end_per_group(GroupName, Config) ->
+ case ssl_test_lib:is_tls_version(GroupName) of
+ true ->
+ ssl_test_lib:clean_tls_version(Config);
+ false ->
+ Config
+ end.
init_per_testcase(_TestCase, Config) ->
ssl:stop(),
@@ -306,6 +306,35 @@ server_require_peer_cert_fail(Config) when is_list(Config) ->
end.
%%--------------------------------------------------------------------
+server_require_peer_cert_empty_ok() ->
+ [{doc,"Test server option fail_if_no_peer_cert when the peer sends an empty certificate"}].
+
+server_require_peer_cert_empty_ok(Config) when is_list(Config) ->
+ ServerOpts = [{verify, verify_peer}, {fail_if_no_peer_cert, false}
+ | ssl_test_lib:ssl_options(server_rsa_opts, Config)],
+ ClientOpts0 = ssl_test_lib:ssl_options(client_rsa_opts, Config),
+ Active = proplists:get_value(active, Config),
+ ReceiveFunction = proplists:get_value(receive_function, Config),
+ {ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
+
+ ClientOpts = proplists:delete(keyfile, proplists:delete(certfile, ClientOpts0)),
+
+ Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
+ {from, self()},
+ {mfa, {ssl_test_lib, ReceiveFunction, []}},
+ {options, [{active, Active} | ServerOpts]}]),
+ Port = ssl_test_lib:inet_port(Server),
+ Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
+ {host, Hostname},
+ {from, self()},
+ {mfa, {ssl_test_lib, ReceiveFunction, []}},
+ {options, [{active, Active} | ClientOpts]}]),
+
+ ssl_test_lib:check_result(Server, ok, Client, ok),
+ ssl_test_lib:close(Server),
+ ssl_test_lib:close(Client).
+
+%%--------------------------------------------------------------------
server_require_peer_cert_partial_chain() ->
[{doc, "Client sends an incomplete chain, by default not acceptable."}].
@@ -930,6 +959,7 @@ client_with_cert_cipher_suites_handshake(Config) when is_list(Config) ->
Config, "_sign_only_extensions"),
ClientOpts = ssl_test_lib:ssl_options(ClientOpts0, Config),
ServerOpts = ssl_test_lib:ssl_options(ServerOpts0, Config),
+ TLSVersion = ssl_test_lib:protocol_version(Config, tuple),
{ClientNode, ServerNode, Hostname} = ssl_test_lib:run_where(Config),
Server = ssl_test_lib:start_server([{node, ServerNode}, {port, 0},
@@ -938,7 +968,7 @@ client_with_cert_cipher_suites_handshake(Config) when is_list(Config) ->
send_recv_result_active, []}},
{options, [{active, true},
{ciphers,
- ssl_test_lib:rsa_non_signed_suites(proplists:get_value(version, Config))}
+ ssl_test_lib:rsa_non_signed_suites(TLSVersion)}
| ServerOpts]}]),
Port = ssl_test_lib:inet_port(Server),
Client = ssl_test_lib:start_client([{node, ClientNode}, {port, Port},
diff --git a/lib/stdlib/doc/src/calendar.xml b/lib/stdlib/doc/src/calendar.xml
index 65b3edcdf6..8f2b6b747a 100644
--- a/lib/stdlib/doc/src/calendar.xml
+++ b/lib/stdlib/doc/src/calendar.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2016</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -317,6 +317,30 @@
</func>
<func>
+ <name name="rfc3339_to_system_time" arity="1"/>
+ <name name="rfc3339_to_system_time" arity="2"/>
+ <fsummary>Convert from RFC 3339 timestamp to system time.</fsummary>
+ <type name="rfc3339_string"/>
+ <type name="rfc3339_time_unit"/>
+ <desc>
+ <p>Converts an RFC 3339 timestamp into system time.</p>
+ <p>Valid option:</p>
+ <taglist>
+ <tag><c>{unit, Unit}</c></tag>
+ <item><p>The time unit of the return value.
+ The default is <c>second</c>.</p>
+ </item>
+ </taglist>
+ <pre>
+1> <input>calendar:rfc3339_to_system_time("2018-02-01T16:17:58+01:00").</input>
+1517498278
+2> <input>calendar:rfc3339_to_system_time("2018-02-01 15:18:02.088Z",
+ [{unit, nanosecond}]).</input>
+1517498282088000000</pre>
+ </desc>
+ </func>
+
+ <func>
<name name="seconds_to_daystime" arity="1"/>
<fsummary>Compute days and time from seconds.</fsummary>
<desc>
@@ -339,6 +363,68 @@
</func>
<func>
+ <name name="system_time_to_local_time" arity="2"/>
+ <fsummary>Convert system time to local date and time.</fsummary>
+ <desc>
+ <p>Converts a specified system time into local date and time.</p>
+ </desc>
+ </func>
+
+ <func>
+ <name name="system_time_to_rfc3339" arity="1"/>
+ <name name="system_time_to_rfc3339" arity="2"/>
+ <fsummary>Convert from system to RFC 3339 timestamp.</fsummary>
+ <type name="offset"/>
+ <type name="rfc3339_string"/>
+ <type name="rfc3339_time_unit"/>
+ <desc>
+ <p>Converts a system time into RFC 3339 timestamp.</p>
+ <p>Valid options:</p>
+ <taglist>
+ <tag><c>{offset, Offset}</c></tag>
+ <item><p>The offset, either a string or an integer, to be
+ included in the formatted string.
+ An empty string, which is the default, is interpreted
+ as local time. A non-empty string is included as is.
+ The time unit of the integer is the same as the one
+ of <c><anno>Time</anno></c>.</p>
+ </item>
+ <tag><c>{time_designator, Character}</c></tag>
+ <item><p>The character used as time designator, that is,
+ the date and time separator. The default is <c>$T</c>.</p>
+ </item>
+ <tag><c>{unit, Unit}</c></tag>
+ <item><p>The time unit of <c><anno>Time</anno></c>. The
+ default is <c>second</c>. If some other unit is given
+ (<c>millisecond</c>, <c>microsecond</c>, or
+ <c>nanosecond</c>), the formatted string includes a
+ fraction of a second.</p>
+ </item>
+ </taglist>
+ <pre>
+1> <input>calendar:system_time_to_rfc3339(erlang:system_time(second)).</input>
+"2018-04-23T14:56:28+02:00"
+2> <input>calendar:system_time_to_rfc3339(erlang:system_time(second),
+ [{offset, "-02:00"}]).</input>
+"2018-04-23T10:56:52-02:00"
+3> <input>calendar:system_time_to_rfc3339(erlang:system_time(second),
+ [{offset, -7200}]).</input>
+"2018-04-23T10:57:05-02:00"
+4> <input>calendar:system_time_to_rfc3339(erlang:system_time(millisecond),
+ [{unit, millisecond}, {time_designator, $\s}, {offset, "Z"}]).</input>
+"2018-04-23 12:57:20.482Z"</pre>
+ </desc>
+ </func>
+
+ <func>
+ <name name="system_time_to_universal_time" arity="2"/>
+ <fsummary>Convert system time to universal date and time.</fsummary>
+ <desc>
+ <p>Converts a specified system time into universal date and time.</p>
+ </desc>
+ </func>
+
+ <func>
<name name="time_difference" arity="2"/>
<fsummary>Compute the difference between two times (deprecated).
</fsummary>
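A short shell sketch of the two system_time conversion functions documented above, reusing the value from the rfc3339_to_system_time example (1517498278 corresponds to 2018-02-01T16:17:58+01:00, i.e. 15:17:58 UTC); the local-time result naturally depends on the machine's time zone:

1> calendar:system_time_to_universal_time(1517498278, second).
{{2018,2,1},{15,17,58}}
2> calendar:system_time_to_local_time(1517498278, second).
{{2018,2,1},{16,17,58}}   %% assuming a +01:00 local offset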
diff --git a/lib/stdlib/doc/src/io_lib.xml b/lib/stdlib/doc/src/io_lib.xml
index bc1d77ac83..4a2b425e8e 100644
--- a/lib/stdlib/doc/src/io_lib.xml
+++ b/lib/stdlib/doc/src/io_lib.xml
@@ -4,7 +4,7 @@
<erlref>
<header>
<copyright>
- <year>1996</year><year>2017</year>
+ <year>1996</year><year>2018</year>
<holder>Ericsson AB. All Rights Reserved.</holder>
</copyright>
<legalnotice>
@@ -52,6 +52,9 @@
</desc>
</datatype>
<datatype>
+ <name name="chars_limit"/>
+ </datatype>
+ <datatype>
<name name="depth"/>
</datatype>
<datatype>
@@ -153,6 +156,27 @@
</func>
<func>
+ <name name="format" arity="3"/>
+ <name name="fwrite" arity="3"/>
+ <fsummary>Write formatted output.</fsummary>
+ <desc>
+ <p>Returns a character list that represents <c><anno>Data</anno></c>
+ formatted in accordance with <c><anno>Format</anno></c> in
+ the same way as
+ <seealso marker="#fwrite/2"><c>fwrite/2</c></seealso> and
+ <seealso marker="#format/2"><c>format/2</c></seealso>,
+ but takes an extra argument, a list of options.</p>
+ <p>Available options:</p>
+ <taglist>
+ <tag><c><anno>CharsLimit</anno></c></tag>
+ <item>
+ <p>A soft limit on the number of characters returned.</p>
+ </item>
+ </taglist>
+ </desc>
+ </func>
+
+ <func>
<name name="fread" arity="2"/>
<fsummary>Read formatted input.</fsummary>
<desc>
@@ -361,17 +385,24 @@
<fsummary>Write a term.</fsummary>
<desc>
<p>Returns a character list that represents <c><anno>Term</anno></c>.
- Argument <c><anno>Depth</anno></c> controls the depth of the
+ Option <c><anno>Depth</anno></c> controls the depth of the
structures written. When the specified depth is reached,
everything below this level is replaced by "<c>...</c>".
<c><anno>Depth</anno></c> defaults to -1, which means
- no limitation.</p>
+ no limitation. Option <c><anno>CharsLimit</anno></c> puts a
+ soft limit on the number of characters returned. When this
+ limit is reached, remaining structures are
+ replaced by "<c>...</c>". <c><anno>CharsLimit</anno></c>
+ defaults to -1, which means no limit on the number of
+ characters returned.</p>
<p><em>Example:</em></p>
<pre>
1> <input>lists:flatten(io_lib:write({1,[2],[3],[4,5],6,7,8,9})).</input>
"{1,[2],[3],[4,5],6,7,8,9}"
2> <input>lists:flatten(io_lib:write({1,[2],[3],[4,5],6,7,8,9}, 5)).</input>
-"{1,[2],[3],[...],...}"</pre>
+"{1,[2],[3],[...],...}"
+3> <input>lists:flatten(io_lib:write({[1,2,3],[4,5],6,7,8,9}, [{chars_limit,20}])).</input>
+"{[1,2|...],[4|...],...}"</pre>
</desc>
</func>
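A minimal sketch of the new three-argument call documented above (the term being formatted is arbitrary); once the soft limit is reached, the rest of the structure is abbreviated with "...":

%% Sketch only: cap the printed representation at roughly 30 characters.
Abbrev = lists:flatten(io_lib:format("~p", [lists:seq(1, 100)], [{chars_limit, 30}])).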
diff --git a/lib/stdlib/doc/src/sys.xml b/lib/stdlib/doc/src/sys.xml
index 64d8789016..59e5bb6cb5 100644
--- a/lib/stdlib/doc/src/sys.xml
+++ b/lib/stdlib/doc/src/sys.xml
@@ -102,7 +102,7 @@
then treated in the debug function. For example, <c>trace</c>
formats the system events to the terminal.
</p>
- <p>Three predefined system events are used when a
+ <p>Four predefined system events are used when a
process receives or sends a message. The process can also define its
own system events. It is always up to the process itself
to format these events.</p>
@@ -276,7 +276,9 @@
<p><c><anno>Func</anno></c> is called whenever a system event is
generated. This function is to return <c>done</c>, or a new
<c>Func</c> state. In the first case, the function is removed. It is
- also removed if the function fails.</p>
+ also removed if the function fails. If the same debug function is to be
+ installed more than once, a unique <c><anno>FuncId</anno></c> must be
+ specified for each installation.</p>
</desc>
</func>
@@ -330,8 +332,8 @@
<fsummary>Remove a debug function from the process.</fsummary>
<desc>
<p>Removes an installed debug function from the
- process. <c><anno>Func</anno></c> must be the same as previously
- installed.</p>
+ process. <c><anno>Func</anno></c> or <c><anno>FuncId</anno></c> must be
+ the same as previously installed.</p>
</desc>
</func>
diff --git a/lib/stdlib/src/Makefile b/lib/stdlib/src/Makefile
index 8b156929d7..dc3735055a 100644
--- a/lib/stdlib/src/Makefile
+++ b/lib/stdlib/src/Makefile
@@ -238,6 +238,13 @@ $(EBIN)/erl_tar.beam: ../../kernel/include/file.hrl erl_tar.hrl
$(EBIN)/file_sorter.beam: ../../kernel/include/file.hrl
$(EBIN)/filelib.beam: ../../kernel/include/file.hrl
$(EBIN)/filename.beam: ../../kernel/include/file.hrl
+$(EBIN)/gen_event.beam: ../../kernel/include/logger.hrl
+$(EBIN)/gen_fsm.beam: ../../kernel/include/logger.hrl
+$(EBIN)/gen_server.beam: ../../kernel/include/logger.hrl
+$(EBIN)/gen_statem.beam: ../../kernel/include/logger.hrl
+$(EBIN)/proc_lib.beam: ../../kernel/include/logger.hrl
$(EBIN)/qlc_pt.beam: ../include/ms_transform.hrl
$(EBIN)/shell.beam: ../../kernel/include/file.hrl
+$(EBIN)/supervisor.beam: ../../kernel/include/logger.hrl
+$(EBIN)/supervisor_bridge.beam: ../../kernel/include/logger.hrl
$(EBIN)/zip.beam: ../include/zip.hrl ../../kernel/include/file.hrl
diff --git a/lib/stdlib/src/calendar.erl b/lib/stdlib/src/calendar.erl
index 55a0cfc9a1..9a600c1972 100644
--- a/lib/stdlib/src/calendar.erl
+++ b/lib/stdlib/src/calendar.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -39,8 +39,14 @@
now_to_datetime/1, % = now_to_universal_time/1
now_to_local_time/1,
now_to_universal_time/1,
+ rfc3339_to_system_time/1,
+ rfc3339_to_system_time/2,
seconds_to_daystime/1,
seconds_to_time/1,
+ system_time_to_local_time/2,
+ system_time_to_universal_time/2,
+ system_time_to_rfc3339/1,
+ system_time_to_rfc3339/2,
time_difference/2,
time_to_seconds/1,
universal_time/0,
@@ -55,10 +61,13 @@
-define(SECONDS_PER_DAY, 86400).
-define(DAYS_PER_YEAR, 365).
-define(DAYS_PER_LEAP_YEAR, 366).
--define(DAYS_PER_4YEARS, 1461).
--define(DAYS_PER_100YEARS, 36524).
--define(DAYS_PER_400YEARS, 146097).
+%% -define(DAYS_PER_4YEARS, 1461).
+%% -define(DAYS_PER_100YEARS, 36524).
+%% -define(DAYS_PER_400YEARS, 146097).
-define(DAYS_FROM_0_TO_1970, 719528).
+-define(DAYS_FROM_0_TO_10000, 2932897).
+-define(SECONDS_FROM_0_TO_1970, (?DAYS_FROM_0_TO_1970*?SECONDS_PER_DAY)).
+-define(SECONDS_FROM_0_TO_10000, (?DAYS_FROM_0_TO_10000*?SECONDS_PER_DAY)).
%%----------------------------------------------------------------------
%% Types
@@ -83,6 +92,13 @@
-type datetime1970() :: {{year1970(),month(),day()},time()}.
-type yearweeknum() :: {year(),weeknum()}.
+-type rfc3339_string() :: [byte(), ...].
+%% By design 'native' is not supported:
+-type rfc3339_time_unit() :: 'microsecond'
+ | 'millisecond'
+ | 'nanosecond'
+ | 'second'.
+
%%----------------------------------------------------------------------
%% All dates are according to the Gregorian calendar. In this module
@@ -309,8 +325,7 @@ local_time_to_universal_time_dst(DateTime) ->
-spec now_to_datetime(Now) -> datetime1970() when
Now :: erlang:timestamp().
now_to_datetime({MSec, Sec, _uSec}) ->
- Sec0 = MSec*1000000 + Sec + ?DAYS_FROM_0_TO_1970*?SECONDS_PER_DAY,
- gregorian_seconds_to_datetime(Sec0).
+ system_time_to_datetime(MSec*1000000 + Sec).
-spec now_to_universal_time(Now) -> datetime1970() when
Now :: erlang:timestamp().
@@ -328,6 +343,33 @@ now_to_local_time({MSec, Sec, _uSec}) ->
erlang:universaltime_to_localtime(
now_to_universal_time({MSec, Sec, _uSec})).
+-spec rfc3339_to_system_time(DateTimeString) -> integer() when
+ DateTimeString :: rfc3339_string().
+
+rfc3339_to_system_time(DateTimeString) ->
+ rfc3339_to_system_time(DateTimeString, []).
+
+-spec rfc3339_to_system_time(DateTimeString, Options) -> integer() when
+ DateTimeString :: rfc3339_string(),
+ Options :: [Option],
+ Option :: {'unit', rfc3339_time_unit()}.
+
+rfc3339_to_system_time(DateTimeString, Options) ->
+ Unit = proplists:get_value(unit, Options, second),
+ %% _T is the character separating the date and the time:
+ {DateStr, [_T|TimeStr]} = lists:split(10, DateTimeString),
+ {TimeStr2, TimeStr3} = lists:split(8, TimeStr),
+ {ok, [Hour, Min, Sec], []} = io_lib:fread("~d:~d:~d", TimeStr2),
+ {ok, [Year, Month, Day], []} = io_lib:fread("~d-~d-~d", DateStr),
+ DateTime = {{Year, Month, Day}, {Hour, Min, Sec}},
+ IsFractionChar = fun(C) -> C >= $0 andalso C =< $9 orelse C =:= $. end,
+ {FractionStr, UtcOffset} = lists:splitwith(IsFractionChar, TimeStr3),
+ Time = datetime_to_system_time(DateTime),
+ Secs = Time - offset_adjustment(Time, second, UtcOffset),
+ check(DateTimeString, Options, Secs),
+ ScaledEpoch = erlang:convert_time_unit(Secs, second, Unit),
+ ScaledEpoch + copy_sign(fraction(Unit, FractionStr), ScaledEpoch).
+
%% seconds_to_daystime(Secs) = {Days, {Hour, Minute, Second}}
@@ -363,6 +405,55 @@ seconds_to_time(Secs) when Secs >= 0, Secs < ?SECONDS_PER_DAY ->
Second = Secs1 rem ?SECONDS_PER_MINUTE,
{Hour, Minute, Second}.
+-spec system_time_to_local_time(Time, TimeUnit) -> datetime() when
+ Time :: integer(),
+ TimeUnit :: erlang:time_unit().
+
+system_time_to_local_time(Time, TimeUnit) ->
+ UniversalDate = system_time_to_universal_time(Time, TimeUnit),
+ erlang:universaltime_to_localtime(UniversalDate).
+
+-spec system_time_to_universal_time(Time, TimeUnit) -> datetime() when
+ Time :: integer(),
+ TimeUnit :: erlang:time_unit().
+
+system_time_to_universal_time(Time, TimeUnit) ->
+ Secs = erlang:convert_time_unit(Time, TimeUnit, second),
+ system_time_to_datetime(Secs).
+
+-spec system_time_to_rfc3339(Time) -> DateTimeString when
+ Time :: integer(),
+ DateTimeString :: rfc3339_string().
+
+system_time_to_rfc3339(Time) ->
+ system_time_to_rfc3339(Time, []).
+
+-type offset() :: [byte()] | (Time :: integer()).
+-spec system_time_to_rfc3339(Time, Options) -> DateTimeString when
+ Time :: integer(), % Since Epoch
+ Options :: [Option],
+ Option :: {'offset', offset()}
+ | {'time_designator', byte()}
+ | {'unit', rfc3339_time_unit()},
+ DateTimeString :: rfc3339_string().
+
+system_time_to_rfc3339(Time, Options) ->
+ Unit = proplists:get_value(unit, Options, second),
+ OffsetOption = proplists:get_value(offset, Options, ""),
+ T = proplists:get_value(time_designator, Options, $T),
+ AdjustmentSecs = offset_adjustment(Time, Unit, OffsetOption),
+ Offset = offset(OffsetOption, AdjustmentSecs),
+ Adjustment = erlang:convert_time_unit(AdjustmentSecs, second, Unit),
+ AdjustedTime = Time + Adjustment,
+ Factor = factor(Unit),
+ Secs = AdjustedTime div Factor,
+ check(Time, Options, Secs),
+ DateTime = system_time_to_datetime(Secs),
+ {{Year, Month, Day}, {Hour, Min, Sec}} = DateTime,
+ FractionStr = fraction_str(Factor, AdjustedTime),
+ flat_fwrite("~4.10.0B-~2.10.0B-~2.10.0B~c~2.10.0B:~2.10.0B:~2.10.0B~s~s",
+ [Year, Month, Day, T, Hour, Min, Sec, FractionStr, Offset]).
+
%% time_difference(T1, T2) = Tdiff
%%
%% Returns the difference between two {Date, Time} structures.
@@ -550,3 +641,85 @@ df(Year, _) ->
true -> 1;
false -> 0
end.
+
+check(_Arg, _Options, Secs) when Secs >= - ?SECONDS_FROM_0_TO_1970,
+ Secs < ?SECONDS_FROM_0_TO_10000 ->
+ ok;
+check(Arg, Options, _Secs) ->
+ erlang:error({badarg, [Arg, Options]}).
+
+datetime_to_system_time(DateTime) ->
+ datetime_to_gregorian_seconds(DateTime) - ?SECONDS_FROM_0_TO_1970.
+
+system_time_to_datetime(Seconds) ->
+ gregorian_seconds_to_datetime(Seconds + ?SECONDS_FROM_0_TO_1970).
+
+offset(OffsetOption, Secs0) when OffsetOption =:= "";
+ is_integer(OffsetOption) ->
+ Sign = case Secs0 < 0 of
+ true -> $-;
+ false -> $+
+ end,
+ Secs = abs(Secs0),
+ Hour = Secs div 3600,
+ Min = (Secs rem 3600) div 60,
+ io_lib:fwrite("~c~2.10.0B:~2.10.0B", [Sign, Hour, Min]);
+offset(OffsetOption, _Secs) ->
+ OffsetOption.
+
+offset_adjustment(Time, Unit, OffsetString) when is_list(OffsetString) ->
+ offset_string_adjustment(Time, Unit, OffsetString);
+offset_adjustment(_Time, Unit, Offset) when is_integer(Offset) ->
+ erlang:convert_time_unit(Offset, Unit, second).
+
+offset_string_adjustment(Time, Unit, "") ->
+ local_offset(Time, Unit);
+offset_string_adjustment(_Time, _Unit, "Z") ->
+ 0;
+offset_string_adjustment(_Time, _Unit, "z") ->
+ 0;
+offset_string_adjustment(_Time, _Unit, [Sign|Tz]) ->
+ {ok, [Hour, Min], []} = io_lib:fread("~d:~d", Tz),
+ Adjustment = 3600 * Hour + 60 * Min,
+ case Sign of
+ $- -> -Adjustment;
+ $+ -> Adjustment
+ end.
+
+local_offset(SystemTime, Unit) ->
+ LocalTime = system_time_to_local_time(SystemTime, Unit),
+ UniversalTime = system_time_to_universal_time(SystemTime, Unit),
+ LocalSecs = datetime_to_gregorian_seconds(LocalTime),
+ UniversalSecs = datetime_to_gregorian_seconds(UniversalTime),
+ LocalSecs - UniversalSecs.
+
+fraction_str(Factor, Time) ->
+ case Time rem Factor of
+ 0 ->
+ "";
+ Fraction ->
+ FS = io_lib:fwrite(".~*..0B", [log10(Factor), abs(Fraction)]),
+ string:trim(FS, trailing, "0")
+ end.
+
+fraction(second, _) ->
+ 0;
+fraction(_, "") ->
+ 0;
+fraction(Unit, FractionStr) ->
+ round(factor(Unit) * list_to_float([$0|FractionStr])).
+
+copy_sign(N1, N2) when N2 < 0 -> -N1;
+copy_sign(N1, _N2) -> N1.
+
+factor(second) -> 1;
+factor(millisecond) -> 1000;
+factor(microsecond) -> 1000000;
+factor(nanosecond) -> 1000000000.
+
+log10(1000) -> 3;
+log10(1000000) -> 6;
+log10(1000000000) -> 9.
+
+flat_fwrite(F, S) ->
+ lists:flatten(io_lib:fwrite(F, S)).
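The calendar additions above convert between system time and RFC 3339 strings. A minimal usage sketch of the new functions; the shell output and the local UTC offset shown here are illustrative, not taken from this commit:

1> calendar:system_time_to_rfc3339(erlang:system_time(second)).
"2018-04-23T10:56:33+02:00"
2> calendar:system_time_to_rfc3339(erlang:system_time(millisecond),
                                   [{unit, millisecond}, {offset, "Z"}]).
"2018-04-23T08:56:33.123Z"
3> calendar:rfc3339_to_system_time("2018-04-23T08:56:33Z").
1524473793

The fraction part is printed only when the value is not a whole number of seconds in the chosen unit.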
diff --git a/lib/stdlib/src/erl_lint.erl b/lib/stdlib/src/erl_lint.erl
index 79dc6ce180..e9ac2fcdff 100644
--- a/lib/stdlib/src/erl_lint.erl
+++ b/lib/stdlib/src/erl_lint.erl
@@ -2,7 +2,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -2095,6 +2095,10 @@ is_gexpr({cons,_L,H,T}, Info) -> is_gexpr_list([H,T], Info);
is_gexpr({tuple,_L,Es}, Info) -> is_gexpr_list(Es, Info);
%%is_gexpr({struct,_L,_Tag,Es}, Info) ->
%% is_gexpr_list(Es, Info);
+is_gexpr({map,_L,Es}, Info) ->
+ is_map_fields(Es, Info);
+is_gexpr({map,_L,Src,Es}, Info) ->
+ is_gexpr(Src, Info) andalso is_map_fields(Es, Info);
is_gexpr({record_index,_L,_Name,Field}, Info) ->
is_gexpr(Field, Info);
is_gexpr({record_field,_L,Rec,_Name,Field}, Info) ->
@@ -2137,6 +2141,14 @@ is_gexpr_op(Op, A) ->
is_gexpr_list(Es, Info) -> all(fun (E) -> is_gexpr(E, Info) end, Es).
+is_map_fields([{Tag,_,K,V}|Fs], Info) when Tag =:= map_field_assoc;
+ Tag =:= map_field_exact ->
+ is_gexpr(K, Info) andalso
+ is_gexpr(V, Info) andalso
+ is_map_fields(Fs, Info);
+is_map_fields([], _Info) -> true;
+is_map_fields(_T, _Info) -> false.
+
is_gexpr_fields(Fs, L, Name, {RDs,_}=Info) ->
IFs = case dict:find(Name, RDs) of
{ok,{_Line,Fields}} -> Fs ++ init_fields(Fs, L, Fields);
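The is_gexpr/is_map_fields clauses added above make erl_lint accept map construction and map update inside guard expressions. An illustrative module that the linter accepts after this change (module name and clauses are made up; the usual guard-expression rules still apply to the keys and values):

-module(map_guard_example).
-export([classify/1]).

classify(X) when X =:= #{mode => strict} -> strict;          %% map construction in a guard
classify(X) when map_size(X#{seen => true}) > 2 -> extended; %% map update in a guard
classify(_) -> other.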
diff --git a/lib/stdlib/src/gen_event.erl b/lib/stdlib/src/gen_event.erl
index 73e4457bd0..53042251cc 100644
--- a/lib/stdlib/src/gen_event.erl
+++ b/lib/stdlib/src/gen_event.erl
@@ -47,16 +47,19 @@
system_replace_state/2,
format_status/2]).
+%% logger callback
+-export([format_log/1]).
+
-export_type([handler/0, handler_args/0, add_handler_ret/0,
del_handler_ret/0]).
--import(error_logger, [error_msg/2]).
-
-record(handler, {module :: atom(),
id = false,
state,
supervised = false :: 'false' | pid()}).
+-include("logger.hrl").
+
%%%=========================================================================
%%% API
%%%=========================================================================
@@ -583,9 +586,13 @@ server_update(Handler1, Func, Event, SName) ->
remove, SName, normal),
no;
{'EXIT', {undef, [{Mod1, handle_info, [_,_], _}|_]}} ->
- error_logger:warning_msg("** Undefined handle_info in ~tp~n"
- "** Unhandled message: ~tp~n", [Mod1, Event]),
- {ok, Handler1};
+ ?LOG_WARNING(#{label=>{gen_event,no_handle_info},
+ module=>Mod1,
+ message=>Event},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_event:format_log/1,
+ error_logger=>#{tag=>warning_msg}}), % warningmap??
+ {ok, Handler1};
Other ->
do_terminate(Mod1, Handler1, {error, Other}, State,
Event, SName, crash),
@@ -737,6 +744,23 @@ report_error(_Handler, normal, _, _, _) -> ok;
report_error(_Handler, shutdown, _, _, _) -> ok;
report_error(_Handler, {swapped,_,_}, _, _, _) -> ok;
report_error(Handler, Reason, State, LastIn, SName) ->
+ ?LOG_ERROR(#{label=>{gen_event,terminate},
+ handler=>handler(Handler),
+ name=>SName,
+ last_message=>LastIn,
+ state=>format_status(terminate,Handler#handler.module,
+ get(),State),
+ reason=>Reason},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_event:format_log/1,
+ error_logger=>#{tag=>error}}).
+
+format_log(#{label:={gen_event,terminate},
+ handler:=Handler,
+ name:=SName,
+ last_message:=LastIn,
+ state:=State,
+ reason:=Reason}) ->
Reason1 =
case Reason of
{'EXIT',{undef,[{M,F,A,L}|MFAs]}} ->
@@ -756,23 +780,18 @@ report_error(Handler, Reason, State, LastIn, SName) ->
_ ->
Reason
end,
- Mod = Handler#handler.module,
- FmtState = case erlang:function_exported(Mod, format_status, 2) of
- true ->
- Args = [get(), State],
- case catch Mod:format_status(terminate, Args) of
- {'EXIT', _} -> State;
- Else -> Else
- end;
- _ ->
- State
- end,
- error_msg("** gen_event handler ~p crashed.~n"
- "** Was installed in ~tp~n"
- "** Last event was: ~tp~n"
- "** When handler state == ~tp~n"
- "** Reason == ~tp~n",
- [handler(Handler),SName,LastIn,FmtState,Reason1]).
+ {"** gen_event handler ~p crashed.~n"
+ "** Was installed in ~tp~n"
+ "** Last event was: ~tp~n"
+ "** When handler state == ~tp~n"
+ "** Reason == ~tp~n",
+ [Handler,SName,LastIn,State,Reason1]};
+format_log(#{label:={gen_event,no_handle_info},
+ module:=Mod,
+ message:=Msg}) ->
+ {"** Undefined handle_info in ~tp~n"
+ "** Unhandled message: ~tp~n",
+ [Mod, Msg]}.
handler(Handler) when not Handler#handler.id ->
Handler#handler.module;
@@ -805,17 +824,21 @@ format_status(Opt, StatusData) ->
[PDict, SysState, Parent, _Debug, [ServerName, MSL, _HibernateAfterTimeout, _Hib]] = StatusData,
Header = gen:format_status_header("Status for event handler",
ServerName),
- FmtMSL = [case erlang:function_exported(Mod, format_status, 2) of
- true ->
- Args = [PDict, State],
- case catch Mod:format_status(Opt, Args) of
- {'EXIT', _} -> MSL;
- Else -> MS#handler{state = Else}
- end;
- _ ->
- MS
- end || #handler{module = Mod, state = State} = MS <- MSL],
+ FmtMSL = [MS#handler{state=format_status(Opt, Mod, PDict, State)}
+ || #handler{module = Mod, state = State} = MS <- MSL],
[{header, Header},
{data, [{"Status", SysState},
{"Parent", Parent}]},
{items, {"Installed handlers", FmtMSL}}].
+
+format_status(Opt, Mod, PDict, State) ->
+ case erlang:function_exported(Mod, format_status, 2) of
+ true ->
+ Args = [PDict, State],
+ case catch Mod:format_status(Opt, Args) of
+ {'EXIT', _} -> State;
+ Else -> Else
+ end;
+ false ->
+ State
+ end.
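gen_event (and the other behaviours changed below) now log structured reports with report_cb pointing at the exported format_log/1. A minimal sketch of how a formatter on the handler side turns such a report into text; the function name format_event is hypothetical, but the event map shape matches what ?LOG_ERROR/?LOG_WARNING above produce:

%% The formatter calls the report callback to obtain {Format, Args}.
format_event(#{msg := {report, Report}, meta := #{report_cb := Fun}}) ->
    {Format, Args} = Fun(Report),              %% e.g. gen_event:format_log/1
    lists:flatten(io_lib:format(Format, Args)).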
diff --git a/lib/stdlib/src/gen_fsm.erl b/lib/stdlib/src/gen_fsm.erl
index 8c7db65563..77826c3dc6 100644
--- a/lib/stdlib/src/gen_fsm.erl
+++ b/lib/stdlib/src/gen_fsm.erl
@@ -105,6 +105,8 @@
%%%
%%% ---------------------------------------------------
+-include("logger.hrl").
+
-export([start/3, start/4,
start_link/3, start_link/4,
stop/1, stop/3,
@@ -124,6 +126,9 @@
system_replace_state/2,
format_status/2]).
+%% logger callback
+-export([format_log/1]).
+
-deprecated({start, 3, next_major_release}).
-deprecated({start, 4, next_major_release}).
-deprecated({start_link, 3, next_major_release}).
@@ -144,8 +149,6 @@
-deprecated({enter_loop, 5, next_major_release}).
-deprecated({enter_loop, 6, next_major_release}).
--import(error_logger, [format/2]).
-
%%% ---------------------------------------------------
%%% Interface functions.
%%% ---------------------------------------------------
@@ -499,8 +502,12 @@ handle_msg(Msg, Parent, Name, StateName, StateData, Mod, _Time, HibernateAfterTi
reply(From, Reply),
exit(R);
{'EXIT', {undef, [{Mod, handle_info, [_,_,_], _}|_]}} ->
- error_logger:warning_msg("** Undefined handle_info in ~p~n"
- "** Unhandled message: ~tp~n", [Mod, Msg]),
+ ?LOG_WARNING(#{label=>{gen_fsm,no_handle_info},
+ module=>Mod,
+ message=>Msg},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_fsm:format_log/1,
+ error_logger=>#{tag=>warning_msg}}),
loop(Parent, Name, StateName, StateData, Mod, infinity, HibernateAfterTimeout, []);
{'EXIT', What} ->
terminate(What, Name, Msg, Mod, StateName, StateData, []);
@@ -603,6 +610,24 @@ terminate(Reason, Name, Msg, Mod, StateName, StateData, Debug) ->
end.
error_info(Reason, Name, Msg, StateName, StateData, Debug) ->
+ ?LOG_ERROR(#{label=>{gen_fsm,terminate},
+ name=>Name,
+ last_message=>Msg,
+ state_name=>StateName,
+ state_data=>StateData,
+ reason=>Reason},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_fsm:format_log/1,
+ error_logger=>#{tag=>error}}),
+ sys:print_log(Debug),
+ ok.
+
+format_log(#{label:={gen_fsm,terminate},
+ name:=Name,
+ last_message:=Msg,
+ state_name:=StateName,
+ state_data:=StateData,
+ reason:=Reason}) ->
Reason1 =
case Reason of
{undef,[{M,F,A,L}|MFAs]} ->
@@ -620,14 +645,18 @@ error_info(Reason, Name, Msg, StateName, StateData, Debug) ->
_ ->
Reason
end,
- Str = "** State machine ~tp terminating \n" ++
- get_msg_str(Msg) ++
- "** When State == ~tp~n"
- "** Data == ~tp~n"
- "** Reason for termination = ~n** ~tp~n",
- format(Str, [Name, get_msg(Msg), StateName, StateData, Reason1]),
- sys:print_log(Debug),
- ok.
+ {"** State machine ~tp terminating \n" ++
+ get_msg_str(Msg) ++
+ "** When State == ~tp~n"
+ "** Data == ~tp~n"
+ "** Reason for termination = ~n** ~tp~n",
+ [Name, get_msg(Msg), StateName, StateData, Reason1]};
+format_log(#{label:={gen_fsm,no_handle_info},
+ module:=Mod,
+ message:=Msg}) ->
+ {"** Undefined handle_info in ~p~n"
+ "** Unhandled message: ~tp~n",
+ [Mod, Msg]}.
get_msg_str({'$gen_event', _Event}) ->
"** Last event in was ~tp~n";
diff --git a/lib/stdlib/src/gen_server.erl b/lib/stdlib/src/gen_server.erl
index f29314d0a2..f65ef78636 100644
--- a/lib/stdlib/src/gen_server.erl
+++ b/lib/stdlib/src/gen_server.erl
@@ -104,9 +104,14 @@
system_replace_state/2,
format_status/2]).
+%% logger callback
+-export([format_log/1]).
+
%% Internal exports
-export([init_it/6]).
+-include("logger.hrl").
+
-define(
STACKTRACE(),
element(2, erlang:process_info(self(), current_stacktrace))).
@@ -636,9 +641,13 @@ try_dispatch(Mod, Func, Msg, State) ->
error:undef = R:Stacktrace when Func == handle_info ->
case erlang:function_exported(Mod, handle_info, 2) of
false ->
- error_logger:warning_msg("** Undefined handle_info in ~p~n"
- "** Unhandled message: ~tp~n",
- [Mod, Msg]),
+ ?LOG_WARNING(
+ #{label=>{gen_server,no_handle_info},
+ module=>Mod,
+ message=>Msg},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_server:format_log/1,
+ error_logger=>#{tag=>warning_msg}}),
{ok, {noreply, State}};
true ->
{'EXIT', error, R, Stacktrace}
@@ -849,8 +858,7 @@ terminate(Class, Reason, Stacktrace, ReportReason, Name, From, Msg, Mod, State,
Reply = try_terminate(Mod, terminate_reason(Class, Reason, Stacktrace), State),
case Reply of
{'EXIT', C, R, S} ->
- FmtState = format_status(terminate, Mod, get(), State),
- error_info({R, S}, Name, From, Msg, FmtState, Debug),
+ error_info({R, S}, Name, From, Msg, Mod, State, Debug),
erlang:raise(C, R, S);
_ ->
case {Class, Reason} of
@@ -858,8 +866,7 @@ terminate(Class, Reason, Stacktrace, ReportReason, Name, From, Msg, Mod, State,
{exit, shutdown} -> ok;
{exit, {shutdown,_}} -> ok;
_ ->
- FmtState = format_status(terminate, Mod, get(), State),
- error_info(ReportReason, Name, From, Msg, FmtState, Debug)
+ error_info(ReportReason, Name, From, Msg, Mod, State, Debug)
end
end,
case Stacktrace of
@@ -872,12 +879,46 @@ terminate(Class, Reason, Stacktrace, ReportReason, Name, From, Msg, Mod, State,
terminate_reason(error, Reason, Stacktrace) -> {Reason, Stacktrace};
terminate_reason(exit, Reason, _Stacktrace) -> Reason.
-error_info(_Reason, application_controller, _From, _Msg, _State, _Debug) ->
+error_info(_Reason, application_controller, _From, _Msg, _Mod, _State, _Debug) ->
%% OTP-5811 Don't send an error report if it's the system process
%% application_controller which is terminating - let init take care
%% of it instead
ok;
-error_info(Reason, Name, From, Msg, State, Debug) ->
+error_info(Reason, Name, From, Msg, Mod, State, Debug) ->
+ ?LOG_ERROR(#{label=>{gen_server,terminate},
+ name=>Name,
+ last_message=>Msg,
+ state=>format_status(terminate, Mod, get(), State),
+ reason=>Reason,
+ client_info=>client_stacktrace(From)},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_server:format_log/1,
+ error_logger=>#{tag=>error}}),
+ sys:print_log(Debug),
+ ok.
+
+client_stacktrace(undefined) ->
+ undefined;
+client_stacktrace({From,_Tag}) ->
+ client_stacktrace(From);
+client_stacktrace(From) when is_pid(From), node(From) =:= node() ->
+ case process_info(From, [current_stacktrace, registered_name]) of
+ undefined ->
+ {From,dead};
+ [{current_stacktrace, Stacktrace}, {registered_name, []}] ->
+ {From,{From,Stacktrace}};
+ [{current_stacktrace, Stacktrace}, {registered_name, Name}] ->
+ {From,{Name,Stacktrace}}
+ end;
+client_stacktrace(From) when is_pid(From) ->
+ {From,remote}.
+
+format_log(#{label:={gen_server,terminate},
+ name:=Name,
+ last_message:=Msg,
+ state:=State,
+ reason:=Reason,
+ client_info:=Client}) ->
Reason1 =
case Reason of
{undef,[{M,F,A,L}|MFAs]} ->
@@ -893,36 +934,31 @@ error_info(Reason, Name, From, Msg, State, Debug) ->
end
end;
_ ->
- error_logger:limit_term(Reason)
+ logger:limit_term(Reason)
end,
- {ClientFmt, ClientArgs} = client_stacktrace(From),
- LimitedState = error_logger:limit_term(State),
- error_logger:format("** Generic server ~tp terminating \n"
- "** Last message in was ~tp~n"
- "** When Server state == ~tp~n"
- "** Reason for termination == ~n** ~tp~n" ++ ClientFmt,
- [Name, Msg, LimitedState, Reason1] ++ ClientArgs),
- sys:print_log(Debug),
- ok.
-client_stacktrace(undefined) ->
+ {ClientFmt,ClientArgs} = format_client_log(Client),
+ {"** Generic server ~tp terminating \n"
+ "** Last message in was ~tp~n"
+ "** When Server state == ~tp~n"
+ "** Reason for termination == ~n** ~tp~n" ++ ClientFmt,
+ [Name, Msg, logger:limit_term(State), Reason1] ++ ClientArgs};
+format_log(#{label:={gen_server,no_handle_info},
+ module:=Mod,
+ message:=Msg}) ->
+ {"** Undefined handle_info in ~p~n"
+ "** Unhandled message: ~tp~n",
+ [Mod, Msg]}.
+
+format_client_log(undefined) ->
{"", []};
-client_stacktrace({From, _Tag}) ->
- client_stacktrace(From);
-client_stacktrace(From) when is_pid(From), node(From) =:= node() ->
- case process_info(From, [current_stacktrace, registered_name]) of
- undefined ->
- {"** Client ~p is dead~n", [From]};
- [{current_stacktrace, Stacktrace}, {registered_name, []}] ->
- {"** Client ~p stacktrace~n"
- "** ~tp~n",
- [From, Stacktrace]};
- [{current_stacktrace, Stacktrace}, {registered_name, Name}] ->
- {"** Client ~tp stacktrace~n"
- "** ~tp~n",
- [Name, Stacktrace]}
- end;
-client_stacktrace(From) when is_pid(From) ->
- {"** Client ~p is remote on node ~p~n", [From, node(From)]}.
+format_client_log({From,dead}) ->
+ {"** Client ~p is dead~n", [From]};
+format_client_log({From,remote}) ->
+ {"** Client ~p is remote on node ~p~n", [From, node(From)]};
+format_client_log({_From,{Name,Stacktrace}}) ->
+ {"** Client ~tp stacktrace~n"
+ "** ~tp~n",
+ [Name, Stacktrace]}.
%%-----------------------------------------------------------------
%% Status information
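Since format_log/1 is now exported, the legacy crash text can also be produced directly from a report map. Illustrative call only; the report values are made up, but the keys match what error_info/7 above puts in the report:

{Fmt, Args} = gen_server:format_log(#{label => {gen_server, terminate},
                                      name => my_server,
                                      last_message => boom,
                                      state => #{count => 1},
                                      reason => badarg,
                                      client_info => undefined}),
io:format(Fmt, Args).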
diff --git a/lib/stdlib/src/gen_statem.erl b/lib/stdlib/src/gen_statem.erl
index f7dc0050b3..f558f0d33e 100644
--- a/lib/stdlib/src/gen_statem.erl
+++ b/lib/stdlib/src/gen_statem.erl
@@ -19,6 +19,8 @@
%%
-module(gen_statem).
+-include("logger.hrl").
+
%% API
-export(
[start/3,start/4,start_link/3,start_link/4,
@@ -44,6 +46,9 @@
-export(
[wakeup_from_hibernate/3]).
+%% logger callback
+-export([format_log/1]).
+
%% Type exports for templates and callback modules
-export_type(
[event_type/0,
@@ -702,7 +707,7 @@ init_it(Starter, Parent, ServerRef, Module, Args, Opts) ->
error_info(
Class, Reason, Stacktrace,
#state{name = Name},
- [], undefined),
+ []),
erlang:raise(Class, Reason, Stacktrace)
end.
@@ -733,7 +738,7 @@ init_result(Starter, Parent, ServerRef, Module, Result, Opts) ->
error_info(
error, Error, ?STACKTRACE(),
#state{name = Name},
- [], undefined),
+ []),
exit(Error)
end.
@@ -1849,9 +1854,7 @@ terminate(
catch
_ -> ok;
C:R:ST ->
- error_info(
- C, R, ST, S, Q,
- format_status(terminate, get(), S)),
+ error_info(C, R, ST, S, Q),
sys:print_log(Debug),
erlang:raise(C, R, ST)
end;
@@ -1867,9 +1870,7 @@ terminate(
{shutdown,_} ->
terminate_sys_debug(Debug, S, State, Reason);
_ ->
- error_info(
- Class, Reason, Stacktrace, S, Q,
- format_status(terminate, get(), S)),
+ error_info(Class, Reason, Stacktrace, S, Q),
sys:print_log(Debug)
end,
case Stacktrace of
@@ -1889,8 +1890,28 @@ error_info(
name = Name,
callback_mode = CallbackMode,
state_enter = StateEnter,
- postponed = P},
- Q, FmtData) ->
+ postponed = P} = S,
+ Q) ->
+ ?LOG_ERROR(#{label=>{gen_statem,terminate},
+ name=>Name,
+ queue=>Q,
+ postponed=>P,
+ callback_mode=>CallbackMode,
+ state_enter=>StateEnter,
+ state=>format_status(terminate, get(), S),
+ reason=>{Class,Reason,Stacktrace}},
+ #{domain=>[beam,erlang,otp],
+ report_cb=>fun gen_statem:format_log/1,
+ error_logger=>#{tag=>error}}).
+
+format_log(#{label:={gen_statem,terminate},
+ name:=Name,
+ queue:=Q,
+ postponed:=P,
+ callback_mode:=CallbackMode,
+ state_enter:=StateEnter,
+ state:=FmtData,
+ reason:={Class,Reason,Stacktrace}}) ->
{FixedReason,FixedStacktrace} =
case Stacktrace of
[{M,F,Args,_}|ST]
@@ -1917,7 +1938,7 @@ error_info(
_ -> {Reason,Stacktrace}
end,
[LimitedP, LimitedFmtData, LimitedFixedReason] =
- [error_logger:limit_term(D) || D <- [P, FmtData, FixedReason]],
+ [logger:limit_term(D) || D <- [P, FmtData, FixedReason]],
CBMode =
case StateEnter of
true ->
@@ -1925,48 +1946,46 @@ error_info(
false ->
CallbackMode
end,
- error_logger:format(
- "** State machine ~tp terminating~n" ++
- case Q of
- [] -> "";
- _ -> "** Last event = ~tp~n"
- end ++
- "** When server state = ~tp~n" ++
- "** Reason for termination = ~w:~tp~n" ++
- "** Callback mode = ~p~n" ++
- case Q of
- [_,_|_] -> "** Queued = ~tp~n";
- _ -> ""
- end ++
- case P of
- [] -> "";
- _ -> "** Postponed = ~tp~n"
- end ++
- case FixedStacktrace of
- [] -> "";
- _ -> "** Stacktrace =~n** ~tp~n"
- end,
- [Name |
- case Q of
- [] -> [];
- [Event|_] -> [Event]
- end] ++
- [LimitedFmtData,
- Class,LimitedFixedReason,
- CBMode] ++
- case Q of
- [_|[_|_] = Events] -> [Events];
- _ -> []
- end ++
- case P of
- [] -> [];
- _ -> [LimitedP]
- end ++
- case FixedStacktrace of
- [] -> [];
- _ -> [FixedStacktrace]
- end).
-
+ {"** State machine ~tp terminating~n" ++
+ case Q of
+ [] -> "";
+ _ -> "** Last event = ~tp~n"
+ end ++
+ "** When server state = ~tp~n" ++
+ "** Reason for termination = ~w:~tp~n" ++
+ "** Callback mode = ~p~n" ++
+ case Q of
+ [_,_|_] -> "** Queued = ~tp~n";
+ _ -> ""
+ end ++
+ case P of
+ [] -> "";
+ _ -> "** Postponed = ~tp~n"
+ end ++
+ case FixedStacktrace of
+ [] -> "";
+ _ -> "** Stacktrace =~n** ~tp~n"
+ end,
+ [Name |
+ case Q of
+ [] -> [];
+ [Event|_] -> [Event]
+ end] ++
+ [LimitedFmtData,
+ Class,LimitedFixedReason,
+ CBMode] ++
+ case Q of
+ [_|[_|_] = Events] -> [Events];
+ _ -> []
+ end ++
+ case P of
+ [] -> [];
+ _ -> [LimitedP]
+ end ++
+ case FixedStacktrace of
+ [] -> [];
+ _ -> [FixedStacktrace]
+ end}.
%% Call Module:format_status/2 or return a default value
format_status(
diff --git a/lib/stdlib/src/io_lib.erl b/lib/stdlib/src/io_lib.erl
index e37c13093b..3a5aba60b4 100644
--- a/lib/stdlib/src/io_lib.erl
+++ b/lib/stdlib/src/io_lib.erl
@@ -60,11 +60,12 @@
-module(io_lib).
--export([fwrite/2,fread/2,fread/3,format/2]).
--export([scan_format/2,unscan_format/1,build_text/1]).
+-export([fwrite/2,fwrite/3,fread/2,fread/3,format/2,format/3]).
+-export([scan_format/2,unscan_format/1,build_text/1,build_text/2]).
-export([print/1,print/4,indentation/2]).
-export([write/1,write/2,write/3,nl/0,format_prompt/1,format_prompt/2]).
+-export([write_binary/3]).
-export([write_atom/1,write_string/1,write_string/2,write_latin1_string/1,
write_latin1_string/2, write_char/1, write_latin1_char/1]).
@@ -87,7 +88,7 @@
-export([limit_term/2]).
-export_type([chars/0, latin1_string/0, continuation/0,
- fread_error/0, fread_item/0, format_spec/0]).
+ fread_error/0, fread_item/0, format_spec/0, chars_limit/0]).
%%----------------------------------------------------------------------
@@ -135,6 +136,18 @@
fwrite(Format, Args) ->
format(Format, Args).
+-type chars_limit() :: integer().
+
+-spec fwrite(Format, Data, Options) -> chars() when
+ Format :: io:format(),
+ Data :: [term()],
+ Options :: [Option],
+ Option :: {'chars_limit', CharsLimit},
+ CharsLimit :: chars_limit().
+
+fwrite(Format, Args, Options) ->
+ format(Format, Args, Options).
+
-spec fread(Format, String) -> Result when
Format :: string(),
String :: string(),
@@ -172,6 +185,21 @@ format(Format, Args) ->
Other
end.
+-spec format(Format, Data, Options) -> chars() when
+ Format :: io:format(),
+ Data :: [term()],
+ Options :: [Option],
+ Option :: {'chars_limit', CharsLimit},
+ CharsLimit :: chars_limit().
+
+format(Format, Args, Options) ->
+ case catch io_lib_format:fwrite(Format, Args, Options) of
+ {'EXIT',_} ->
+ erlang:error(badarg, [Format, Args, Options]);
+ Other ->
+ Other
+ end.
+
-spec scan_format(Format, Data) -> FormatList when
Format :: io:format(),
Data :: [term()],
@@ -197,6 +225,15 @@ unscan_format(FormatList) ->
build_text(FormatList) ->
io_lib_format:build(FormatList).
+-spec build_text(FormatList, Options) -> chars() when
+ FormatList :: [char() | format_spec()],
+ Options :: [Option],
+ Option :: {'chars_limit', CharsLimit},
+ CharsLimit :: chars_limit().
+
+build_text(FormatList, Options) ->
+ io_lib_format:build(FormatList, Options).
+
-spec print(Term) -> chars() when
Term :: term().
@@ -240,7 +277,7 @@ format_prompt(Prompt, Encoding) ->
do_format_prompt(add_modifier(Encoding, "p"), [Prompt]).
do_format_prompt(Format, Args) ->
- case catch io_lib:format(Format, Args) of
+ case catch format(Format, Args) of
{'EXIT',_} -> "???";
List -> List
end.
@@ -259,7 +296,8 @@ add_modifier(_, C) ->
-spec write(Term) -> chars() when
Term :: term().
-write(Term) -> write(Term, -1).
+write(Term) ->
+ write1(Term, -1, latin1).
-spec write(term(), depth(), boolean()) -> chars().
@@ -274,16 +312,29 @@ write(Term, D, false) ->
(Term, Options) -> chars() when
Term :: term(),
Options :: [Option],
- Option :: {'depth', Depth}
+ Option :: {'chars_limit', CharsLimit}
+ | {'depth', Depth}
| {'encoding', 'latin1' | 'utf8' | 'unicode'},
+ CharsLimit :: chars_limit(),
Depth :: depth().
write(Term, Options) when is_list(Options) ->
Depth = get_option(depth, Options, -1),
Encoding = get_option(encoding, Options, epp:default_encoding()),
- write1(Term, Depth, Encoding);
+ CharsLimit = get_option(chars_limit, Options, -1),
+ if
+ Depth =:= 0; CharsLimit =:= 0 ->
+ "...";
+ CharsLimit < 0 ->
+ write1(Term, Depth, Encoding);
+ CharsLimit > 0 ->
+ RecDefFun = fun(_, _) -> no end,
+ If = io_lib_pretty:intermediate
+ (Term, Depth, CharsLimit, RecDefFun, Encoding, _Str=false),
+ io_lib_pretty:write(If)
+ end;
write(Term, Depth) ->
- write1(Term, Depth, latin1).
+ write(Term, [{depth, Depth}, {encoding, latin1}]).
write1(_Term, 0, _E) -> "...";
write1(Term, _D, _E) when is_integer(Term) -> integer_to_list(Term);
@@ -300,7 +351,7 @@ write1([H|T], D, E) ->
if
D =:= 1 -> "[...]";
true ->
- [$[,[write1(H, D-1, E)|write_tail(T, D-1, E, $|)],$]]
+ [$[,[write1(H, D-1, E)|write_tail(T, D-1, E)],$]]
end;
write1(F, _D, _E) when is_function(F) ->
erlang:fun_to_list(F);
@@ -311,20 +362,24 @@ write1(T, D, E) when is_tuple(T) ->
D =:= 1 -> "{...}";
true ->
[${,
- [write1(element(1, T), D-1, E)|
- write_tail(tl(tuple_to_list(T)), D-1, E, $,)],
+ [write1(element(1, T), D-1, E)|write_tuple(T, 2, D-1, E)],
$}]
end.
-%% write_tail(List, Depth, CharacterBeforeDots)
+%% write_tail(List, Depth, Encoding)
%% Test the terminating case first as this looks better with depth.
-write_tail([], _D, _E, _S) -> "";
-write_tail(_, 1, _E, S) -> [S | "..."];
-write_tail([H|T], D, E, S) ->
- [$,,write1(H, D-1, E)|write_tail(T, D-1, E, S)];
-write_tail(Other, D, E, S) ->
- [S,write1(Other, D-1, E)].
+write_tail([], _D, _E) -> "";
+write_tail(_, 1, _E) -> [$| | "..."];
+write_tail([H|T], D, E) ->
+ [$,,write1(H, D-1, E)|write_tail(T, D-1, E)];
+write_tail(Other, D, E) ->
+ [$|,write1(Other, D-1, E)].
+
+write_tuple(T, I, _D, _E) when I > tuple_size(T) -> "";
+write_tuple(_, _I, 1, _E) -> [$, | "..."];
+write_tuple(T, I, D, E) ->
+ [$,,write1(element(I, T), D-1, E)|write_tuple(T, I+1, D-1, E)].
write_port(Port) ->
erlang:port_to_list(Port).
@@ -333,32 +388,43 @@ write_ref(Ref) ->
erlang:ref_to_list(Ref).
write_map(Map, D, E) when is_integer(D) ->
- [$#,${,write_map_body(maps:to_list(Map), D, E),$}].
+ [$#,${,write_map_body(maps:to_list(Map), D, D - 1, E),$}].
-write_map_body(_, 0, _E) -> "...";
-write_map_body([], _, _E) -> [];
-write_map_body([{K,V}], D, E) -> write_map_assoc(K, V, D, E);
-write_map_body([{K,V}|KVs], D, E) ->
- [write_map_assoc(K, V, D, E),$, | write_map_body(KVs, D-1, E)].
+write_map_body(_, 1, _D0, _E) -> "...";
+write_map_body([], _, _D0, _E) -> [];
+write_map_body([{K,V}], _D, D0, E) -> write_map_assoc(K, V, D0, E);
+write_map_body([{K,V}|KVs], D, D0, E) ->
+ [write_map_assoc(K, V, D0, E),$, | write_map_body(KVs, D - 1, D0, E)].
write_map_assoc(K, V, D, E) ->
- [write1(K, D - 1, E),"=>",write1(V, D-1, E)].
+ [write1(K, D, E)," => ",write1(V, D, E)].
write_binary(B, D) when is_integer(D) ->
- [$<,$<,write_binary_body(B, D),$>,$>].
-
-write_binary_body(<<>>, _D) ->
- "";
-write_binary_body(_B, 1) ->
- "...";
-write_binary_body(<<X:8>>, _D) ->
- [integer_to_list(X)];
-write_binary_body(<<X:8,Rest/bitstring>>, D) ->
- [integer_to_list(X),$,|write_binary_body(Rest, D-1)];
-write_binary_body(B, _D) ->
+ {S, _} = write_binary(B, D, -1),
+ S.
+
+write_binary(B, D, T) ->
+ {S, Rest} = write_binary_body(B, D, tsub(T, 4), []),
+ {[$<,$<,lists:reverse(S),$>,$>], Rest}.
+
+write_binary_body(<<>> = B, _D, _T, Acc) ->
+ {Acc, B};
+write_binary_body(B, D, T, Acc) when D =:= 1; T =:= 0 ->
+ {["..."|Acc], B};
+write_binary_body(<<X:8>>, _D, _T, Acc) ->
+ {[integer_to_list(X)|Acc], <<>>};
+write_binary_body(<<X:8,Rest/bitstring>>, D, T, Acc) ->
+ S = integer_to_list(X),
+ write_binary_body(Rest, D-1, tsub(T, length(S) + 1), [$,,S|Acc]);
+write_binary_body(B, _D, _T, Acc) ->
L = bit_size(B),
<<X:L>> = B,
- [integer_to_list(X),$:,integer_to_list(L)].
+ {[integer_to_list(L),$:,integer_to_list(X)|Acc], <<>>}.
+
+%% Make sure T does not change sign.
+tsub(T, _) when T < 0 -> T;
+tsub(T, E) when T >= E -> T - E;
+tsub(_, _) -> 0.
get_option(Key, TupleList, Default) ->
case lists:keyfind(Key, 1, TupleList) of
@@ -947,7 +1013,7 @@ limit(T, D) when is_tuple(T) ->
D =:= 1 -> {'...'};
true ->
list_to_tuple([limit(element(1, T), D-1)|
- limit_tail(tl(tuple_to_list(T)), D-1)])
+ limit_tuple(T, 2, D-1)])
end;
limit(<<_/bitstring>>=Term, D) -> limit_bitstring(Term, D);
limit(Term, _D) -> Term.
@@ -959,6 +1025,11 @@ limit_tail([H|T], D) ->
limit_tail(Other, D) ->
limit(Other, D-1).
+limit_tuple(T, I, _D) when I > tuple_size(T) -> [];
+limit_tuple(_, _I, 1) -> ['...'];
+limit_tuple(T, I, D) ->
+ [limit(element(I, T), D-1)|limit_tuple(T, I+1, D-1)].
+
%% Cannot limit maps properly since there is no guarantee that
%% maps:from_list() creates a map with the same internal ordering of
%% the selected associations as in Map. Instead of subtracting one
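The new chars_limit option bounds the total number of characters produced by the ~p, ~P, ~w, ~W and ~s controls. Illustrative calls; the exact truncation point depends on the term and is only approximated here:

1> lists:flatten(io_lib:format("~w", [lists:seq(1, 1000)], [{chars_limit, 40}])).
"[1,2,3,4,5,6,7,8,9,10,11,12,13|...]"
2> io_lib:write(lists:seq(1, 1000), [{chars_limit, 40}]).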
diff --git a/lib/stdlib/src/io_lib_format.erl b/lib/stdlib/src/io_lib_format.erl
index 64edbf1824..c814ab50d4 100644
--- a/lib/stdlib/src/io_lib_format.erl
+++ b/lib/stdlib/src/io_lib_format.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -21,7 +21,8 @@
%% Formatting functions of io library.
--export([fwrite/2,fwrite_g/1,indentation/2,scan/2,unscan/1,build/1]).
+-export([fwrite/2,fwrite/3,fwrite_g/1,indentation/2,scan/2,unscan/1,
+ build/1, build/2]).
%% Format the arguments in Args after string Format. Just generate
%% an error if there is an error in the arguments.
@@ -45,14 +46,42 @@
fwrite(Format, Args) ->
build(scan(Format, Args)).
+-spec fwrite(Format, Data, Options) -> FormatList when
+ Format :: io:format(),
+ Data :: [term()],
+ FormatList :: [char() | io_lib:format_spec()],
+ Options :: [Option],
+ Option :: {'chars_limit', CharsLimit},
+ CharsLimit :: io_lib:chars_limit().
+
+fwrite(Format, Args, Options) ->
+ build(scan(Format, Args), Options).
+
%% Build the output text for a pre-parsed format list.
-spec build(FormatList) -> io_lib:chars() when
FormatList :: [char() | io_lib:format_spec()].
build(Cs) ->
- Pc = pcount(Cs),
- build(Cs, Pc, 0).
+ build(Cs, []).
+
+-spec build(FormatList, Options) -> io_lib:chars() when
+ FormatList :: [char() | io_lib:format_spec()],
+ Options :: [Option],
+ Option :: {'chars_limit', CharsLimit},
+ CharsLimit :: io_lib:chars_limit().
+
+build(Cs, Options) ->
+ CharsLimit = get_option(chars_limit, Options, -1),
+ Res1 = build_small(Cs),
+ {P, S, W, Other} = count_small(Res1),
+ case P + S + W of
+ 0 ->
+ Res1;
+ NumOfLimited ->
+ RemainingChars = sub(CharsLimit, Other),
+ build_limited(Res1, P, NumOfLimited, RemainingChars, 0)
+ end.
%% Parse all control sequences in the format string.
@@ -202,40 +231,77 @@ collect_cc([$~|Fmt], Args) when is_list(Args) -> {$~,[],Fmt,Args};
collect_cc([$n|Fmt], Args) when is_list(Args) -> {$n,[],Fmt,Args};
collect_cc([$i|Fmt], [A|Args]) -> {$i,[A],Fmt,Args}.
-%% pcount([ControlC]) -> Count.
-%% Count the number of print requests.
-
-pcount(Cs) -> pcount(Cs, 0).
-
-pcount([#{control_char := $p}|Cs], Acc) -> pcount(Cs, Acc+1);
-pcount([#{control_char := $P}|Cs], Acc) -> pcount(Cs, Acc+1);
-pcount([_|Cs], Acc) -> pcount(Cs, Acc);
-pcount([], Acc) -> Acc.
-
-%% build([Control], Pc, Indentation) -> io_lib:chars().
+%% count_small([ControlC]) -> Count.
+%% Count the number of big (pPwWsS) print requests and
+%% number of characters of other print (small) requests.
+
+count_small(Cs) ->
+ count_small(Cs, #{p => 0, s => 0, w => 0, other => 0}).
+
+count_small([#{control_char := $p}|Cs], #{p := P} = Cnts) ->
+ count_small(Cs, Cnts#{p := P + 1});
+count_small([#{control_char := $P}|Cs], #{p := P} = Cnts) ->
+ count_small(Cs, Cnts#{p := P + 1});
+count_small([#{control_char := $w}|Cs], #{w := W} = Cnts) ->
+ count_small(Cs, Cnts#{w := W + 1});
+count_small([#{control_char := $W}|Cs], #{w := W} = Cnts) ->
+ count_small(Cs, Cnts#{w := W + 1});
+count_small([#{control_char := $s}|Cs], #{w := W} = Cnts) ->
+ count_small(Cs, Cnts#{w := W + 1});
+count_small([S|Cs], #{other := Other} = Cnts) when is_list(S) ->
+ count_small(Cs, Cnts#{other := Other + string:length(S)});
+count_small([C|Cs], #{other := Other} = Cnts) when is_integer(C) ->
+ count_small(Cs, Cnts#{other := Other + 1});
+count_small([], #{p := P, s := S, w := W, other := Other}) ->
+ {P, S, W, Other}.
+
+%% build_small([Control]) -> io_lib:chars().
+%% Interpret the control structures, but only the small ones.
+%% The big ones are saved for later.
+%% build_limited([Control], NumberOfPps, NumberOfLimited,
+%% CharsLimit, Indentation)
%% Interpret the control structures. Count the number of print
%% remaining and only calculate indentation when necessary. Must also
%% be smart when calculating indentation for characters in format.
-build([#{control_char := C, args := As, width := F, adjust := Ad,
- precision := P, pad_char := Pad, encoding := Enc,
- strings := Str} | Cs], Pc0, I) ->
- S = control(C, As, F, Ad, P, Pad, Enc, Str, I),
- Pc1 = decr_pc(C, Pc0),
+build_small([#{control_char := C, args := As, width := F, adjust := Ad,
+ precision := P, pad_char := Pad, encoding := Enc}=CC | Cs]) ->
+ case control_small(C, As, F, Ad, P, Pad, Enc) of
+ not_small -> [CC | build_small(Cs)];
+ S -> lists:flatten(S) ++ build_small(Cs)
+ end;
+build_small([C|Cs]) -> [C|build_small(Cs)];
+build_small([]) -> [].
+
+build_limited([#{control_char := C, args := As, width := F, adjust := Ad,
+ precision := P, pad_char := Pad, encoding := Enc,
+ strings := Str} | Cs], NumOfPs0, Count0, MaxLen0, I) ->
+ MaxChars = if
+ MaxLen0 < 0 -> MaxLen0;
+ true -> MaxLen0 div Count0
+ end,
+ S = control_limited(C, As, F, Ad, P, Pad, Enc, Str, MaxChars, I),
+ Len = string:length(S),
+ NumOfPs = decr_pc(C, NumOfPs0),
+ Count = Count0 - 1,
+ MaxLen = sub(MaxLen0, Len),
if
- Pc1 > 0 -> [S|build(Cs, Pc1, indentation(S, I))];
- true -> [S|build(Cs, Pc1, I)]
+ NumOfPs > 0 -> [S|build_limited(Cs, NumOfPs, Count,
+ MaxLen, indentation(S, I))];
+ true -> [S|build_limited(Cs, NumOfPs, Count, MaxLen, I)]
end;
-build([$\n|Cs], Pc, _I) -> [$\n|build(Cs, Pc, 0)];
-build([$\t|Cs], Pc, I) -> [$\t|build(Cs, Pc, ((I + 8) div 8) * 8)];
-build([C|Cs], Pc, I) -> [C|build(Cs, Pc, I+1)];
-build([], _Pc, _I) -> [].
+build_limited([$\n|Cs], NumOfPs, Count, MaxLen, _I) ->
+ [$\n|build_limited(Cs, NumOfPs, Count, MaxLen, 0)];
+build_limited([$\t|Cs], NumOfPs, Count, MaxLen, I) ->
+ [$\t|build_limited(Cs, NumOfPs, Count, MaxLen, ((I + 8) div 8) * 8)];
+build_limited([C|Cs], NumOfPs, Count, MaxLen, I) ->
+ [C|build_limited(Cs, NumOfPs, Count, MaxLen, I+1)];
+build_limited([], _, _, _, _) -> [].
decr_pc($p, Pc) -> Pc - 1;
decr_pc($P, Pc) -> Pc - 1;
decr_pc(_, Pc) -> Pc.
-
%% Calculate the indentation of the end of a string given its start
%% indentation. We assume tabs at 8 cols.
@@ -251,67 +317,74 @@ indentation([C|Cs], I) ->
indentation(Cs, indentation(C, I));
indentation([], I) -> I.
-%% control(FormatChar, [Argument], FieldWidth, Adjust, Precision, PadChar,
-%% Encoding, Indentation) -> String
-%% This is the main dispatch function for the various formatting commands.
-%% Field widths and precisions have already been calculated.
-
-control($w, [A], F, Adj, P, Pad, Enc, _Str, _I) ->
- term(io_lib:write(A, [{depth,-1}, {encoding, Enc}]), F, Adj, P, Pad);
-control($p, [A], F, Adj, P, Pad, Enc, Str, I) ->
- print(A, -1, F, Adj, P, Pad, Enc, Str, I);
-control($W, [A,Depth], F, Adj, P, Pad, Enc, _Str, _I) when is_integer(Depth) ->
- term(io_lib:write(A, [{depth,Depth}, {encoding, Enc}]), F, Adj, P, Pad);
-control($P, [A,Depth], F, Adj, P, Pad, Enc, Str, I) when is_integer(Depth) ->
- print(A, Depth, F, Adj, P, Pad, Enc, Str, I);
-control($s, [A], F, Adj, P, Pad, latin1, _Str, _I) when is_atom(A) ->
+%% control_small(FormatChar, [Argument], FieldWidth, Adjust, Precision,
+%% PadChar, Encoding) -> String
+%% control_limited(FormatChar, [Argument], FieldWidth, Adjust, Precision,
+%% PadChar, Encoding, StringP, ChrsLim, Indentation) -> String
+%% These are the dispatch functions for the various formatting controls.
+
+control_small($s, [A], F, Adj, P, Pad, latin1) when is_atom(A) ->
L = iolist_to_chars(atom_to_list(A)),
string(L, F, Adj, P, Pad);
-control($s, [A], F, Adj, P, Pad, unicode, _Str, _I) when is_atom(A) ->
+control_small($s, [A], F, Adj, P, Pad, unicode) when is_atom(A) ->
string(atom_to_list(A), F, Adj, P, Pad);
-control($s, [L0], F, Adj, P, Pad, latin1, _Str, _I) ->
- L = iolist_to_chars(L0),
- string(L, F, Adj, P, Pad);
-control($s, [L0], F, Adj, P, Pad, unicode, _Str, _I) ->
- L = cdata_to_chars(L0),
- uniconv(string(L, F, Adj, P, Pad));
-control($e, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_float(A) ->
+control_small($e, [A], F, Adj, P, Pad, _Enc) when is_float(A) ->
fwrite_e(A, F, Adj, P, Pad);
-control($f, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_float(A) ->
+control_small($f, [A], F, Adj, P, Pad, _Enc) when is_float(A) ->
fwrite_f(A, F, Adj, P, Pad);
-control($g, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_float(A) ->
+control_small($g, [A], F, Adj, P, Pad, _Enc) when is_float(A) ->
fwrite_g(A, F, Adj, P, Pad);
-control($b, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
+control_small($b, [A], F, Adj, P, Pad, _Enc) when is_integer(A) ->
unprefixed_integer(A, F, Adj, base(P), Pad, true);
-control($B, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
+control_small($B, [A], F, Adj, P, Pad, _Enc) when is_integer(A) ->
unprefixed_integer(A, F, Adj, base(P), Pad, false);
-control($x, [A,Prefix], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A),
- is_atom(Prefix) ->
+control_small($x, [A,Prefix], F, Adj, P, Pad, _Enc) when is_integer(A),
+ is_atom(Prefix) ->
prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), true);
-control($x, [A,Prefix], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
+control_small($x, [A,Prefix], F, Adj, P, Pad, _Enc) when is_integer(A) ->
true = io_lib:deep_char_list(Prefix), %Check if Prefix a character list
prefixed_integer(A, F, Adj, base(P), Pad, Prefix, true);
-control($X, [A,Prefix], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A),
- is_atom(Prefix) ->
+control_small($X, [A,Prefix], F, Adj, P, Pad, _Enc) when is_integer(A),
+ is_atom(Prefix) ->
prefixed_integer(A, F, Adj, base(P), Pad, atom_to_list(Prefix), false);
-control($X, [A,Prefix], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
+control_small($X, [A,Prefix], F, Adj, P, Pad, _Enc) when is_integer(A) ->
true = io_lib:deep_char_list(Prefix), %Check if Prefix a character list
prefixed_integer(A, F, Adj, base(P), Pad, Prefix, false);
-control($+, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
+control_small($+, [A], F, Adj, P, Pad, _Enc) when is_integer(A) ->
Base = base(P),
Prefix = [integer_to_list(Base), $#],
prefixed_integer(A, F, Adj, Base, Pad, Prefix, true);
-control($#, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
+control_small($#, [A], F, Adj, P, Pad, _Enc) when is_integer(A) ->
Base = base(P),
Prefix = [integer_to_list(Base), $#],
prefixed_integer(A, F, Adj, Base, Pad, Prefix, false);
-control($c, [A], F, Adj, P, Pad, unicode, _Str, _I) when is_integer(A) ->
+control_small($c, [A], F, Adj, P, Pad, unicode) when is_integer(A) ->
char(A, F, Adj, P, Pad);
-control($c, [A], F, Adj, P, Pad, _Enc, _Str, _I) when is_integer(A) ->
+control_small($c, [A], F, Adj, P, Pad, _Enc) when is_integer(A) ->
char(A band 255, F, Adj, P, Pad);
-control($~, [], F, Adj, P, Pad, _Enc, _Str, _I) -> char($~, F, Adj, P, Pad);
-control($n, [], F, Adj, P, Pad, _Enc, _Str, _I) -> newline(F, Adj, P, Pad);
-control($i, [_A], _F, _Adj, _P, _Pad, _Enc, _Str, _I) -> [].
+control_small($~, [], F, Adj, P, Pad, _Enc) -> char($~, F, Adj, P, Pad);
+control_small($n, [], F, Adj, P, Pad, _Enc) -> newline(F, Adj, P, Pad);
+control_small($i, [_A], _F, _Adj, _P, _Pad, _Enc) -> [];
+control_small(_C, _As, _F, _Adj, _P, _Pad, _Enc) -> not_small.
+
+control_limited($s, [L0], F, Adj, P, Pad, latin1, _Str, CL, _I) ->
+ L = iolist_to_chars(L0),
+ string(limit_string(L, F, CL), limit_field(F, CL), Adj, P, Pad);
+control_limited($s, [L0], F, Adj, P, Pad, unicode, _Str, CL, _I) ->
+ L = cdata_to_chars(L0),
+ uniconv(string(limit_string(L, F, CL), limit_field(F, CL), Adj, P, Pad));
+control_limited($w, [A], F, Adj, P, Pad, Enc, _Str, CL, _I) ->
+ Chars = io_lib:write(A, [{depth, -1}, {encoding, Enc}, {chars_limit, CL}]),
+ term(Chars, F, Adj, P, Pad);
+control_limited($p, [A], F, Adj, P, Pad, Enc, Str, CL, I) ->
+ print(A, -1, F, Adj, P, Pad, Enc, Str, CL, I);
+control_limited($W, [A,Depth], F, Adj, P, Pad, Enc, _Str, CL, _I)
+ when is_integer(Depth) ->
+ Chars = io_lib:write(A, [{depth, Depth}, {encoding, Enc}, {chars_limit, CL}]),
+ term(Chars, F, Adj, P, Pad);
+control_limited($P, [A,Depth], F, Adj, P, Pad, Enc, Str, CL, I)
+ when is_integer(Depth) ->
+ print(A, Depth, F, Adj, P, Pad, Enc, Str, CL, I).
-ifdef(UNICODE_AS_BINARIES).
uniconv(C) ->
@@ -348,12 +421,13 @@ term(T, F, Adj, P0, Pad) ->
%% Print a term. Field width sets maximum line length, Precision sets
%% initial indentation.
-print(T, D, none, Adj, P, Pad, E, Str, I) ->
- print(T, D, 80, Adj, P, Pad, E, Str, I);
-print(T, D, F, Adj, none, Pad, E, Str, I) ->
- print(T, D, F, Adj, I+1, Pad, E, Str, I);
-print(T, D, F, right, P, _Pad, Enc, Str, _I) ->
- Options = [{column, P},
+print(T, D, none, Adj, P, Pad, E, Str, ChLim, I) ->
+ print(T, D, 80, Adj, P, Pad, E, Str, ChLim, I);
+print(T, D, F, Adj, none, Pad, E, Str, ChLim, I) ->
+ print(T, D, F, Adj, I+1, Pad, E, Str, ChLim, I);
+print(T, D, F, right, P, _Pad, Enc, Str, ChLim, _I) ->
+ Options = [{chars_limit, ChLim},
+ {column, P},
{line_length, F},
{depth, D},
{encoding, Enc},
@@ -670,6 +744,18 @@ cdata_to_chars(B) when is_binary(B) ->
_ -> binary_to_list(B)
end.
+limit_string(S, F, CharsLimit) when CharsLimit < 0; CharsLimit >= F -> S;
+limit_string(S, _F, CharsLimit) ->
+ case string:length(S) =< CharsLimit of
+ true -> S;
+ false -> [string:slice(S, 0, sub(CharsLimit, 3)), "..."]
+ end.
+
+limit_field(F, CharsLimit) when CharsLimit < 0; F =:= none ->
+ F;
+limit_field(F, CharsLimit) ->
+ max(3, min(F, CharsLimit)).
+
%% string(String, Field, Adjust, Precision, PadChar)
string(S, none, _Adj, none, _Pad) -> S;
@@ -783,3 +869,15 @@ lowercase([H|T]) ->
[H|lowercase(T)];
lowercase([]) ->
[].
+
+%% Make sure T does not change sign.
+sub(T, _) when T < 0 -> T;
+sub(T, E) when T >= E -> T - E;
+sub(_, _) -> 0.
+
+get_option(Key, TupleList, Default) ->
+ case lists:keyfind(Key, 1, TupleList) of
+ false -> Default;
+ {Key, Value} -> Value;
+ _ -> Default
+ end.
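A worked example of how build/2 above splits the chars_limit budget between the "small" and the limited controls; the numbers trace the code path shown in this hunk and are not output of the commit:

%% For io_lib:format("~p ~p", [A, B], [{chars_limit, 81}]):
%%   build_small/1 renders the literal space (1 "small" character), so
%%   RemainingChars = 81 - 1 = 80 is left for the 2 limited controls;
%%   build_limited/5 gives the first ~p  80 div 2 = 40 characters, and any
%%   characters it does not use stay in the budget of the second ~p.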
diff --git a/lib/stdlib/src/io_lib_pretty.erl b/lib/stdlib/src/io_lib_pretty.erl
index 89e1931d2d..3d5a979b3e 100644
--- a/lib/stdlib/src/io_lib_pretty.erl
+++ b/lib/stdlib/src/io_lib_pretty.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1996-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1996-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -26,6 +26,9 @@
-export([print/1,print/2,print/3,print/4,print/5,print/6]).
+%% To be used by io_lib only.
+-export([intermediate/6, write/1]).
+
%%%
%%% Exported functions
%%%
@@ -45,20 +48,23 @@ print(Term) ->
%% Used by the shell for printing records and for Unicode.
-type rec_print_fun() :: fun((Tag :: atom(), NFields :: non_neg_integer()) ->
- no | [FieldName :: atom()]).
+ 'no' | [FieldName :: atom()]).
-type column() :: integer().
+-type encoding() :: epp:source_encoding() | 'unicode'.
-type line_length() :: pos_integer().
-type depth() :: integer().
--type max_chars() :: integer().
+-type line_max_chars() :: integer().
+-type chars_limit() :: integer().
-type chars() :: io_lib:chars().
--type option() :: {column, column()}
- | {line_length, line_length()}
- | {depth, depth()}
- | {max_chars, max_chars()}
- | {record_print_fun, rec_print_fun()}
- | {strings, boolean()}
- | {encoding, latin1 | utf8 | unicode}.
+-type option() :: {'chars_limit', chars_limit()}
+ | {'column', column()}
+ | {'depth', depth()}
+ | {'encoding', encoding()}
+ | {'line_length', line_length()}
+ | {'line_max_chars', line_max_chars()}
+ | {'record_print_fun', rec_print_fun()}
+ | {'strings', boolean()}.
-type options() :: [option()].
-spec print(term(), rec_print_fun()) -> chars();
@@ -68,11 +74,12 @@ print(Term, Options) when is_list(Options) ->
Col = get_option(column, Options, 1),
Ll = get_option(line_length, Options, 80),
D = get_option(depth, Options, -1),
- M = get_option(max_chars, Options, -1),
+ M = get_option(line_max_chars, Options, -1),
+ T = get_option(chars_limit, Options, -1),
RecDefFun = get_option(record_print_fun, Options, no_fun),
Encoding = get_option(encoding, Options, epp:default_encoding()),
Strings = get_option(strings, Options, true),
- print(Term, Col, Ll, D, M, RecDefFun, Encoding, Strings);
+ print(Term, Col, Ll, D, M, T, RecDefFun, Encoding, Strings);
print(Term, RecDefFun) ->
print(Term, -1, RecDefFun).
@@ -84,35 +91,43 @@ print(Term, Depth, RecDefFun) ->
-spec print(term(), column(), line_length(), depth()) -> chars().
print(Term, Col, Ll, D) ->
- print(Term, Col, Ll, D, _M=-1, no_fun, latin1, true).
+ print(Term, Col, Ll, D, _M=-1, _T=-1, no_fun, latin1, true).
-spec print(term(), column(), line_length(), depth(), rec_print_fun()) ->
chars().
print(Term, Col, Ll, D, RecDefFun) ->
print(Term, Col, Ll, D, _M=-1, RecDefFun).
--spec print(term(), column(), line_length(), depth(), max_chars(),
+-spec print(term(), column(), line_length(), depth(), line_max_chars(),
rec_print_fun()) -> chars().
print(Term, Col, Ll, D, M, RecDefFun) ->
- print(Term, Col, Ll, D, M, RecDefFun, latin1, true).
+ print(Term, Col, Ll, D, M, _T=-1, RecDefFun, latin1, true).
%% D = Depth, default -1 (infinite), or LINEMAX=30 when printing from shell
+%% T = chars_limit, that is, maximal number of characters, default -1
+%% Used together with D to limit the output. It is possible that
+%% more than T characters are returned.
%% Col = current column, default 1
%% Ll = line length/~p field width, default 80
%% M = CHAR_MAX (-1 if no max, 60 when printing from shell)
-print(_, _, _, 0, _M, _RF, _Enc, _Str) -> "...";
-print(Term, Col, Ll, D, M, RecDefFun, Enc, Str) when Col =< 0 ->
+print(_, _, _, 0, _M, _T, _RF, _Enc, _Str) -> "...";
+print(_, _, _, _D, _M, 0, _RF, _Enc, _Str) -> "...";
+print(Term, Col, Ll, D, M, T, RecDefFun, Enc, Str) when Col =< 0 ->
%% ensure Col is at least 1
- print(Term, 1, Ll, D, M, RecDefFun, Enc, Str);
-print(Atom, _Col, _Ll, _D, _M, _RF, Enc, _Str) when is_atom(Atom) ->
+ print(Term, 1, Ll, D, M, T, RecDefFun, Enc, Str);
+print(Atom, _Col, _Ll, _D, _M, _T, _RF, Enc, _Str) when is_atom(Atom) ->
write_atom(Atom, Enc);
-print(Term, Col, Ll, D, M0, RecDefFun, Enc, Str) when is_tuple(Term);
- is_list(Term);
- is_map(Term);
- is_bitstring(Term) ->
+print(Term, Col, Ll, D, M0, T, RecDefFun, Enc, Str) when is_tuple(Term);
+ is_list(Term);
+ is_map(Term);
+ is_bitstring(Term) ->
%% preprocess and compute total number of chars
- If = {_S, Len} = print_length(Term, D, RecDefFun, Enc, Str),
+ {_, Len, _Dots, _} = If =
+ case T < 0 of
+ true -> print_length(Term, D, T, RecDefFun, Enc, Str);
+ false -> intermediate(Term, D, T, RecDefFun, Enc, Str)
+ end,
%% use Len as CHAR_MAX if M0 = -1
M = max_cs(M0, Len),
if
@@ -126,7 +141,7 @@ print(Term, Col, Ll, D, M0, RecDefFun, Enc, Str) when is_tuple(Term);
1),
pp(If, Col, Ll, M, TInd, indent(Col), 0, 0)
end;
-print(Term, _Col, _Ll, _D, _M, _RF, _Enc, _Str) ->
+print(Term, _Col, _Ll, _D, _M, _T, _RF, _Enc, _Str) ->
%% atomic data types (bignums, atoms, ...) are never truncated
io_lib:write(Term).
@@ -147,28 +162,28 @@ max_cs(M, _Len) ->
?ATM(element(3, element(1, Pair)))). % Value
-define(ATM_FLD(Field), ?ATM(element(4, element(1, Field)))).
-pp({_S, Len} = If, Col, Ll, M, _TInd, _Ind, LD, W)
+pp({_S,Len,_,_} = If, Col, Ll, M, _TInd, _Ind, LD, W)
when Len < Ll - Col - LD, Len + W + LD =< M ->
write(If);
-pp({{list,L}, _Len}, Col, Ll, M, TInd, Ind, LD, W) ->
+pp({{list,L}, _Len, _, _}, Col, Ll, M, TInd, Ind, LD, W) ->
[$[, pp_list(L, Col + 1, Ll, M, TInd, indent(1, Ind), LD, $|, W + 1), $]];
-pp({{tuple,true,L}, _Len}, Col, Ll, M, TInd, Ind, LD, W) ->
+pp({{tuple,true,L}, _Len, _, _}, Col, Ll, M, TInd, Ind, LD, W) ->
[${, pp_tag_tuple(L, Col, Ll, M, TInd, Ind, LD, W + 1), $}];
-pp({{tuple,false,L}, _Len}, Col, Ll, M, TInd, Ind, LD, W) ->
+pp({{tuple,false,L}, _Len, _, _}, Col, Ll, M, TInd, Ind, LD, W) ->
[${, pp_list(L, Col + 1, Ll, M, TInd, indent(1, Ind), LD, $,, W + 1), $}];
-pp({{map,Pairs},_Len}, Col, Ll, M, TInd, Ind, LD, W) ->
+pp({{map,Pairs}, _Len, _, _}, Col, Ll, M, TInd, Ind, LD, W) ->
[$#, ${, pp_map(Pairs, Col + 2, Ll, M, TInd, indent(2, Ind), LD, W + 1),
$}];
-pp({{record,[{Name,NLen} | L]}, _Len}, Col, Ll, M, TInd, Ind, LD, W) ->
+pp({{record,[{Name,NLen} | L]}, _Len, _, _}, Col, Ll, M, TInd, Ind, LD, W) ->
[Name, ${, pp_record(L, NLen, Col, Ll, M, TInd, Ind, LD, W + NLen+1), $}];
-pp({{bin,S}, _Len}, Col, Ll, M, _TInd, Ind, LD, W) ->
+pp({{bin,S}, _Len, _, _}, Col, Ll, M, _TInd, Ind, LD, W) ->
pp_binary(S, Col + 2, Ll, M, indent(2, Ind), LD, W);
-pp({S, _Len}, _Col, _Ll, _M, _TInd, _Ind, _LD, _W) ->
+pp({S,_Len,_,_}, _Col, _Ll, _M, _TInd, _Ind, _LD, _W) ->
S.
%% Print a tagged tuple by indenting the rest of the elements
%% differently to the tag. Tuple has size >= 2.
-pp_tag_tuple([{Tag,Tlen} | L], Col, Ll, M, TInd, Ind, LD, W) ->
+pp_tag_tuple([{Tag,Tlen,_,_} | L], Col, Ll, M, TInd, Ind, LD, W) ->
%% this uses TInd
TagInd = Tlen + 2,
Tcol = Col + TagInd,
@@ -184,18 +199,18 @@ pp_tag_tuple([{Tag,Tlen} | L], Col, Ll, M, TInd, Ind, LD, W) ->
end.
pp_map([], _Col, _Ll, _M, _TInd, _Ind, _LD, _W) ->
- "";
-pp_map({dots, _}, _Col, _Ll, _M, _TInd, _Ind, _LD, _W) ->
- "...";
+ ""; % cannot happen
+pp_map({dots, _, _, _}, _Col, _Ll, _M, _TInd, _Ind, _LD, _W) ->
+ "..."; % cannot happen
pp_map([P | Ps], Col, Ll, M, TInd, Ind, LD, W) ->
{PS, PW} = pp_pair(P, Col, Ll, M, TInd, Ind, last_depth(Ps, LD), W),
[PS | pp_pairs_tail(Ps, Col, Col + PW, Ll, M, TInd, Ind, LD, PW)].
pp_pairs_tail([], _Col0, _Col, _Ll, _M, _TInd, _Ind, _LD, _W) ->
"";
-pp_pairs_tail({dots, _}, _Col0, _Col, _M, _Ll, _TInd, _Ind, _LD, _W) ->
+pp_pairs_tail({dots, _, _, _}, _Col0, _Col, _M, _Ll, _TInd, _Ind, _LD, _W) ->
",...";
-pp_pairs_tail([{_, Len}=P | Ps], Col0, Col, Ll, M, TInd, Ind, LD, W) ->
+pp_pairs_tail([{_, Len, _, _}=P | Ps], Col0, Col, Ll, M, TInd, Ind, LD, W) ->
LD1 = last_depth(Ps, LD),
ELen = 1 + Len,
if
@@ -209,7 +224,7 @@ pp_pairs_tail([{_, Len}=P | Ps], Col0, Col, Ll, M, TInd, Ind, LD, W) ->
pp_pairs_tail(Ps, Col0, Col0 + PW, Ll, M, TInd, Ind, LD, PW)]
end.
-pp_pair({_, Len}=Pair, Col, Ll, M, _TInd, _Ind, LD, W)
+pp_pair({_, Len, _, _}=Pair, Col, Ll, M, _TInd, _Ind, LD, W)
when Len < Ll - Col - LD, Len + W + LD =< M ->
{write_pair(Pair), if
?ATM_PAIR(Pair) ->
@@ -217,7 +232,7 @@ pp_pair({_, Len}=Pair, Col, Ll, M, _TInd, _Ind, LD, W)
true ->
Ll % force nl
end};
-pp_pair({{map_pair, K, V}, _Len}, Col0, Ll, M, TInd, Ind0, LD, W) ->
+pp_pair({{map_pair, K, V}, _Len, _, _}, Col0, Ll, M, TInd, Ind0, LD, W) ->
I = map_value_indent(TInd),
Ind = indent(I, Ind0),
{[pp(K, Col0, Ll, M, TInd, Ind0, LD, W), " =>\n",
@@ -225,7 +240,7 @@ pp_pair({{map_pair, K, V}, _Len}, Col0, Ll, M, TInd, Ind0, LD, W) ->
pp_record([], _Nlen, _Col, _Ll, _M, _TInd, _Ind, _LD, _W) ->
"";
-pp_record({dots, _}, _Nlen, _Col, _Ll, _M, _TInd, _Ind, _LD, _W) ->
+pp_record({dots, _, _, _}, _Nlen, _Col, _Ll, _M, _TInd, _Ind, _LD, _W) ->
"...";
pp_record([F | Fs], Nlen, Col0, Ll, M, TInd, Ind0, LD, W0) ->
Nind = Nlen + 1,
@@ -235,9 +250,9 @@ pp_record([F | Fs], Nlen, Col0, Ll, M, TInd, Ind0, LD, W0) ->
pp_fields_tail([], _Col0, _Col, _Ll, _M, _TInd, _Ind, _LD, _W) ->
"";
-pp_fields_tail({dots, _}, _Col0, _Col, _M, _Ll, _TInd, _Ind, _LD, _W) ->
+pp_fields_tail({dots, _, _ ,_}, _Col0, _Col, _M, _Ll, _TInd, _Ind, _LD, _W) ->
",...";
-pp_fields_tail([{_, Len}=F | Fs], Col0, Col, Ll, M, TInd, Ind, LD, W) ->
+pp_fields_tail([{_, Len, _, _}=F | Fs], Col0, Col, Ll, M, TInd, Ind, LD, W) ->
LD1 = last_depth(Fs, LD),
ELen = 1 + Len,
if
@@ -251,7 +266,7 @@ pp_fields_tail([{_, Len}=F | Fs], Col0, Col, Ll, M, TInd, Ind, LD, W) ->
pp_fields_tail(Fs, Col0, Col0 + FW, Ll, M, TInd, Ind, LD, FW)]
end.
-pp_field({_, Len}=Fl, Col, Ll, M, _TInd, _Ind, LD, W)
+pp_field({_, Len, _, _}=Fl, Col, Ll, M, _TInd, _Ind, LD, W)
when Len < Ll - Col - LD, Len + W + LD =< M ->
{write_field(Fl), if
?ATM_FLD(Fl) ->
@@ -259,7 +274,7 @@ pp_field({_, Len}=Fl, Col, Ll, M, _TInd, _Ind, LD, W)
true ->
Ll % force nl
end};
-pp_field({{field, Name, NameL, F}, _Len}, Col0, Ll, M, TInd, Ind0, LD, W0) ->
+pp_field({{field, Name, NameL, F},_,_, _}, Col0, Ll, M, TInd, Ind0, LD, W0) ->
{Col, Ind, S, W} = rec_indent(NameL, TInd, Col0, Ind0, W0 + NameL),
Sep = case S of
[$\n | _] -> " =";
@@ -286,15 +301,15 @@ rec_indent(RInd, TInd, Col0, Ind0, W0) ->
end,
{Col, Ind, S, W}.
-pp_list({dots, _}, _Col0, _Ll, _M, _TInd, _Ind, _LD, _S, _W) ->
+pp_list({dots, _, _, _}, _Col0, _Ll, _M, _TInd, _Ind, _LD, _S, _W) ->
"...";
pp_list([E | Es], Col0, Ll, M, TInd, Ind, LD, S, W) ->
{ES, WE} = pp_element(E, Col0, Ll, M, TInd, Ind, last_depth(Es, LD), W),
[ES | pp_tail(Es, Col0, Col0 + WE, Ll, M, TInd, Ind, LD, S, W + WE)].
pp_tail([], _Col0, _Col, _Ll, _M, _TInd, _Ind, _LD, _S, _W) ->
- "";
-pp_tail([{_, Len}=E | Es], Col0, Col, Ll, M, TInd, Ind, LD, S, W) ->
+ [];
+pp_tail([{_, Len, _, _}=E | Es], Col0, Col, Ll, M, TInd, Ind, LD, S, W) ->
LD1 = last_depth(Es, LD),
ELen = 1 + Len,
if
@@ -307,9 +322,9 @@ pp_tail([{_, Len}=E | Es], Col0, Col, Ll, M, TInd, Ind, LD, S, W) ->
[$,, $\n, Ind, ES |
pp_tail(Es, Col0, Col0 + WE, Ll, M, TInd, Ind, LD, S, WE)]
end;
-pp_tail({dots, _}, _Col0, _Col, _Ll, _M, _TInd, _Ind, _LD, S, _W) ->
+pp_tail({dots, _, _, _}, _Col0, _Col, _Ll, _M, _TInd, _Ind, _LD, S, _W) ->
[S | "..."];
-pp_tail({_, Len}=E, _Col0, Col, Ll, M, _TInd, _Ind, LD, S, W)
+pp_tail({_, Len, _, _}=E, _Col0, Col, Ll, M, _TInd, _Ind, LD, S, W)
when Len + 1 < Ll - Col - (LD + 1),
Len + 1 + W + (LD + 1) =< M,
?ATM(E) ->
@@ -317,7 +332,7 @@ pp_tail({_, Len}=E, _Col0, Col, Ll, M, _TInd, _Ind, LD, S, W)
pp_tail(E, Col0, _Col, Ll, M, TInd, Ind, LD, S, _W) ->
[S, $\n, Ind | pp(E, Col0, Ll, M, TInd, Ind, LD + 1, 0)].
-pp_element({_, Len}=E, Col, Ll, M, _TInd, _Ind, LD, W)
+pp_element({_, Len, _, _}=E, Col, Ll, M, _TInd, _Ind, LD, W)
when Len < Ll - Col - LD, Len + W + LD =< M, ?ATM(E) ->
{write(E), Len};
pp_element(E, Col, Ll, M, TInd, Ind, LD, W) ->
@@ -348,42 +363,42 @@ pp_binary(S, N, _N0, Ind) ->
end.
%% write the whole thing on a single line
-write({{tuple, _IsTagged, L}, _}) ->
+write({{tuple, _IsTagged, L}, _, _, _}) ->
[${, write_list(L, $,), $}];
-write({{list, L}, _}) ->
+write({{list, L}, _, _, _}) ->
[$[, write_list(L, $|), $]];
-write({{map, Pairs}, _}) ->
+write({{map, Pairs}, _, _, _}) ->
[$#,${, write_list(Pairs, $,), $}];
-write({{map_pair, _K, _V}, _}=Pair) ->
+write({{map_pair, _K, _V}, _, _, _}=Pair) ->
write_pair(Pair);
-write({{record, [{Name,_} | L]}, _}) ->
+write({{record, [{Name,_} | L]}, _, _, _}) ->
[Name, ${, write_fields(L), $}];
-write({{bin, S}, _}) ->
+write({{bin, S}, _, _, _}) ->
S;
-write({S, _}) ->
+write({S, _, _, _}) ->
S.
-write_pair({{map_pair, K, V}, _}) ->
+write_pair({{map_pair, K, V}, _, _, _}) ->
[write(K), " => ", write(V)].
write_fields([]) ->
"";
-write_fields({dots, _}) ->
+write_fields({dots, _, _, _}) ->
"...";
write_fields([F | Fs]) ->
[write_field(F) | write_fields_tail(Fs)].
write_fields_tail([]) ->
"";
-write_fields_tail({dots, _}) ->
+write_fields_tail({dots, _, _, _}) ->
",...";
write_fields_tail([F | Fs]) ->
[$,, write_field(F) | write_fields_tail(Fs)].
-write_field({{field, Name, _NameL, F}, _}) ->
+write_field({{field, Name, _NameL, F}, _, _, _}) ->
[Name, " = " | write(F)].
-write_list({dots, _}, _S) ->
+write_list({dots, _, _, _}, _S) ->
"...";
write_list([E | Es], S) ->
[write(E) | write_tail(Es, S)].
@@ -392,192 +407,359 @@ write_tail([], _S) ->
[];
write_tail([E | Es], S) ->
[$,, write(E) | write_tail(Es, S)];
-write_tail({dots, _}, S) ->
+write_tail({dots, _, _, _}, S) ->
[S | "..."];
write_tail(E, S) ->
[S | write(E)].
+-type more() :: fun((chars_limit(), DeltaDepth :: non_neg_integer()) ->
+ intermediate_format()).
+
+-type if_list() :: maybe_improper_list(intermediate_format(),
+ {'dots', non_neg_integer(),
+ non_neg_integer(), more()}).
+
+-type intermediate_format() ::
+ {chars()
+ | {'bin', chars()}
+ | 'dots'
+ | {'field', Name :: chars(), NameLen :: non_neg_integer(),
+ intermediate_format()}
+ | {'list', if_list()}
+ | {'map', if_list()}
+ | {'map_pair', K :: intermediate_format(),
+ V :: intermediate_format()}
+ | {'record', [{Name :: chars(), NameLen :: non_neg_integer()}
+ | if_list()]}
+ | {'tuple', IsTagged :: boolean(), if_list()},
+ Len :: non_neg_integer(),
+ NumOfDots :: non_neg_integer(),
+ More :: more() | 'no_more'
+ }.
+
+-spec intermediate(term(), depth(), pos_integer(), rec_print_fun(),
+ encoding(), boolean()) -> intermediate_format().
+
+intermediate(Term, D, T, RF, Enc, Str) when T > 0 ->
+ D0 = 1,
+ If = print_length(Term, D0, T, RF, Enc, Str),
+ case If of
+ {_, Len, Dots, _} when Dots =:= 0; Len > T; D =:= 1 ->
+ If;
+ _ ->
+ find_upper(If, Term, T, D0, 2, D, RF, Enc, Str)
+ end.
+
+find_upper(Lower, Term, T, Dl, Dd, D, RF, Enc, Str) ->
+ Dd2 = Dd * 2,
+ D1 = case D < 0 of
+ true -> Dl + Dd2;
+ false -> min(Dl + Dd2, D)
+ end,
+ If = expand(Lower, T, D1 - Dl),
+ case If of
+ {_, _, _Dots=0, _} -> % even if Len > T
+ If;
+ {_, Len, _, _} when Len =< T, D1 < D orelse D < 0 ->
+ find_upper(If, Term, T, D1, Dd2, D, RF, Enc, Str);
+ _ ->
+ search_depth(Lower, If, Term, T, Dl, D1, RF, Enc, Str)
+ end.
+
+%% Lower has NumOfDots > 0 and Len =< T.
+%% Upper has NumOfDots > 0 and Len > T.
+search_depth(Lower, Upper, _Term, T, Dl, Du, _RF, _Enc, _Str)
+ when Du - Dl =:= 1 ->
+ %% The returned intermediate format has Len >= T.
+ case Lower of
+ {_, T, _, _} ->
+ Lower;
+ _ ->
+ Upper
+ end;
+search_depth(Lower, Upper, Term, T, Dl, Du, RF, Enc, Str) ->
+ D1 = (Dl + Du) div 2,
+ If = expand(Lower, T, D1 - Dl),
+ case If of
+ {_, Len, _, _} when Len > T ->
+ %% Len can be greater than Upper's length.
+ %% This is a bit expensive since the work to
+ %% create Upper is wasted. It is the price
+ %% to pay to get a more balanced output.
+ search_depth(Lower, If, Term, T, Dl, D1, RF, Enc, Str);
+ _ ->
+ search_depth(If, Upper, Term, T, D1, Du, RF, Enc, Str)
+ end.
+
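find_upper/9 and search_depth/9 above implement the chars_limit search: the candidate depth is doubled until the printed length exceeds the limit (or nothing is elided any more), after which the interval between the last fitting depth and the first overflowing one is bisected. A standalone sketch of that strategy, simplified to return the largest depth that still fits; Cost/1 stands for a hypothetical monotone depth-to-length fun and is not part of this module:

    %% Minimal sketch, not OTP code. Assumes Cost(1) =< Limit and that
    %% Cost(Depth) exceeds Limit for some sufficiently large Depth.
    fit_depth(Cost, Limit) ->
        double_up(Cost, Limit, 1, 2).

    double_up(Cost, Limit, Lower, Delta) ->
        Upper = Lower + Delta,
        case Cost(Upper) =< Limit of
            true  -> double_up(Cost, Limit, Upper, Delta * 2); % keep doubling
            false -> bisect(Cost, Limit, Lower, Upper)         % overshot: bisect
        end.

    bisect(_Cost, _Limit, Lower, Upper) when Upper - Lower =:= 1 ->
        Lower;
    bisect(Cost, Limit, Lower, Upper) ->
        Mid = (Lower + Upper) div 2,
        case Cost(Mid) =< Limit of
            true  -> bisect(Cost, Limit, Mid, Upper);
            false -> bisect(Cost, Limit, Lower, Mid)
        end.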
%% The depth (D) is used for extracting and counting the characters to
%% print. The structure is kept so that the returned intermediate
%% format can be formatted. The separators (list, tuple, record, map) are
%% counted but need to be added later.
%% D =/= 0
-print_length([], _D, _RF, _Enc, _Str) ->
- {"[]", 2};
-print_length({}, _D, _RF, _Enc, _Str) ->
- {"{}", 2};
-print_length(#{}=M, _D, _RF, _Enc, _Str) when map_size(M) =:= 0 ->
- {"#{}", 3};
-print_length(Atom, _D, _RF, Enc, _Str) when is_atom(Atom) ->
+print_length([], _D, _T, _RF, _Enc, _Str) ->
+ {"[]", 2, 0, no_more};
+print_length({}, _D, _T, _RF, _Enc, _Str) ->
+ {"{}", 2, 0, no_more};
+print_length(#{}=M, _D, _T, _RF, _Enc, _Str) when map_size(M) =:= 0 ->
+ {"#{}", 3, 0, no_more};
+print_length(Atom, _D, _T, _RF, Enc, _Str) when is_atom(Atom) ->
S = write_atom(Atom, Enc),
- {S, lists:flatlength(S)};
-print_length(List, D, RF, Enc, Str) when is_list(List) ->
+ {S, string:length(S), 0, no_more};
+print_length(List, D, T, RF, Enc, Str) when is_list(List) ->
%% only flat lists are "printable"
- case Str andalso printable_list(List, D, Enc) of
+ case Str andalso printable_list(List, D, T, Enc) of
true ->
%% print as string, escaping double-quotes in the list
S = write_string(List, Enc),
- {S, length(S)};
- %% Truncated lists could break some existing code.
- % {true, Prefix} ->
- % S = write_string(Prefix, Enc),
- % {[S | "..."], 3 + length(S)};
+ {S, string:length(S), 0, no_more};
+ {true, Prefix} ->
+ %% Truncated lists when T < 0 could break some existing code.
+ S = write_string(Prefix, Enc),
+ %% NumOfDots = 0 to avoid looping--increasing the depth
+ %% does not make Prefix longer.
+ {[S | "..."], 3 + string:length(S), 0, no_more};
false ->
- print_length_list(List, D, RF, Enc, Str)
+ case print_length_list(List, D, T, RF, Enc, Str) of
+ {What, Len, Dots, _More} when Dots > 0 ->
+ More = fun(T1, Dd) ->
+ ?FUNCTION_NAME(List, D+Dd, T1, RF, Enc, Str)
+ end,
+ {What, Len, Dots, More};
+ If ->
+ If
+ end
end;
-print_length(Fun, _D, _RF, _Enc, _Str) when is_function(Fun) ->
+print_length(Fun, _D, _T, _RF, _Enc, _Str) when is_function(Fun) ->
S = io_lib:write(Fun),
- {S, iolist_size(S)};
-print_length(R, D, RF, Enc, Str) when is_atom(element(1, R)),
- is_function(RF) ->
+ {S, iolist_size(S), 0, no_more};
+print_length(R, D, T, RF, Enc, Str) when is_atom(element(1, R)),
+ is_function(RF) ->
case RF(element(1, R), tuple_size(R) - 1) of
no ->
- print_length_tuple(R, D, RF, Enc, Str);
+ print_length_tuple(R, D, T, RF, Enc, Str);
RDefs ->
- print_length_record(R, D, RF, RDefs, Enc, Str)
+ print_length_record(R, D, T, RF, RDefs, Enc, Str)
end;
-print_length(Tuple, D, RF, Enc, Str) when is_tuple(Tuple) ->
- print_length_tuple(Tuple, D, RF, Enc, Str);
-print_length(Map, D, RF, Enc, Str) when is_map(Map) ->
- print_length_map(Map, D, RF, Enc, Str);
-print_length(<<>>, _D, _RF, _Enc, _Str) ->
- {"<<>>", 4};
-print_length(<<_/bitstring>>, 1, _RF, _Enc, _Str) ->
- {"<<...>>", 7};
-print_length(<<_/bitstring>>=Bin, D, _RF, Enc, Str) ->
- case bit_size(Bin) rem 8 of
- 0 ->
- D1 = D - 1,
- case Str andalso printable_bin(Bin, D1, Enc) of
- {true, List} when is_list(List) ->
- S = io_lib:write_string(List, $"), %"
- {[$<,$<,S,$>,$>], 4 + length(S)};
- {false, List} when is_list(List) ->
- S = io_lib:write_string(List, $"), %"
- {[$<,$<,S,"/utf8>>"], 9 + length(S)};
- {true, true, Prefix} ->
- S = io_lib:write_string(Prefix, $"), %"
- {[$<,$<, S | "...>>"], 7 + length(S)};
- {false, true, Prefix} ->
- S = io_lib:write_string(Prefix, $"), %"
- {[$<,$<, S | "/utf8...>>"], 12 + length(S)};
- false ->
- S = io_lib:write(Bin, D),
- {{bin,S}, iolist_size(S)}
- end;
- _ ->
- S = io_lib:write(Bin, D),
- {{bin,S}, iolist_size(S)}
+print_length(Tuple, D, T, RF, Enc, Str) when is_tuple(Tuple) ->
+ print_length_tuple(Tuple, D, T, RF, Enc, Str);
+print_length(Map, D, T, RF, Enc, Str) when is_map(Map) ->
+ print_length_map(Map, D, T, RF, Enc, Str);
+print_length(<<>>, _D, _T, _RF, _Enc, _Str) ->
+ {"<<>>", 4, 0, no_more};
+print_length(<<_/bitstring>> = Bin, 1, _T, RF, Enc, Str) ->
+ More = fun(T1, Dd) -> ?FUNCTION_NAME(Bin, 1+Dd, T1, RF, Enc, Str) end,
+ {"<<...>>", 7, 3, More};
+print_length(<<_/bitstring>> = Bin, D, T, RF, Enc, Str) ->
+ D1 = D - 1,
+ case
+ Str andalso
+ (bit_size(Bin) rem 8) =:= 0 andalso
+ printable_bin0(Bin, D1, tsub(T, 6), Enc)
+ of
+ {true, List} when is_list(List) ->
+ S = io_lib:write_string(List, $"), %"
+ {[$<,$<,S,$>,$>], 4 + length(S), 0, no_more};
+ {false, List} when is_list(List) ->
+ S = io_lib:write_string(List, $"), %"
+ {[$<,$<,S,"/utf8>>"], 9 + string:length(S), 0, no_more};
+ {true, true, Prefix} ->
+ S = io_lib:write_string(Prefix, $"), %"
+ More = fun(T1, Dd) ->
+ ?FUNCTION_NAME(Bin, D+Dd, T1, RF, Enc, Str)
+ end,
+ {[$<,$<,S|"...>>"], 7 + length(S), 3, More};
+ {false, true, Prefix} ->
+ S = io_lib:write_string(Prefix, $"), %"
+ More = fun(T1, Dd) ->
+ ?FUNCTION_NAME(Bin, D+Dd, T1, RF, Enc, Str)
+ end,
+ {[$<,$<,S|"/utf8...>>"], 12 + string:length(S), 3, More};
+ false ->
+ case io_lib:write_binary(Bin, D, T) of
+ {S, <<>>} ->
+ {{bin, S}, iolist_size(S), 0, no_more};
+ {S, _Rest} ->
+ More = fun(T1, Dd) ->
+ ?FUNCTION_NAME(Bin, D+Dd, T1, RF, Enc, Str)
+ end,
+ {{bin, S}, iolist_size(S), 3, More}
+ end
end;
-print_length(Term, _D, _RF, _Enc, _Str) ->
+print_length(Term, _D, _T, _RF, _Enc, _Str) ->
S = io_lib:write(Term),
%% S can contain unicode, so iolist_size(S) cannot be used here
- {S, string:length(S)}.
-
-print_length_map(_Map, 1, _RF, _Enc, _Str) ->
- {"#{...}", 6};
-print_length_map(Map, D, RF, Enc, Str) when is_map(Map) ->
- Pairs = print_length_map_pairs(limit_map(maps:iterator(Map), D, []), D, RF, Enc, Str),
- {{map, Pairs}, list_length(Pairs, 3)}.
-
-limit_map(_I, 0, Acc) ->
- Acc;
-limit_map(I, D, Acc) ->
- case maps:next(I) of
- {K, V, NextI} ->
- limit_map(NextI, D-1, [{K,V} | Acc]);
- none ->
- Acc
- end.
-
-print_length_map_pairs([], _D, _RF, _Enc, _Str) ->
+ {S, string:length(S), 0, no_more}.
+
+print_length_map(Map, 1, _T, RF, Enc, Str) ->
+ More = fun(T1, Dd) -> ?FUNCTION_NAME(Map, 1+Dd, T1, RF, Enc, Str) end,
+ {"#{...}", 6, 3, More};
+print_length_map(Map, D, T, RF, Enc, Str) when is_map(Map) ->
+ Next = maps:next(maps:iterator(Map)),
+ PairsS = print_length_map_pairs(Next, D, D - 1, tsub(T, 3), RF, Enc, Str),
+ {Len, Dots} = list_length(PairsS, 3, 0),
+ {{map, PairsS}, Len, Dots, no_more}.
+
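print_length_map/6 now walks the map lazily with maps:iterator/1 and maps:next/1, so traversal can stop as soon as the depth or character budget runs out instead of first materializing up to D pairs. The iteration pattern in isolation (illustrative only, not part of the patch):

    %% Sums the values of a map without building an intermediate pair list.
    sum_values(Map) ->
        sum_values_1(maps:next(maps:iterator(Map)), 0).

    sum_values_1(none, Acc) ->
        Acc;
    sum_values_1({_Key, Value, Iter}, Acc) ->
        sum_values_1(maps:next(Iter), Acc + Value).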
+print_length_map_pairs(none, _D, _D0, _T, _RF, _Enc, _Str) ->
[];
-print_length_map_pairs(_Pairs, 1, _RF, _Enc, _Str) ->
- {dots, 3};
-print_length_map_pairs([{K, V} | Pairs], D, RF, Enc, Str) ->
- [print_length_map_pair(K, V, D - 1, RF, Enc, Str) |
- print_length_map_pairs(Pairs, D - 1, RF, Enc, Str)].
-
-print_length_map_pair(K, V, D, RF, Enc, Str) ->
- {KS, KL} = print_length(K, D, RF, Enc, Str),
- {VS, VL} = print_length(V, D, RF, Enc, Str),
+print_length_map_pairs(Term, D, D0, T, RF, Enc, Str) when D =:= 1; T =:= 0 ->
+ More = fun(T1, Dd) ->
+ ?FUNCTION_NAME(Term, D+Dd, D0, T1, RF, Enc, Str)
+ end,
+ {dots, 3, 3, More};
+print_length_map_pairs({K, V, Iter}, D, D0, T, RF, Enc, Str) ->
+ Pair1 = print_length_map_pair(K, V, D0, tsub(T, 1), RF, Enc, Str),
+ {_, Len1, _, _} = Pair1,
+ Next = maps:next(Iter),
+ [Pair1 |
+ print_length_map_pairs(Next, D - 1, D0, tsub(T, Len1+1), RF, Enc, Str)].
+
+print_length_map_pair(K, V, D, T, RF, Enc, Str) ->
+ {_, KL, KD, _} = P1 = print_length(K, D, T, RF, Enc, Str),
KL1 = KL + 4,
- {{map_pair, {KS, KL1}, {VS, VL}}, KL1 + VL}.
-
-print_length_tuple(_Tuple, 1, _RF, _Enc, _Str) ->
- {"{...}", 5};
-print_length_tuple(Tuple, D, RF, Enc, Str) ->
- L = print_length_list1(tuple_to_list(Tuple), D, RF, Enc, Str),
+ {_, VL, VD, _} = P2 = print_length(V, D, tsub(T, KL1), RF, Enc, Str),
+ {{map_pair, P1, P2}, KL1 + VL, KD + VD, no_more}.
+
+print_length_tuple(Tuple, 1, _T, RF, Enc, Str) ->
+ More = fun(T1, Dd) -> ?FUNCTION_NAME(Tuple, 1+Dd, T1, RF, Enc, Str) end,
+ {"{...}", 5, 3, More};
+print_length_tuple(Tuple, D, T, RF, Enc, Str) ->
+ L = print_length_tuple1(Tuple, 1, D, tsub(T, 2), RF, Enc, Str),
IsTagged = is_atom(element(1, Tuple)) and (tuple_size(Tuple) > 1),
- {{tuple,IsTagged,L}, list_length(L, 2)}.
+ {Len, Dots} = list_length(L, 2, 0),
+ {{tuple,IsTagged,L}, Len, Dots, no_more}.
-print_length_record(_Tuple, 1, _RF, _RDefs, _Enc, _Str) ->
- {"{...}", 5};
-print_length_record(Tuple, D, RF, RDefs, Enc, Str) ->
+print_length_tuple1(Tuple, I, _D, _T, _RF, _Enc, _Str)
+ when I > tuple_size(Tuple) ->
+ [];
+print_length_tuple1(Tuple, I, D, T, RF, Enc, Str) when D =:= 1; T =:= 0 ->
+ More = fun(T1, Dd) -> ?FUNCTION_NAME(Tuple, I, D+Dd, T1, RF, Enc, Str) end,
+ {dots, 3, 3, More};
+print_length_tuple1(Tuple, I, D, T, RF, Enc, Str) ->
+ E = element(I, Tuple),
+ T1 = tsub(T, 1),
+ {_, Len1, _, _} = Elem1 = print_length(E, D - 1, T1, RF, Enc, Str),
+ T2 = tsub(T1, Len1),
+ [Elem1 | print_length_tuple1(Tuple, I + 1, D - 1, T2, RF, Enc, Str)].
+
+print_length_record(Tuple, 1, _T, RF, RDefs, Enc, Str) ->
+ More = fun(T1, Dd) ->
+ ?FUNCTION_NAME(Tuple, 1+Dd, T1, RF, RDefs, Enc, Str)
+ end,
+ {"{...}", 5, 3, More};
+print_length_record(Tuple, D, T, RF, RDefs, Enc, Str) ->
Name = [$# | write_atom(element(1, Tuple), Enc)],
- NameL = length(Name),
- Elements = tl(tuple_to_list(Tuple)),
- L = print_length_fields(RDefs, D - 1, Elements, RF, Enc, Str),
- {{record, [{Name,NameL} | L]}, list_length(L, NameL + 2)}.
-
-print_length_fields([], _D, [], _RF, _Enc, _Str) ->
+ NameL = string:length(Name),
+ T1 = tsub(T, NameL+2),
+ L = print_length_fields(RDefs, D - 1, T1, Tuple, 2, RF, Enc, Str),
+ {Len, Dots} = list_length(L, NameL + 2, 0),
+ {{record, [{Name,NameL} | L]}, Len, Dots, no_more}.
+
+print_length_fields([], _D, _T, Tuple, I, _RF, _Enc, _Str)
+ when I > tuple_size(Tuple) ->
[];
-print_length_fields(_, 1, _, _RF, _Enc, _Str) ->
- {dots, 3};
-print_length_fields([Def | Defs], D, [E | Es], RF, Enc, Str) ->
- [print_length_field(Def, D - 1, E, RF, Enc, Str) |
- print_length_fields(Defs, D - 1, Es, RF, Enc, Str)].
-
-print_length_field(Def, D, E, RF, Enc, Str) ->
+print_length_fields(Term, D, T, Tuple, I, RF, Enc, Str)
+ when D =:= 1; T =:= 0 ->
+ More = fun(T1, Dd) ->
+ ?FUNCTION_NAME(Term, D+Dd, T1, Tuple, I, RF, Enc, Str)
+ end,
+ {dots, 3, 3, More};
+print_length_fields([Def | Defs], D, T, Tuple, I, RF, Enc, Str) ->
+ E = element(I, Tuple),
+ T1 = tsub(T, 1),
+ Field1 = print_length_field(Def, D - 1, T1, E, RF, Enc, Str),
+ {_, Len1, _, _} = Field1,
+ T2 = tsub(T1, Len1),
+ [Field1 |
+ print_length_fields(Defs, D - 1, T2, Tuple, I + 1, RF, Enc, Str)].
+
+print_length_field(Def, D, T, E, RF, Enc, Str) ->
Name = write_atom(Def, Enc),
- {S, L} = print_length(E, D, RF, Enc, Str),
- NameL = length(Name) + 3,
- {{field, Name, NameL, {S, L}}, NameL + L}.
+ NameL = string:length(Name) + 3,
+ {_, Len, Dots, _} =
+ Field = print_length(E, D, tsub(T, NameL), RF, Enc, Str),
+ {{field, Name, NameL, Field}, NameL + Len, Dots, no_more}.
-print_length_list(List, D, RF, Enc, Str) ->
- L = print_length_list1(List, D, RF, Enc, Str),
- {{list, L}, list_length(L, 2)}.
+print_length_list(List, D, T, RF, Enc, Str) ->
+ L = print_length_list1(List, D, tsub(T, 2), RF, Enc, Str),
+ {Len, Dots} = list_length(L, 2, 0),
+ {{list, L}, Len, Dots, no_more}.
-print_length_list1([], _D, _RF, _Enc, _Str) ->
+print_length_list1([], _D, _T, _RF, _Enc, _Str) ->
[];
-print_length_list1(_, 1, _RF, _Enc, _Str) ->
- {dots, 3};
-print_length_list1([E | Es], D, RF, Enc, Str) ->
- [print_length(E, D - 1, RF, Enc, Str) |
- print_length_list1(Es, D - 1, RF, Enc, Str)];
-print_length_list1(E, D, RF, Enc, Str) ->
- print_length(E, D - 1, RF, Enc, Str).
-
-list_length([], Acc) ->
- Acc;
-list_length([{_, Len} | Es], Acc) ->
- list_length_tail(Es, Acc + Len);
-list_length({_, Len}, Acc) ->
- Acc + Len.
-
-list_length_tail([], Acc) ->
- Acc;
-list_length_tail([{_,Len} | Es], Acc) ->
- list_length_tail(Es, Acc + 1 + Len);
-list_length_tail({_, Len}, Acc) ->
- Acc + 1 + Len.
+print_length_list1(Term, D, T, RF, Enc, Str) when D =:= 1; T =:= 0 ->
+ More = fun(T1, Dd) -> ?FUNCTION_NAME(Term, D+Dd, T1, RF, Enc, Str) end,
+ {dots, 3, 3, More};
+print_length_list1([E | Es], D, T, RF, Enc, Str) ->
+ {_, Len1, _, _} = Elem1 = print_length(E, D - 1, tsub(T, 1), RF, Enc, Str),
+ [Elem1 | print_length_list1(Es, D - 1, tsub(T, Len1 + 1), RF, Enc, Str)];
+print_length_list1(E, D, T, RF, Enc, Str) ->
+ print_length(E, D - 1, T, RF, Enc, Str).
+
+list_length([], Acc, DotsAcc) ->
+ {Acc, DotsAcc};
+list_length([{_, Len, Dots, _} | Es], Acc, DotsAcc) ->
+ list_length_tail(Es, Acc + Len, DotsAcc + Dots);
+list_length({_, Len, Dots, _}, Acc, DotsAcc) ->
+ {Acc + Len, DotsAcc + Dots}.
+
+list_length_tail([], Acc, DotsAcc) ->
+ {Acc, DotsAcc};
+list_length_tail([{_, Len, Dots, _} | Es], Acc, DotsAcc) ->
+ list_length_tail(Es, Acc + 1 + Len, DotsAcc + Dots);
+list_length_tail({_, Len, Dots, _}, Acc, DotsAcc) ->
+ {Acc + 1 + Len, DotsAcc + Dots}.
%% ?CHARS printable characters has depth 1.
-define(CHARS, 4).
%% only flat lists are "printable"
-printable_list(_L, 1, _Enc) ->
+printable_list(_L, 1, _T, _Enc) ->
false;
-printable_list(L, _D, latin1) ->
+printable_list(L, _D, T, latin1) when T < 0 ->
io_lib:printable_latin1_list(L);
-printable_list(L, _D, _Uni) ->
+printable_list(L, _D, T, Enc) when T >= 0 ->
+ case slice(L, tsub(T, 2)) of
+ {prefix, ""} ->
+ false;
+ {prefix, Prefix} when Enc =:= latin1 ->
+ io_lib:printable_latin1_list(Prefix) andalso {true, Prefix};
+ {prefix, Prefix} ->
+ %% Probably an overestimation.
+ io_lib:printable_list(Prefix) andalso {true, Prefix};
+ all when Enc =:= latin1 ->
+ io_lib:printable_latin1_list(L);
+ all ->
+ io_lib:printable_list(L)
+ end;
+printable_list(L, _D, T, _Uni) when T < 0 ->
io_lib:printable_list(L).
-printable_bin(Bin, D, Enc) when D >= 0, ?CHARS * D =< byte_size(Bin) ->
- printable_bin(Bin, erlang:min(?CHARS * D, byte_size(Bin)), D, Enc);
-printable_bin(Bin, D, Enc) ->
- printable_bin(Bin, byte_size(Bin), D, Enc).
+slice(L, N) ->
+ case string:length(L) =< N of
+ true ->
+ all;
+ false ->
+ {prefix, string:slice(L, 0, N)}
+ end.
+
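slice/2 decides whether a printable list fits the remaining character budget in full or must be cut down to a prefix, and printable_list/4 turns a {prefix, ...} answer into {true, Prefix} so the caller prints a truncated string. A reading aid (a hypothetical helper, not part of the module):

    slice_examples() ->
        all              = slice("abc", 5),      %% whole list fits the budget
        {prefix, "abcd"} = slice("abcdefgh", 4), %% otherwise keep an N-character prefix
        ok.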
+printable_bin0(Bin, D, T, Enc) ->
+ Len = case D >= 0 of
+ true ->
+ %% Use byte_size() also if Enc =/= latin1.
+ DChars = erlang:min(?CHARS * D, byte_size(Bin)),
+ case T >= 0 of
+ true ->
+ erlang:min(T, DChars);
+ false ->
+ DChars
+ end;
+ false when T < 0 ->
+ byte_size(Bin);
+ false when T >= 0 -> % cannot happen
+ T
+ end,
+ printable_bin(Bin, Len, D, Enc).
printable_bin(Bin, Len, D, latin1) ->
N = erlang:min(20, Len),
@@ -689,28 +871,70 @@ write_string(S, latin1) ->
write_string(S, _Uni) ->
io_lib:write_string(S, $"). %"
+expand({_, _, _Dots=0, no_more} = If, _T, _Dd) -> If;
+%% expand({{list,L}, _Len, _, no_more}, T, Dd) ->
+%% {NL, NLen, NDots} = expand_list(L, T, Dd, 2),
+%% {{list,NL}, NLen, NDots, no_more};
+expand({{tuple,IsTagged,L}, _Len, _, no_more}, T, Dd) ->
+ {NL, NLen, NDots} = expand_list(L, T, Dd, 2),
+ {{tuple,IsTagged,NL}, NLen, NDots, no_more};
+expand({{map, Pairs}, _Len, _, no_more}, T, Dd) ->
+ {NPairs, NLen, NDots} = expand_list(Pairs, T, Dd, 3),
+ {{map, NPairs}, NLen, NDots, no_more};
+expand({{map_pair, K, V}, _Len, _, no_more}, T, Dd) ->
+ {_, KL, KD, _} = P1 = expand(K, tsub(T, 1), Dd),
+ KL1 = KL + 4,
+ {_, VL, VD, _} = P2 = expand(V, tsub(T, KL1), Dd),
+ {{map_pair, P1, P2}, KL1 + VL, KD + VD, no_more};
+expand({{record, [{Name,NameL} | L]}, _Len, _, no_more}, T, Dd) ->
+ {NL, NLen, NDots} = expand_list(L, T, Dd, NameL + 2),
+ {{record, [{Name,NameL} | NL]}, NLen, NDots, no_more};
+expand({{field, Name, NameL, Field}, _Len, _, no_more}, T, Dd) ->
+ F = {_S, L, Dots, _} = expand(Field, tsub(T, NameL), Dd),
+ {{field, Name, NameL, F}, NameL + L, Dots, no_more};
+expand({_, _, _, More}, T, Dd) ->
+ More(T, Dd).
+
+expand_list(Ifs, T, Dd, L0) ->
+ L = expand_list(Ifs, tsub(T, L0), Dd),
+ {Len, Dots} = list_length(L, L0, 0),
+ {L, Len, Dots}.
+
+expand_list([], _T, _Dd) ->
+ [];
+expand_list([If | Ifs], T, Dd) ->
+ {_, Len1, _, _} = Elem1 = expand(If, tsub(T, 1), Dd),
+ [Elem1 | expand_list(Ifs, tsub(T, Len1 + 1), Dd)];
+expand_list({_, _, _, More}, T, Dd) ->
+ More(T, Dd).
+
+%% Make sure T does not change sign.
+tsub(T, _) when T < 0 -> T;
+tsub(T, E) when T >= E -> T - E;
+tsub(_, _) -> 0.
+
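tsub/2 keeps the running character budget from changing sign: a negative budget means "no limit" and is passed through untouched, while a non-negative budget saturates at zero. A reading aid mirroring the three clauses (a hypothetical helper, not part of the module):

    tsub_examples() ->
        -1 = tsub(-1, 100), %% negative budget: unlimited, left as is
        2  = tsub(5, 3),    %% ordinary subtraction while within the budget
        0  = tsub(3, 5),    %% would go negative, so saturate at zero
        ok.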
%% Throw 'no_good' if the indentation exceeds half the line length
%% unless there is room for M characters on the line.
-cind({_S, Len}, Col, Ll, M, Ind, LD, W) when Len < Ll - Col - LD,
- Len + W + LD =< M ->
+cind({_S, Len, _, _}, Col, Ll, M, Ind, LD, W) when Len < Ll - Col - LD,
+ Len + W + LD =< M ->
Ind;
-cind({{list,L}, _Len}, Col, Ll, M, Ind, LD, W) ->
+cind({{list,L}, _Len, _, _}, Col, Ll, M, Ind, LD, W) ->
cind_list(L, Col + 1, Ll, M, Ind, LD, W + 1);
-cind({{tuple,true,L}, _Len}, Col, Ll, M, Ind, LD, W) ->
+cind({{tuple,true,L}, _Len, _, _}, Col, Ll, M, Ind, LD, W) ->
cind_tag_tuple(L, Col, Ll, M, Ind, LD, W + 1);
-cind({{tuple,false,L}, _Len}, Col, Ll, M, Ind, LD, W) ->
+cind({{tuple,false,L}, _Len, _, _}, Col, Ll, M, Ind, LD, W) ->
cind_list(L, Col + 1, Ll, M, Ind, LD, W + 1);
-cind({{map,Pairs},_Len}, Col, Ll, M, Ind, LD, W) ->
+cind({{map,Pairs}, _Len, _, _}, Col, Ll, M, Ind, LD, W) ->
cind_map(Pairs, Col + 2, Ll, M, Ind, LD, W + 2);
-cind({{record,[{_Name,NLen} | L]}, _Len}, Col, Ll, M, Ind, LD, W) ->
+cind({{record,[{_Name,NLen} | L]}, _Len, _, _}, Col, Ll, M, Ind, LD, W) ->
cind_record(L, NLen, Col, Ll, M, Ind, LD, W + NLen + 1);
-cind({{bin,_S}, _Len}, _Col, _Ll, _M, Ind, _LD, _W) ->
+cind({{bin,_S}, _Len, _, _}, _Col, _Ll, _M, Ind, _LD, _W) ->
Ind;
-cind({_S, _Len}, _Col, _Ll, _M, Ind, _LD, _W) ->
+cind({_S,_Len,_,_}, _Col, _Ll, _M, Ind, _LD, _W) ->
Ind.
-cind_tag_tuple([{_Tag,Tlen} | L], Col, Ll, M, Ind, LD, W) ->
+cind_tag_tuple([{_Tag,Tlen,_,_} | L], Col, Ll, M, Ind, LD, W) ->
TagInd = Tlen + 2,
Tcol = Col + TagInd,
if
@@ -732,9 +956,9 @@ cind_map([P | Ps], Col, Ll, M, Ind, LD, W) ->
PW = cind_pair(P, Col, Ll, M, Ind, last_depth(Ps, LD), W),
cind_pairs_tail(Ps, Col, Col + PW, Ll, M, Ind, LD, W + PW);
cind_map(_, _Col, _Ll, _M, Ind, _LD, _W) ->
- Ind.
+ Ind. % cannot happen
-cind_pairs_tail([{_, Len}=P | Ps], Col0, Col, Ll, M, Ind, LD, W) ->
+cind_pairs_tail([{_, Len, _, _} = P | Ps], Col0, Col, Ll, M, Ind, LD, W) ->
LD1 = last_depth(Ps, LD),
ELen = 1 + Len,
if
@@ -748,7 +972,7 @@ cind_pairs_tail([{_, Len}=P | Ps], Col0, Col, Ll, M, Ind, LD, W) ->
cind_pairs_tail(_, _Col0, _Col, _Ll, _M, Ind, _LD, _W) ->
Ind.
-cind_pair({{map_pair, _Key, _Value}, Len}=Pair, Col, Ll, M, _Ind, LD, W)
+cind_pair({{map_pair, _Key, _Value}, Len, _, _}=Pair, Col, Ll, M, _Ind, LD, W)
when Len < Ll - Col - LD, Len + W + LD =< M ->
if
?ATM_PAIR(Pair) ->
@@ -756,7 +980,7 @@ cind_pair({{map_pair, _Key, _Value}, Len}=Pair, Col, Ll, M, _Ind, LD, W)
true ->
Ll
end;
-cind_pair({{map_pair, K, V}, _Len}, Col0, Ll, M, Ind, LD, W0) ->
+cind_pair({{map_pair, K, V}, _Len, _, _}, Col0, Ll, M, Ind, LD, W0) ->
cind(K, Col0, Ll, M, Ind, LD, W0),
I = map_value_indent(Ind),
cind(V, Col0 + I, Ll, M, Ind, LD, 0),
@@ -778,7 +1002,7 @@ cind_record([F | Fs], Nlen, Col0, Ll, M, Ind, LD, W0) ->
cind_record(_, _Nlen, _Col, _Ll, _M, Ind, _LD, _W) ->
Ind.
-cind_fields_tail([{_, Len}=F | Fs], Col0, Col, Ll, M, Ind, LD, W) ->
+cind_fields_tail([{_, Len, _, _} = F | Fs], Col0, Col, Ll, M, Ind, LD, W) ->
LD1 = last_depth(Fs, LD),
ELen = 1 + Len,
if
@@ -792,7 +1016,7 @@ cind_fields_tail([{_, Len}=F | Fs], Col0, Col, Ll, M, Ind, LD, W) ->
cind_fields_tail(_, _Col0, _Col, _Ll, _M, Ind, _LD, _W) ->
Ind.
-cind_field({{field, _N, _NL, _F}, Len}=Fl, Col, Ll, M, _Ind, LD, W)
+cind_field({{field, _N, _NL, _F}, Len, _, _}=Fl, Col, Ll, M, _Ind, LD, W)
when Len < Ll - Col - LD, Len + W + LD =< M ->
if
?ATM_FLD(Fl) ->
@@ -800,7 +1024,7 @@ cind_field({{field, _N, _NL, _F}, Len}=Fl, Col, Ll, M, _Ind, LD, W)
true ->
Ll
end;
-cind_field({{field, _Name, NameL, F}, _Len}, Col0, Ll, M, Ind, LD, W0) ->
+cind_field({{field, _Name, NameL, F},_Len,_,_}, Col0, Ll, M, Ind, LD, W0) ->
{Col, W} = cind_rec(NameL, Col0, Ll, M, Ind, W0 + NameL),
cind(F, Col, Ll, M, Ind, LD, W),
Ll.
@@ -823,7 +1047,7 @@ cind_rec(RInd, Col0, Ll, M, Ind, W0) ->
throw(no_good)
end.
-cind_list({dots, _}, _Col0, _Ll, _M, Ind, _LD, _W) ->
+cind_list({dots, _, _, _}, _Col0, _Ll, _M, Ind, _LD, _W) ->
Ind;
cind_list([E | Es], Col0, Ll, M, Ind, LD, W) ->
WE = cind_element(E, Col0, Ll, M, Ind, last_depth(Es, LD), W),
@@ -831,7 +1055,7 @@ cind_list([E | Es], Col0, Ll, M, Ind, LD, W) ->
cind_tail([], _Col0, _Col, _Ll, _M, Ind, _LD, _W) ->
Ind;
-cind_tail([{_, Len}=E | Es], Col0, Col, Ll, M, Ind, LD, W) ->
+cind_tail([{_, Len, _, _} = E | Es], Col0, Col, Ll, M, Ind, LD, W) ->
LD1 = last_depth(Es, LD),
ELen = 1 + Len,
if
@@ -842,9 +1066,9 @@ cind_tail([{_, Len}=E | Es], Col0, Col, Ll, M, Ind, LD, W) ->
WE = cind_element(E, Col0, Ll, M, Ind, LD1, 0),
cind_tail(Es, Col0, Col0 + WE, Ll, M, Ind, LD, WE)
end;
-cind_tail({dots, _}, _Col0, _Col, _Ll, _M, Ind, _LD, _W) ->
+cind_tail({dots, _, _, _}, _Col0, _Col, _Ll, _M, Ind, _LD, _W) ->
Ind;
-cind_tail({_, Len}=E, _Col0, Col, Ll, M, Ind, LD, W)
+cind_tail({_, Len, _, _}=E, _Col0, Col, Ll, M, Ind, LD, W)
when Len + 1 < Ll - Col - (LD + 1),
Len + 1 + W + (LD + 1) =< M,
?ATM(E) ->
@@ -852,7 +1076,7 @@ cind_tail({_, Len}=E, _Col0, Col, Ll, M, Ind, LD, W)
cind_tail(E, _Col0, Col, Ll, M, Ind, LD, _W) ->
cind(E, Col, Ll, M, Ind, LD + 1, 0).
-cind_element({_, Len}=E, Col, Ll, M, _Ind, LD, W)
+cind_element({_, Len, _, _}=E, Col, Ll, M, _Ind, LD, W)
when Len < Ll - Col - LD, Len + W + LD =< M, ?ATM(E) ->
Len;
cind_element(E, Col, Ll, M, Ind, LD, W) ->
diff --git a/lib/stdlib/src/proc_lib.erl b/lib/stdlib/src/proc_lib.erl
index 1991585c13..8d01840313 100644
--- a/lib/stdlib/src/proc_lib.erl
+++ b/lib/stdlib/src/proc_lib.erl
@@ -30,7 +30,7 @@
start/3, start/4, start/5, start_link/3, start_link/4, start_link/5,
hibernate/3,
init_ack/1, init_ack/2,
- init_p/3,init_p/5,format/1,format/2,format/3,
+ init_p/3,init_p/5,format/1,format/2,format/3,report_cb/1,
initial_call/1,
translate_initial_call/1,
stop/1, stop/3]).
@@ -40,6 +40,8 @@
-export_type([spawn_option/0]).
+-include("logger.hrl").
+
%%-----------------------------------------------------------------------------
-type priority_level() :: 'high' | 'low' | 'max' | 'normal'.
@@ -503,10 +505,13 @@ crash_report(exit, normal, _, _) -> ok;
crash_report(exit, shutdown, _, _) -> ok;
crash_report(exit, {shutdown,_}, _, _) -> ok;
crash_report(Class, Reason, StartF, Stacktrace) ->
- OwnReport = my_info(Class, Reason, StartF, Stacktrace),
- LinkReport = linked_info(self()),
- Rep = [OwnReport,LinkReport],
- error_logger:error_report(crash_report, Rep).
+ ?LOG_ERROR(#{label=>{proc_lib,crash},
+ report=>[my_info(Class, Reason, StartF, Stacktrace),
+ linked_info(self())]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun proc_lib:report_cb/1,
+ logger_formatter=>#{title=>"CRASH REPORT"},
+ error_logger=>#{tag=>error_report,type=>crash_report}}).
my_info(Class, Reason, [], Stacktrace) ->
my_info_1(Class, Reason, Stacktrace);
@@ -548,10 +553,10 @@ get_ancestors(Pid) ->
%% assumed that all report handlers call proc_lib:format().
get_messages(Pid) ->
Messages = get_process_messages(Pid),
- {messages, error_logger:limit_term(Messages)}.
+ {messages, logger:limit_term(Messages)}.
get_process_messages(Pid) ->
- Depth = error_logger:get_format_depth(),
+ Depth = logger:get_format_depth(),
case Pid =/= self() orelse Depth =:= unlimited of
true ->
{messages, Messages} = get_process_info(Pid, messages),
@@ -581,7 +586,7 @@ get_cleaned_dictionary(Pid) ->
cleaned_dict(Dict) ->
CleanDict = clean_dict(Dict),
- error_logger:limit_term(CleanDict).
+ logger:limit_term(CleanDict).
clean_dict([{'$ancestors',_}|Dict]) ->
clean_dict(Dict);
@@ -742,9 +747,18 @@ check({badrpc,Error}) -> Error;
check(Res) -> Res.
%%% -----------------------------------------------------------
-%%% Format (and write) a generated crash info structure.
+%%% Format a generated crash info structure.
%%% -----------------------------------------------------------
+-spec report_cb(CrashReport) -> {Format,Args} when
+ CrashReport :: #{label=>{proc_lib,crash},report=>[term()]},
+ Format :: io:format(),
+ Args :: [term()].
+report_cb(#{label:={proc_lib,crash},
+ report:=CrashReport}) ->
+ Depth = logger:get_format_depth(),
+ get_format_and_args(CrashReport, utf8, Depth).
+
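The crash report is now a logger event, and report_cb/1 is the callback a handler or formatter invokes to turn the raw report back into a format string and arguments. A minimal sketch of such a call, assuming the standard logger event map shape (msg := {report, Report}, with report_cb in the metadata); the function name is made up:

    render(#{msg := {report, Report}, meta := #{report_cb := Fun}}) ->
        {Format, Args} = Fun(Report),
        unicode:characters_to_list(io_lib:format(Format, Args)).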
-spec format(CrashReport) -> string() when
CrashReport :: [term()].
format(CrashReport) ->
@@ -762,61 +776,74 @@ format(CrashReport, Encoding) ->
Encoding :: latin1 | unicode | utf8,
Depth :: unlimited | pos_integer().
-format([OwnReport,LinkReport], Encoding, Depth) ->
+format(CrashReport, Encoding, Depth) ->
+ {F,A} = get_format_and_args(CrashReport, Encoding, Depth),
+ lists:flatten(io_lib:format(F,A)).
+
+get_format_and_args([OwnReport,LinkReport], Encoding, Depth) ->
Extra = {Encoding,Depth},
MyIndent = " ",
- OwnFormat = format_report(OwnReport, MyIndent, Extra),
- LinkFormat = format_link_report(LinkReport, MyIndent, Extra),
- Str = io_lib:format(" crasher:~n~ts neighbours:~n~ts",
- [OwnFormat, LinkFormat]),
- lists:flatten(Str).
+ {OwnFormat,OwnArgs} = format_report(OwnReport, MyIndent, Extra, [], []),
+ {LinkFormat,LinkArgs} = format_link_report(LinkReport, MyIndent, Extra, [], []),
+ {" crasher:~n"++OwnFormat++" neighbours:~n"++LinkFormat,OwnArgs++LinkArgs}.
-format_link_report([Link|Reps], Indent, Extra) ->
+format_link_report([], _Indent, _Extra, Format, Args) ->
+ {lists:flatten(lists:reverse(Format)),lists:append(lists:reverse(Args))};
+format_link_report([Link|Reps], Indent, Extra, Format, Args) ->
Rep = case Link of
{neighbour,Rep0} -> Rep0;
_ -> Link
end,
LinkIndent = [" ",Indent],
- [Indent,"neighbour:\n",format_report(Rep, LinkIndent, Extra)|
- format_link_report(Reps, Indent, Extra)];
-format_link_report(Rep, Indent, Extra) ->
- format_report(Rep, Indent, Extra).
-
-format_report(Rep, Indent, Extra) when is_list(Rep) ->
- format_rep(Rep, Indent, Extra);
-format_report(Rep, Indent, {Enc,unlimited}) ->
- io_lib:format("~s~"++modifier(Enc)++"p~n", [Indent, Rep]);
-format_report(Rep, Indent, {Enc,Depth}) ->
- io_lib:format("~s~"++modifier(Enc)++"P~n", [Indent, Rep, Depth]).
-
-format_rep([{initial_call,InitialCall}|Rep], Indent, Extra) ->
- [format_mfa(Indent, InitialCall, Extra)|format_rep(Rep, Indent, Extra)];
-format_rep([{error_info,{Class,Reason,StackTrace}}|Rep], Indent, Extra) ->
- [format_exception(Class, Reason, StackTrace, Extra)|
- format_rep(Rep, Indent, Extra)];
-format_rep([{Tag,Data}|Rep], Indent, Extra) ->
- [format_tag(Indent, Tag, Data, Extra)|format_rep(Rep, Indent, Extra)];
-format_rep(_, _, _Extra) ->
- [].
-
-format_exception(Class, Reason, StackTrace, {Enc,_}=Extra) ->
- PF = pp_fun(Extra),
- StackFun = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end,
- %% EI = " exception: ",
- EI = " ",
- [EI, lib:format_exception(1+length(EI), Class, Reason,
- StackTrace, StackFun, PF, Enc), "\n"].
+ {LinkFormat,LinkArgs} = format_report(Rep, LinkIndent, Extra, [], []),
+ F = "~sneighbour:\n"++LinkFormat,
+ A = [Indent|LinkArgs],
+ format_link_report(Reps, Indent, Extra, [F|Format], [A|Args]);
+format_link_report(Rep, Indent, Extra, Format, Args) ->
+ {F,A} = format_report(Rep, Indent, Extra, [], []),
+ format_link_report([], Indent, Extra, [F|Format],[A|Args]).
+
+format_report([], _Indent, _Extra, Format, Args) ->
+ {lists:flatten(lists:reverse(Format)),lists:append(lists:reverse(Args))};
+format_report([Rep|Reps], Indent, Extra, Format, Args) ->
+ {F,A} = format_rep(Rep, Indent, Extra),
+ format_report(Reps, Indent, Extra, [F|Format], [A|Args]);
+format_report(Rep, Indent, {Enc,unlimited}=Extra, Format, Args) ->
+ {F,A} = {"~s~"++modifier(Enc)++"p~n", [Indent, Rep]},
+ format_report([], Indent, Extra, [F|Format], [A|Args]);
+format_report(Rep, Indent, {Enc,Depth}=Extra, Format, Args) ->
+ {F,A} = {"~s~"++modifier(Enc)++"P~n", [Indent, Rep, Depth]},
+ format_report([], Indent, Extra, [F|Format], [A|Args]).
+
+format_rep({initial_call,InitialCall}, Indent, Extra) ->
+ format_mfa(Indent, InitialCall, Extra);
+format_rep({error_info,{Class,Reason,StackTrace}}, _Indent, Extra) ->
+ {lists:flatten(format_exception(Class, Reason, StackTrace, Extra)),[]};
+format_rep({Tag,Data}, Indent, Extra) ->
+ format_tag(Indent, Tag, Data, Extra).
format_mfa(Indent, {M,F,Args}=StartF, {Enc,_}=Extra) ->
try
A = length(Args),
- [Indent,"initial call: ",atom_to_list(M),$:,to_string(F, Enc),$/,
- integer_to_list(A),"\n"]
+ {lists:flatten([Indent,"initial call: ",atom_to_list(M),
+ $:,to_string(F, Enc),$/,integer_to_list(A),"\n"]),[]}
catch
error:_ ->
format_tag(Indent, initial_call, StartF, Extra)
end.
+format_tag(Indent, Tag, Data, {Enc,Depth}) ->
+ {P,Tl} = p(Enc, Depth),
+ {"~s~p: ~80.18" ++ P ++ "\n", [Indent, Tag, Data|Tl]}.
+
+format_exception(Class, Reason, StackTrace, {Enc,_}=Extra) ->
+ PF = pp_fun(Extra),
+ StackFun = fun(M, _F, _A) -> (M =:= erl_eval) or (M =:= ?MODULE) end,
+ %% EI = " exception: ",
+ EI = " ",
+ [EI, lib:format_exception(1+length(EI), Class, Reason,
+ StackTrace, StackFun, PF, Enc), "\n"].
+
to_string(A, latin1) ->
io_lib:write_atom_as_latin1(A);
to_string(A, _) ->
@@ -828,10 +855,6 @@ pp_fun({Enc,Depth}) ->
io_lib:format("~." ++ integer_to_list(I) ++ P, [Term|Tl])
end.
-format_tag(Indent, Tag, Data, {Enc,Depth}) ->
- {P,Tl} = p(Enc, Depth),
- io_lib:format("~s~p: ~80.18" ++ P ++ "\n", [Indent, Tag, Data|Tl]).
-
p(Encoding, Depth) ->
{Letter, Tl} = case Depth of
unlimited -> {"p", []};
diff --git a/lib/stdlib/src/shell.erl b/lib/stdlib/src/shell.erl
index e4153e7899..1be37672e7 100644
--- a/lib/stdlib/src/shell.erl
+++ b/lib/stdlib/src/shell.erl
@@ -1416,7 +1416,7 @@ pp(V, I, D, RT) ->
true
end,
io_lib_pretty:print(V, ([{column, I}, {line_length, columns()},
- {depth, D}, {max_chars, ?CHAR_MAX},
+ {depth, D}, {line_max_chars, ?CHAR_MAX},
{strings, Strings},
{record_print_fun, record_print_fun(RT)}]
++ enc())).
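The shell keeps the same per-line limit but passes it under its new name, line_max_chars; the separate chars_limit mechanism introduced by the otp_14983 work bounds the total number of printed characters instead. An illustrative use of that new option, assuming io_lib:format/3 as added by this change series:

    chars_limit_demo() ->
        %% {chars_limit, N} bounds the total size of the output, eliding
        %% subterms with "..." where needed.
        Limited = io_lib:format("~p", [lists:seq(1, 100)], [{chars_limit, 30}]),
        io:put_chars([Limited, $\n]).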
diff --git a/lib/stdlib/src/stdlib.appup.src b/lib/stdlib/src/stdlib.appup.src
index e4e3fb83e9..8d1cc09a8b 100644
--- a/lib/stdlib/src/stdlib.appup.src
+++ b/lib/stdlib/src/stdlib.appup.src
@@ -18,7 +18,9 @@
%% %CopyrightEnd%
{"%VSN%",
%% Up from - max one major revision back
- [{<<"3\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}], % OTP-20.*
+ [{<<"3\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.*
+ {<<"3\\.5(\\.[0-9]+)*">>,[restart_new_emulator]}],% OTP-21.*
%% Down to - max one major revision back
- [{<<"3\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.*
+ [{<<"3\\.4(\\.[0-9]+)*">>,[restart_new_emulator]}, % OTP-20.*
+ {<<"3\\.5(\\.[0-9]+)*">>,[restart_new_emulator]}] % OTP-20.*
}.
diff --git a/lib/stdlib/src/supervisor.erl b/lib/stdlib/src/supervisor.erl
index e56415650f..eb46ac611a 100644
--- a/lib/stdlib/src/supervisor.erl
+++ b/lib/stdlib/src/supervisor.erl
@@ -35,6 +35,20 @@
%% For release_handler only
-export([get_callback_module/1]).
+-include("logger.hrl").
+
+-define(report_error(Error, Reason, Child, SupName),
+ ?LOG_ERROR(#{label=>{supervisor,Error},
+ report=>[{supervisor,SupName},
+ {errorContext,Error},
+ {reason,Reason},
+ {offender,extract_child(Child)}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"SUPERVISOR REPORT"},
+ error_logger=>#{tag=>error_report,
+ type=>supervisor_report}})).
+
%%--------------------------------------------------------------------------
-export_type([sup_flags/0, child_spec/0, startchild_ret/0, strategy/0]).
@@ -340,7 +354,7 @@ start_children(Children, SupName) ->
{ok, Pid, _Extra} ->
{update,Child#child{pid = Pid}};
{error, Reason} ->
- report_error(start_error, Reason, Child, SupName),
+ ?report_error(start_error, Reason, Child, SupName),
{abort,{failed_to_start_child,Id,Reason}}
end
end,
@@ -565,8 +579,9 @@ handle_info({'EXIT', Pid, Reason}, State) ->
end;
handle_info(Msg, State) ->
- error_logger:error_msg("Supervisor received unexpected message: ~tp~n",
- [Msg]),
+ ?LOG_ERROR("Supervisor received unexpected message: ~tp~n",[Msg],
+ #{domain=>[beam,erlang,otp],
+ error_logger=>#{tag=>error}}),
{noreply, State}.
%%
@@ -683,7 +698,7 @@ restart_child(Pid, Reason, State) ->
end.
do_restart(Reason, Child, State) when ?is_permanent(Child) ->
- report_error(child_terminated, Reason, Child, State#state.name),
+ ?report_error(child_terminated, Reason, Child, State#state.name),
restart(Child, State);
do_restart(normal, Child, State) ->
NState = del_child(Child, State),
@@ -695,10 +710,10 @@ do_restart({shutdown, _Term}, Child, State) ->
NState = del_child(Child, State),
{ok, NState};
do_restart(Reason, Child, State) when ?is_transient(Child) ->
- report_error(child_terminated, Reason, Child, State#state.name),
+ ?report_error(child_terminated, Reason, Child, State#state.name),
restart(Child, State);
do_restart(Reason, Child, State) when ?is_temporary(Child) ->
- report_error(child_terminated, Reason, Child, State#state.name),
+ ?report_error(child_terminated, Reason, Child, State#state.name),
NState = del_child(Child, State),
{ok, NState}.
@@ -718,7 +733,7 @@ restart(Child, State) ->
Other
end;
{terminate, NState} ->
- report_error(shutdown, reached_max_restart_intensity,
+ ?report_error(shutdown, reached_max_restart_intensity,
Child, State#state.name),
{shutdown, del_child(Child, NState)}
end.
@@ -745,7 +760,7 @@ restart(simple_one_for_one, Child, State0) ->
NRestarts = State2#state.dynamic_restarts + 1,
State3 = State2#state{dynamic_restarts = NRestarts},
NState = dyn_store(ROldPid, A, State3),
- report_error(start_error, Error, Child, NState#state.name),
+ ?report_error(start_error, Error, Child, NState#state.name),
{{try_again, ROldPid}, NState}
end;
restart(one_for_one, #child{id=Id} = Child, State) ->
@@ -759,7 +774,7 @@ restart(one_for_one, #child{id=Id} = Child, State) ->
{ok, NState};
{error, Reason} ->
NState = set_pid(restarting(OldPid), Id, State),
- report_error(start_error, Reason, Child, State#state.name),
+ ?report_error(start_error, Reason, Child, State#state.name),
{{try_again,Id}, NState}
end;
restart(rest_for_one, #child{id=Id} = Child, #state{name=SupName} = State) ->
@@ -820,7 +835,7 @@ do_terminate(Child, SupName) when is_pid(Child#child.pid) ->
{error, normal} when not (?is_permanent(Child)) ->
ok;
{error, OtherReason} ->
- report_error(shutdown_error, OtherReason, Child, SupName)
+ ?report_error(shutdown_error, OtherReason, Child, SupName)
end,
ok;
do_terminate(_Child, _SupName) ->
@@ -924,7 +939,7 @@ terminate_dynamic_children(State) ->
end,
%% Unroll stacked errors and report them
dict:fold(fun(Reason, Ls, _) ->
- report_error(shutdown_error, Reason,
+ ?report_error(shutdown_error, Reason,
Child#child{pid=Ls}, State#state.name)
end, ok, EStack).
@@ -1385,14 +1400,6 @@ inPeriod(Then, Now, Period) ->
%%% ------------------------------------------------------
%%% Error and progress reporting.
%%% ------------------------------------------------------
-
-report_error(Error, Reason, Child, SupName) ->
- ErrorMsg = [{supervisor, SupName},
- {errorContext, Error},
- {reason, Reason},
- {offender, extract_child(Child)}],
- error_logger:error_report(supervisor_report, ErrorMsg).
-
extract_child(Child) when is_list(Child#child.pid) ->
[{nb_children, length(Child#child.pid)},
{id, Child#child.id},
@@ -1409,9 +1416,13 @@ extract_child(Child) ->
{child_type, Child#child.child_type}].
report_progress(Child, SupName) ->
- Progress = [{supervisor, SupName},
- {started, extract_child(Child)}],
- error_logger:info_report(progress, Progress).
+ ?LOG_INFO(#{label=>{supervisor,progress},
+ report=>[{supervisor,SupName},
+ {started,extract_child(Child)}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
format_status(terminate, [_PDict, State]) ->
State;
diff --git a/lib/stdlib/src/supervisor_bridge.erl b/lib/stdlib/src/supervisor_bridge.erl
index af1e046d30..39372935fa 100644
--- a/lib/stdlib/src/supervisor_bridge.erl
+++ b/lib/stdlib/src/supervisor_bridge.erl
@@ -21,6 +21,8 @@
-behaviour(gen_server).
+-include("logger.hrl").
+
%% External exports
-export([start_link/2, start_link/3]).
%% Internal exports
@@ -129,13 +131,22 @@ terminate_pid(Reason, #state{mod = Mod, child_state = ChildState}) ->
Mod:terminate(Reason, ChildState).
report_progress(Pid, Mod, StartArgs, SupName) ->
- Progress = [{supervisor, SupName},
- {started, [{pid, Pid}, {mfa, {Mod, init, [StartArgs]}}]}],
- error_logger:info_report(progress, Progress).
+ ?LOG_INFO(#{label=>{supervisor,progress},
+ report=>[{supervisor, SupName},
+ {started, [{pid, Pid},
+ {mfa, {Mod, init, [StartArgs]}}]}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"PROGRESS REPORT"},
+ error_logger=>#{tag=>info_report,type=>progress}}).
report_error(Error, Reason, #state{name = Name, pid = Pid, mod = Mod}) ->
- ErrorMsg = [{supervisor, Name},
- {errorContext, Error},
- {reason, Reason},
- {offender, [{pid, Pid}, {mod, Mod}]}],
- error_logger:error_report(supervisor_report, ErrorMsg).
+ ?LOG_ERROR(#{label=>{supervisor,error},
+ report=>[{supervisor, Name},
+ {errorContext, Error},
+ {reason, Reason},
+ {offender, [{pid, Pid}, {mod, Mod}]}]},
+ #{domain=>[beam,erlang,otp,sasl],
+ report_cb=>fun logger:format_otp_report/1,
+ logger_formatter=>#{title=>"SUPERVISOR REPORT"},
+ error_logger=>#{tag=>error_report,type=>supervisor_report}}).
diff --git a/lib/stdlib/src/sys.erl b/lib/stdlib/src/sys.erl
index 0c578acf21..0064414d6f 100644
--- a/lib/stdlib/src/sys.erl
+++ b/lib/stdlib/src/sys.erl
@@ -44,6 +44,7 @@
-type system_event() :: {'in', Msg :: _}
| {'in', Msg :: _, From :: _}
| {'out', Msg :: _, To :: _}
+ | {'out', Msg :: _, To :: _, State :: _}
| term().
-opaque dbg_opt() :: {'trace', 'true'}
| {'log',
@@ -56,7 +57,8 @@
MessagesIn :: non_neg_integer(),
MessagesOut :: non_neg_integer()}}
| {'log_to_file', file:io_device()}
- | {Func :: dbg_fun(), FuncState :: term()}.
+ | {Func :: dbg_fun(), FuncState :: term()}
+ | {FuncId :: term(), Func :: dbg_fun(), FuncState :: term()}.
-type dbg_fun() :: fun((FuncState :: _,
Event :: system_event(),
ProcState :: _) -> 'done' | (NewFuncState :: _)).
@@ -267,33 +269,41 @@ no_debug(Name, Timeout) -> send_system_msg(Name, {debug, no_debug}, Timeout).
-spec install(Name, FuncSpec) -> 'ok' when
Name :: name(),
- FuncSpec :: {Func, FuncState},
+ FuncSpec :: {Func, FuncState} | {FuncId, Func, FuncState},
+ FuncId :: term(),
Func :: dbg_fun(),
FuncState :: term().
install(Name, {Func, FuncState}) ->
- send_system_msg(Name, {debug, {install, {Func, FuncState}}}).
+ send_system_msg(Name, {debug, {install, {Func, FuncState}}});
+install(Name, {FuncId, Func, FuncState}) ->
+ send_system_msg(Name, {debug, {install, {FuncId, Func, FuncState}}}).
-spec install(Name, FuncSpec, Timeout) -> 'ok' when
Name :: name(),
- FuncSpec :: {Func, FuncState},
+ FuncSpec :: {Func, FuncState} | {FuncId, Func, FuncState},
+ FuncId :: term(),
Func :: dbg_fun(),
FuncState :: term(),
Timeout :: timeout().
install(Name, {Func, FuncState}, Timeout) ->
- send_system_msg(Name, {debug, {install, {Func, FuncState}}}, Timeout).
+ send_system_msg(Name, {debug, {install, {Func, FuncState}}}, Timeout);
+install(Name, {FuncId, Func, FuncState}, Timeout) ->
+ send_system_msg(Name, {debug, {install, {FuncId, Func, FuncState}}}, Timeout).
--spec remove(Name, Func) -> 'ok' when
+-spec remove(Name, Func | FuncId) -> 'ok' when
Name :: name(),
- Func :: dbg_fun().
-remove(Name, Func) ->
- send_system_msg(Name, {debug, {remove, Func}}).
+ Func :: dbg_fun(),
+ FuncId :: term().
+remove(Name, FuncOrFuncId) ->
+ send_system_msg(Name, {debug, {remove, FuncOrFuncId}}).
--spec remove(Name, Func, Timeout) -> 'ok' when
+-spec remove(Name, Func | FuncId, Timeout) -> 'ok' when
Name :: name(),
Func :: dbg_fun(),
+ FuncId :: term(),
Timeout :: timeout().
-remove(Name, Func, Timeout) ->
- send_system_msg(Name, {debug, {remove, Func}}, Timeout).
+remove(Name, FuncOrFuncId, Timeout) ->
+ send_system_msg(Name, {debug, {remove, FuncOrFuncId}}, Timeout).
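With the new three-tuple form a debug fun is installed under an explicit identifier, so it can later be removed by that identifier rather than by the fun value itself (which is awkward after a code change). Illustrative usage; the process name, identifier and fun are made up:

    Tracer = fun(Count, Event, _ProcState) ->
                     io:format("sys event ~p: ~p~n", [Count, Event]),
                     Count + 1
             end,
    ok = sys:install(my_server, {my_tracer, Tracer, 0}),
    %% ... and later, drop it again by its identifier:
    ok = sys:remove(my_server, my_tracer).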
%%-----------------------------------------------------------------
%% All system messages sent are on the form {system, From, Msg}
@@ -387,6 +397,13 @@ handle_debug([{log_to_file, Fd} | T], FormFunc, State, Event) ->
handle_debug([{statistics, StatData} | T], FormFunc, State, Event) ->
NStatData = stat(Event, StatData),
[{statistics, NStatData} | handle_debug(T, FormFunc, State, Event)];
+handle_debug([{FuncId, {Func, FuncState}} | T], FormFunc, State, Event) ->
+ case catch Func(FuncState, Event, State) of
+ done -> handle_debug(T, FormFunc, State, Event);
+ {'EXIT', _} -> handle_debug(T, FormFunc, State, Event);
+ NFuncState ->
+ [{FuncId, {Func, NFuncState}} | handle_debug(T, FormFunc, State, Event)]
+ end;
handle_debug([{Func, FuncState} | T], FormFunc, State, Event) ->
case catch Func(FuncState, Event, State) of
done -> handle_debug(T, FormFunc, State, Event);
@@ -544,8 +561,10 @@ debug_cmd(no_debug, Debug) ->
{ok, []};
debug_cmd({install, {Func, FuncState}}, Debug) ->
{ok, install_debug(Func, FuncState, Debug)};
-debug_cmd({remove, Func}, Debug) ->
- {ok, remove_debug(Func, Debug)};
+debug_cmd({install, {FuncId, Func, FuncState}}, Debug) ->
+ {ok, install_debug(FuncId, {Func, FuncState}, Debug)};
+debug_cmd({remove, FuncOrFuncId}, Debug) ->
+ {ok, remove_debug(FuncOrFuncId, Debug)};
debug_cmd(_Unknown, Debug) ->
{unknown_debug, Debug}.
@@ -573,6 +592,7 @@ get_stat(_) ->
stat({in, _Msg}, {Time, Reds, In, Out}) -> {Time, Reds, In+1, Out};
stat({in, _Msg, _From}, {Time, Reds, In, Out}) -> {Time, Reds, In+1, Out};
stat({out, _Msg, _To}, {Time, Reds, In, Out}) -> {Time, Reds, In, Out+1};
+stat({out, _Msg, _To, _State}, {Time, Reds, In, Out}) -> {Time, Reds, In, Out+1};
stat(_, StatData) -> StatData.
trim(N, LogData) ->
@@ -582,9 +602,9 @@ trim(N, LogData) ->
%% Debug structure manipulating functions
%%-----------------------------------------------------------------
install_debug(Item, Data, Debug) ->
- case get_debug2(Item, Debug, undefined) of
- undefined -> [{Item, Data} | Debug];
- _ -> Debug
+ case lists:keysearch(Item, 1, Debug) of
+ false -> [{Item, Data} | Debug];
+ _ -> Debug
end.
remove_debug(Item, Debug) -> lists:keydelete(Item, 1, Debug).
@@ -635,7 +655,8 @@ close_log_file(Debug) ->
| {'log_to_file', FileName}
| {'install', FuncSpec},
FileName :: file:name(),
- FuncSpec :: {Func, FuncState},
+ FuncSpec :: {Func, FuncState} | {FuncId, Func, FuncState},
+ FuncId :: term(),
Func :: dbg_fun(),
FuncState :: term().
debug_options(Options) ->
@@ -658,6 +679,8 @@ debug_options([{log_to_file, FileName} | T], Debug) ->
end;
debug_options([{install, {Func, FuncState}} | T], Debug) ->
debug_options(T, install_debug(Func, FuncState, Debug));
+debug_options([{install, {FuncId, Func, FuncState}} | T], Debug) ->
+ debug_options(T, install_debug(FuncId, {Func, FuncState}, Debug));
debug_options([_ | T], Debug) ->
debug_options(T, Debug);
debug_options([], Debug) ->
diff --git a/lib/stdlib/test/calendar_SUITE.erl b/lib/stdlib/test/calendar_SUITE.erl
index 20053dfe54..55118e251c 100644
--- a/lib/stdlib/test/calendar_SUITE.erl
+++ b/lib/stdlib/test/calendar_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1997-2016. All Rights Reserved.
+%% Copyright Ericsson AB 1997-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -30,7 +30,8 @@
leap_years/1,
last_day_of_the_month/1,
local_time_to_universal_time_dst/1,
- iso_week_number/1]).
+ iso_week_number/1,
+ system_time/1, rfc3339/1]).
-define(START_YEAR, 1947).
-define(END_YEAR, 2012).
@@ -40,7 +41,8 @@ suite() -> [{ct_hooks,[ts_install_cth]}].
all() ->
[gregorian_days, gregorian_seconds, day_of_the_week,
day_of_the_week_calibrate, leap_years,
- last_day_of_the_month, local_time_to_universal_time_dst, iso_week_number].
+ last_day_of_the_month, local_time_to_universal_time_dst,
+ iso_week_number, system_time, rfc3339].
groups() ->
[].
@@ -157,10 +159,163 @@ local_time_to_universal_time_dst_x(Config) when is_list(Config) ->
iso_week_number(Config) when is_list(Config) ->
check_iso_week_number().
+system_time(Config) when is_list(Config) ->
+ EpochDate = {{1970,1,1}, {0,0,0}},
+ Epoch = calendar:datetime_to_gregorian_seconds(EpochDate),
+ Y0 = {{0,1,1},{0,0,0}},
+
+ EpochDate = calendar:system_time_to_universal_time(0, second),
+ 0 = calendar:datetime_to_gregorian_seconds(Y0),
+ Y0 = calendar:system_time_to_universal_time(-Epoch, second),
+
+ T = erlang:system_time(second),
+ UDate = calendar:system_time_to_universal_time(T, second),
+ LDate = erlang:universaltime_to_localtime(UDate),
+ LDate = calendar:system_time_to_local_time(T, second),
+
+ ok.
+
+rfc3339(Config) when is_list(Config) ->
+ Ms = [{unit, millisecond}],
+ Mys = [{unit, microsecond}],
+ Ns = [{unit, nanosecond}],
+ S = [{unit, second}],
+ D = [{time_designator, $\s}],
+ Z = [{offset, "Z"}],
+
+ "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12T23:20:50.52Z", Ms),
+ "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12t23:20:50.52z", Ms),
+ "1985-04-12T21:20:50.52Z" =
+ test_parse("1985-04-12T23:20:50.52+02:00", Ms),
+ "1985-04-12T23:20:50Z" = test_parse("1985-04-12T23:20:50.52Z", S),
+ "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12T23:20:50.52Z", Ms),
+ "1985-04-12T23:20:50.52Z" = test_parse("1985-04-12t23:20:50.52z", Mys),
+ "1985-04-12 21:20:50.52Z" =
+ test_parse("1985-04-12 23:20:50.52+02:00", Ns++D),
+ "1985-04-12T23:20:50Z" = test_parse("1985-04-12T23:20:50.52Z"),
+ "1996-12-20T00:39:57Z" = test_parse("1996-12-19T16:39:57-08:00"),
+ "1991-01-01T00:00:00Z" = test_parse("1990-12-31T23:59:60Z"),
+ "1991-01-01T08:00:00Z" = test_parse("1990-12-31T23:59:60-08:00"),
+
+ "1996-12-20T00:39:57Z" = test_parse("1996-12-19T16:39:57-08:00"),
+ %% The leap second is not handled:
+ "1991-01-01T00:00:00Z" = test_parse("1990-12-31T23:59:60Z"),
+
+ "9999-12-31T23:59:59Z" = do_format_z(253402300799, []),
+ "9999-12-31T23:59:59.999Z" = do_format_z(253402300799*1000+999, Ms),
+ "9999-12-31T23:59:59.999999Z" =
+ do_format_z(253402300799*1000000+999999, Mys),
+ "9999-12-31T23:59:59.999999999Z" =
+ do_format_z(253402300799*1000000000+999999999, Ns),
+ {'EXIT', _} = (catch do_format_z(253402300799+1, [])),
+ {'EXIT', _} = (catch do_parse("9999-12-31T23:59:60Z", [])),
+ {'EXIT', _} = (catch do_format_z(253402300799*1000000000+999999999+1, Ns)),
+ 253402300799 = do_parse("9999-12-31T23:59:59Z", []),
+
+ "0000-01-01T00:00:00Z" = test_parse("0000-01-01T00:00:00.0+00:00"),
+ "9999-12-31T00:00:00Z" = test_parse("9999-12-31T00:00:00.0+00:00"),
+ "1584-03-04T00:00:00Z" = test_parse("1584-03-04T00:00:00.0+00:00"),
+ "1900-01-01T00:00:00Z" = test_parse("1900-01-01T00:00:00.0+00:00"),
+ "2016-01-24T00:00:00Z" = test_parse("2016-01-24T00:00:00.0+00:00"),
+ "1970-01-01T00:00:00Z" = test_parse("1970-01-01T00:00:00Z"),
+ "1970-01-02T00:00:00Z" = test_parse("1970-01-01T23:59:60Z"),
+ "1970-01-02T00:00:00Z" = test_parse("1970-01-01T23:59:60.5Z"),
+ "1970-01-02T00:00:00Z" = test_parse("1970-01-01T23:59:60.55Z"),
+ "1970-01-02T00:00:00.55Z" = test_parse("1970-01-01T23:59:60.55Z", Ms),
+ "1970-01-02T00:00:00.55Z" = test_parse("1970-01-01T23:59:60.55Z", Mys),
+ "1970-01-02T00:00:00.55Z" = test_parse("1970-01-01T23:59:60.55Z", Ns),
+ "1970-01-02T00:00:00.999999Z" =
+ test_parse("1970-01-01T23:59:60.999999Z", Mys),
+ "1970-01-02T00:00:01Z" =
+ test_parse("1970-01-01T23:59:60.999999Z", Ms),
+ "1970-01-01T00:00:00Z" = test_parse("1970-01-01T00:00:00+00:00"),
+ "1970-01-01T00:00:00Z" = test_parse("1970-01-01T00:00:00-00:00"),
+ "1969-12-31T00:01:00Z" = test_parse("1970-01-01T00:00:00+23:59"),
+ "1918-11-11T09:00:00Z" = test_parse("1918-11-11T11:00:00+02:00", Mys),
+ "1970-01-01T00:00:00.000001Z" =
+ test_parse("1970-01-01T00:00:00.000001Z", Mys),
+
+ test_time(erlang:system_time(second), []),
+ test_time(erlang:system_time(second), Z),
+ test_time(erlang:system_time(second), Z ++ S),
+ test_time(erlang:system_time(second), [{offset, "+02:20"}]),
+ test_time(erlang:system_time(millisecond), Ms),
+ test_time(erlang:system_time(microsecond), Mys++[{offset, "-02:20"}]),
+
+ T = erlang:system_time(second),
+ TS = do_format(T, []),
+ TS = do_format(T * 1000, Ms),
+ TS = do_format(T * 1000 * 1000, Mys),
+ TS = do_format(T * 1000 * 1000 * 1000, Ns),
+
+ 946720800 = TO = do_parse("2000-01-01 10:00:00Z", []),
+ Str = "2000-01-01T10:02:00+00:02",
+ Str = do_format(TO, [{offset, 120}]),
+ Str = do_format(TO * 1000, [{offset, 120 * 1000}]++Ms),
+ Str = do_format(TO * 1000 * 1000, [{offset, 120 * 1000 * 1000}]++Mys),
+ Str = do_format(TO * 1000 * 1000 * 1000,
+ [{offset, 120 * 1000 * 1000 * 1000}]++Ns),
+
+ NStr = "2000-01-01T09:58:00-00:02",
+ NStr = do_format(TO, [{offset, -120}]),
+ NStr = do_format(TO * 1000, [{offset, -120 * 1000}]++Ms),
+ NStr = do_format(TO * 1000 * 1000, [{offset, -120 * 1000 * 1000}]++Mys),
+ NStr = do_format(TO * 1000 * 1000 * 1000,
+ [{offset, -120 * 1000 * 1000 * 1000}]++Ns),
+
+ 543210000 = do_parse("1970-01-01T00:00:00.54321Z", Ns),
+ 54321000 = do_parse("1970-01-01T00:00:00.054321Z", Ns),
+ 543210 = do_parse("1970-01-01T00:00:00.54321Z", Mys),
+ 543 = do_parse("1970-01-01T00:00:00.54321Z", Ms),
+ 0 = do_parse("1970-01-01T00:00:00.000001Z", Ms),
+ 1 = do_parse("1970-01-01T00:00:00.000001Z", Mys),
+ 1000 = do_parse("1970-01-01T00:00:00.000001Z", Ns),
+ 0 = do_parse("1970-01-01Q00:00:00.00049Z", Ms),
+ 1 = do_parse("1970-01-01Q00:00:00.0005Z", Ms),
+ 6543210 = do_parse("1970-01-01T00:00:06.54321Z", Mys),
+ 298815132000000 = do_parse("1979-06-21T12:12:12Z", Mys),
+ -1613826000000000 = do_parse("1918-11-11T11:00:00Z", Mys),
+ -1613833200000000 = do_parse("1918-11-11T11:00:00+02:00", Mys),
+ -1613833200000000 = do_parse("1918-11-11T09:00:00Z", Mys),
+
+ "1970-01-01T00:00:00Z" = do_format_z(0, Mys),
+ "1970-01-01T00:00:01Z" = do_format_z(1, S),
+ "1970-01-01T00:00:00.001Z" = do_format_z(1, Ms),
+ "1970-01-01T00:00:00.000001Z" = do_format_z(1, Mys),
+ "1970-01-01T00:00:00.000000001Z" = do_format_z(1, Ns),
+ "1970-01-01T00:00:01Z" = do_format_z(1000000, Mys),
+ "1970-01-01T00:00:00.54321Z" = do_format_z(543210, Mys),
+ "1970-01-01T00:00:00.543Z" = do_format_z(543, Ms),
+ "1970-01-01T00:00:00.54321Z" = do_format_z(543210000, Ns),
+ "1970-01-01T00:00:06.54321Z" = do_format_z(6543210, Mys),
+ "1979-06-21T12:12:12Z" = do_format_z(298815132000000, Mys),
+ "1918-11-11T13:00:00Z" = do_format_z(-1613818800000000, Mys),
+ ok.
+
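The new test cases exercise calendar:system_time_to_universal_time/2, calendar:rfc3339_to_system_time/2 and calendar:system_time_to_rfc3339/2. A few round trips taken from the assertions above (do_parse/do_format are thin wrappers around the rfc3339 functions):

    253402300799 = calendar:rfc3339_to_system_time("9999-12-31T23:59:59Z", []),
    "1970-01-01T00:00:00.000001Z" =
        calendar:system_time_to_rfc3339(1, [{offset, "Z"}, {unit, microsecond}]),
    "2000-01-01T10:02:00+00:02" =
        calendar:system_time_to_rfc3339(946720800, [{offset, 120}]).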
%%
%% LOCAL FUNCTIONS
%%
+test_parse(String) ->
+ test_parse(String, []).
+
+test_parse(String, Options) ->
+ T = do_parse(String, Options),
+ calendar:system_time_to_rfc3339(T, [{offset, "Z"} | Options]).
+
+do_parse(String, Options) ->
+ calendar:rfc3339_to_system_time(String, Options).
+
+test_time(Time, Options) ->
+ F = calendar:system_time_to_rfc3339(Time, Options),
+ Time = calendar:rfc3339_to_system_time(F, Options).
+
+do_format_z(Time, Options) ->
+ do_format(Time, [{offset, "Z"}|Options]).
+
+do_format(Time, Options) ->
+ calendar:system_time_to_rfc3339(Time, Options).
+
%% check_gregorian_days
%%
check_gregorian_days(Days, MaxDays) when Days < MaxDays ->
diff --git a/lib/stdlib/test/erl_eval_SUITE.erl b/lib/stdlib/test/erl_eval_SUITE.erl
index 8eb85cab8e..f4019d477b 100644
--- a/lib/stdlib/test/erl_eval_SUITE.erl
+++ b/lib/stdlib/test/erl_eval_SUITE.erl
@@ -1,7 +1,7 @@
%%
%% %CopyrightBegin%
%%
-%% Copyright Ericsson AB 1998-2017. All Rights Reserved.
+%% Copyright Ericsson AB 1998-2018. All Rights Reserved.
%%
%% Licensed under the Apache License, Version 2.0 (the "License");
%% you may not use this file except in compliance with the License.
@@ -47,7 +47,8 @@
eval_expr_5/1,
zero_width/1,
eep37/1,
- eep43/1]).
+ eep43/1,
+ otp_15035/1]).
%%
%% Define to run outside of test server
@@ -87,7 +88,7 @@ all() ->
otp_6539, otp_6543, otp_6787, otp_6977, otp_7550,
otp_8133, otp_10622, otp_13228, otp_14826,
funs, try_catch, eval_expr_5, zero_width,
- eep37, eep43].
+ eep37, eep43, otp_15035].
groups() ->
[].
@@ -1606,6 +1607,55 @@ eep43(Config) when is_list(Config) ->
error_check("(#{})#{nonexisting:=value}.", {badkey,nonexisting}),
ok.
+otp_15035(Config) when is_list(Config) ->
+ check(fun() ->
+ fun() when #{} ->
+ a;
+ () when #{a => b} ->
+ b;
+ () when #{a => b} =:= #{a => b} ->
+ c
+ end()
+ end,
+ "fun() when #{} ->
+ a;
+ () when #{a => b} ->
+ b;
+ () when #{a => b} =:= #{a => b} ->
+ c
+ end().",
+ c),
+ check(fun() ->
+ F = fun(M) when M#{} ->
+ a;
+ (M) when M#{a => b} ->
+ b;
+ (M) when M#{a := b} ->
+ c;
+ (M) when M#{a := b} =:= M#{a := b} ->
+ d;
+ (M) when M#{a => b} =:= M#{a => b} ->
+ e
+ end,
+ {F(#{}), F(#{a => b})}
+ end,
+ "fun() ->
+ F = fun(M) when M#{} ->
+ a;
+ (M) when M#{a => b} ->
+ b;
+ (M) when M#{a := b} ->
+ c;
+ (M) when M#{a := b} =:= M#{a := b} ->
+ d;
+ (M) when M#{a => b} =:= M#{a => b} ->
+ e
+ end,
+ {F(#{}), F(#{a => b})}
+ end().",
+ {e, d}),
+ ok.
+
%% Check the string in different contexts: as is; in fun; from compiled code.
check(F, String, Result) ->
check1(F, String, Result),
diff --git a/lib/stdlib/test/error_logger_h_SUITE.erl b/lib/stdlib/test/error_logger_h_SUITE.erl
index 9dc04f27a1..d533305939 100644
--- a/lib/stdlib/test/error_logger_h_SUITE.erl
+++ b/lib/stdlib/test/error_logger_h_SUITE.erl
@@ -62,6 +62,7 @@ logfile(Config) ->
error_logger:logfile({open,Log}),
ok = rpc:call(Node, erlang, apply, [fun gen_events/1,[Ev]]),
AtNode = iolist_to_binary(["** at node ",atom_to_list(Node)," **"]),
+ timer:sleep(1000), % allow some time to get all log events into the log
error_logger:logfile(close),
analyse_events(Log, Ev, [AtNode], unlimited),
@@ -124,6 +125,7 @@ tty(Config) ->
ok = rpc:call(Node, erlang, apply, [fun gen_events/1,[Ev]]),
tty_log_close(),
AtNode = iolist_to_binary(["** at node ",atom_to_list(Node)," **"]),
+ timer:sleep(1000), % allow some time to get all log events into the log
analyse_events(Log, Ev, [AtNode], unlimited),
test_server:stop_node(Node),
@@ -207,7 +209,7 @@ event_templates() ->
gen_events(Ev) ->
io:format("node = ~p\n", [node()]),
io:format("group leader = ~p\n", [group_leader()]),
- io:format("~p\n", [gen_event:which_handlers(error_logger)]),
+ io:format("~p\n", [error_logger:which_report_handlers()]),
call_error_logger(Ev),
{Pid,Ref} = spawn_monitor(fun() -> error(ouch) end),
@@ -240,6 +242,7 @@ analyse_events(Log, Ev, AtNode, Depth) ->
call_error_logger([{F,Args}|T]) ->
apply(error_logger, F, Args),
+ timer:sleep(10),
call_error_logger(T);
call_error_logger([]) -> ok.
diff --git a/lib/stdlib/test/io_SUITE.erl b/lib/stdlib/test/io_SUITE.erl
index 6f4e7ad7e0..9f48fbf5e3 100644
--- a/lib/stdlib/test/io_SUITE.erl
+++ b/lib/stdlib/test/io_SUITE.erl
@@ -31,9 +31,9 @@
otp_10836/1, io_lib_width_too_small/1,
io_with_huge_message_queue/1, format_string/1,
maps/1, coverage/1, otp_14178_unicode_atoms/1, otp_14175/1,
- otp_14285/1, limit_term/1]).
+ otp_14285/1, limit_term/1, otp_14983/1]).
--export([pretty/2]).
+-export([pretty/2, trf/3]).
%%-define(debug, true).
@@ -63,7 +63,7 @@ all() ->
io_lib_print_binary_depth_one, otp_10302, otp_10755, otp_10836,
io_lib_width_too_small, io_with_huge_message_queue,
format_string, maps, coverage, otp_14178_unicode_atoms, otp_14175,
- otp_14285, limit_term].
+ otp_14285, limit_term, otp_14983].
%% Error cases for output.
error_1(Config) when is_list(Config) ->
@@ -1750,7 +1750,7 @@ printable_range(Suite) when is_list(Suite) ->
PrettyOptions = [{column,1},
{line_length,109},
{depth,30},
- {max_chars,60},
+ {line_max_chars,60},
{record_print_fun,
fun(_,_) -> no end},
{encoding,unicode}],
@@ -1886,7 +1886,7 @@ otp_10302(Suite) when is_list(Suite) ->
pretty(Term, Depth) when is_integer(Depth) ->
Opts = [{column, 1}, {line_length, 20},
- {depth, Depth}, {max_chars, 60},
+ {depth, Depth}, {line_max_chars, 60},
{record_print_fun, fun rfd/2},
{encoding, unicode}],
pretty(Term, Opts);
@@ -2053,19 +2053,19 @@ maps(_Config) ->
%% in a map with more than one element.
"#{}" = fmt("~w", [#{}]),
- "#{a=>b}" = fmt("~w", [#{a=>b}]),
- re_fmt(<<"#\\{(a=>b|c=>d),[.][.][.]=>[.][.][.]\\}">>,
- "~W", [#{a=>b,c=>d},2]),
- re_fmt(<<"#\\{(a=>b|c=>d|e=>f),[.][.][.]=>[.][.][.],[.][.][.]\\}">>,
- "~W", [#{a=>b,c=>d,e=>f},2]),
+ "#{a => b}" = fmt("~w", [#{a=>b}]),
+ re_fmt(<<"#\\{(a => b),[.][.][.]\\}">>,
+ "~W", [#{a => b,c => d},2]),
+ re_fmt(<<"#\\{(a => b),[.][.][.]\\}">>,
+ "~W", [#{a => b,c => d,e => f},2]),
"#{}" = fmt("~p", [#{}]),
- "#{a => b}" = fmt("~p", [#{a=>b}]),
- "#{...}" = fmt("~P", [#{a=>b},1]),
+ "#{a => b}" = fmt("~p", [#{a => b}]),
+ "#{...}" = fmt("~P", [#{a => b},1]),
re_fmt(<<"#\\{(a => b|c => d),[.][.][.]\\}">>,
- "~P", [#{a=>b,c=>d},2]),
+ "~P", [#{a => b,c => d},2]),
re_fmt(<<"#\\{(a => b|c => d|e => f),[.][.][.]\\}">>,
- "~P", [#{a=>b,c=>d,e=>f},2]),
+ "~P", [#{a => b,c => d,e => f},2]),
List = [{I,I*I} || I <- lists:seq(1, 20)],
Map = maps:from_list(List),
@@ -2441,7 +2441,7 @@ limit_term(_Config) ->
{_, 1} = limt(T, 0),
{_, 2} = limt(T, 1),
{_, 2} = limt(T, 2),
- {_, 1} = limt(T, 3),
+ {_, 2} = limt(T, 3),
{_, 1} = limt(T, 4),
T2 = #{[] => {},{} => []},
{_, 2} = limt(T2, 1),
@@ -2489,3 +2489,129 @@ limt_pp(Term, Depth) when is_integer(Depth) ->
pp(Term, Depth) ->
lists:flatten(io_lib:format("~P", [Term, Depth])).
+
+otp_14983(_Config) ->
+ trunc_depth(-1, fun trp/3),
+ trunc_depth(10, fun trp/3),
+ trunc_depth(-1, fun trw/3),
+ trunc_depth(10, fun trw/3),
+ trunc_depth_p(-1),
+ trunc_depth_p(10),
+ trunc_string(),
+ ok.
+
+trunc_string() ->
+ "str " = trf("str ", [], 10),
+ "str ..." = trf("str ~s", ["str"], 6),
+ "str str" = trf("str ~s", ["str"], 7),
+ "str ..." = trf("str ~8s", ["str"], 6),
+ Pa = filename:dirname(code:which(?MODULE)),
+ {ok, UNode} = test_server:start_node(printable_range_unicode, slave,
+ [{args, " +pc unicode -pa " ++ Pa}]),
+ U = "кирилли́ческий атом",
+ UFun = fun(Format, Args, CharsLimit) ->
+ rpc:call(UNode,
+ ?MODULE, trf, [Format, Args, CharsLimit])
+ end,
+ "str кир" = UFun("str ~3ts", [U], 7),
+ "str ..." = UFun("str ~3ts", [U], 6),
+ "str ..." = UFun("str ~30ts", [U], 6),
+ "str кир..." = UFun("str ~30ts", [U], 10),
+ "str кирилл..." = UFun("str ~30ts", [U], 13),
+ "str кирилли́..." = UFun("str ~30ts", [U], 14),
+ "str кирилли́ч..." = UFun("str ~30ts", [U], 15),
+ "\"кирилли́ческ\"..." = UFun("~tp", [U], 13),
+ BU = <<"кирилли́ческий атом"/utf8>>,
+ "<<\"кирилли́\"/utf8...>>" = UFun("~tp", [BU], 20),
+ "<<\"кирилли́\"/utf8...>>" = UFun("~tp", [BU], 21),
+ "<<\"кирилли́ческ\"/utf8...>>" = UFun("~tp", [BU], 22),
+ test_server:stop_node(UNode).
+
+trunc_depth(D, Fun) ->
+ "..." = Fun("", D, 0),
+ "[]" = Fun("", D, 1),
+
+ "#{}" = Fun(#{}, D, 1),
+ "#{a => 1}" = Fun(#{a => 1}, D, 7),
+ "#{...}" = Fun(#{a => 1}, D, 5),
+ "#{a => 1}" = Fun(#{a => 1}, D, 6),
+ A = lists:seq(1, 1000),
+ M = #{A => A, {A,A} => {A,A}},
+ "#{...}" = Fun(M, D, 6),
+ "#{{...} => {...},...}" = Fun(M, D, 7),
+ "#{{[...],...} => {[...],...},...}" = Fun(M, D, 22),
+ "#{{[...],...} => {[...],...},[...] => [...]}" = Fun(M, D, 31),
+ "#{{[...],...} => {[...],...},[...] => [...]}" = Fun(M, D, 33),
+ "#{{[1|...],[...]} => {[1|...],[...]},[1,2|...] => [...]}" =
+ Fun(M, D, 50),
+
+ "..." = Fun({c, 1, 2}, D, 0),
+ "{...}" = Fun({c, 1, 2}, D, 1),
+
+ "..." = Fun({}, D, 0),
+ "{}" = Fun({}, D, 1),
+ T = {A, A, A},
+ "{...}" = Fun(T, D, 5),
+ "{[...],...}" = Fun(T, D, 6),
+ "{[1|...],[...],...}" = Fun(T, D, 12),
+ "{[1,2|...],[1|...],...}" = Fun(T, D, 20),
+ "{[1,2|...],[1|...],[...]}" = Fun(T, D, 21),
+ "{[1,2,3|...],[1,2|...],[1|...]}" = Fun(T, D, 28),
+
+ "{[1],[1,2|...]}" = Fun({[1],[1,2,3,4]}, D, 14).
+
+trunc_depth_p(D) ->
+ UOpts = [{record_print_fun, fun rfd/2},
+ {encoding, unicode}],
+ LOpts = [{record_print_fun, fun rfd/2},
+ {encoding, latin1}],
+ trunc_depth_p(D, UOpts),
+ trunc_depth_p(D, LOpts).
+
+trunc_depth_p(D, Opts) ->
+ "[...]" = trp("abcdefg", D, 4, Opts),
+ "\"abc\"..." = trp("abcdefg", D, 5, Opts),
+ "\"abcdef\"..." = trp("abcdefg", D, 8, Opts),
+ "\"abcdefg\"" = trp("abcdefg", D, 9, Opts),
+ "\"abcdefghijkl\"" = trp("abcdefghijkl", D, -1, Opts),
+ AZ = lists:seq($A, $Z),
+ AZb = list_to_binary(AZ),
+ AZbS = "<<\"" ++ AZ ++ "\">>",
+ AZbS = trp(AZb, D, -1),
+ "<<\"ABCDEFGH\"...>>" = trp(AZb, D, 17, Opts), % 4 chars even if D = -1...
+ "<<\"ABCDEFGHIJKL\"...>>" = trp(AZb, D, 18, Opts),
+ B1 = <<"abcdef",0:8>>,
+ "<<\"ab\"...>>" = trp(B1, D, 8, Opts),
+ "<<\"abcdef\"...>>" = trp(B1, D, 14, Opts),
+ "<<97,98,99,100,...>>" = trp(B1, D, 16, Opts),
+ "<<97,98,99,100,101,102,0>>" = trp(B1, D, -1, Opts),
+ B2 = <<AZb/binary,0:8>>,
+ "<<\"AB\"...>>" = trp(B2, D, 8, Opts),
+ "<<\"ABCDEFGH\"...>>" = trp(B2, D, 14, Opts),
+ "<<65,66,67,68,69,70,71,72,0>>" = trp(<<"ABCDEFGH",0:8>>, D, -1, Opts),
+ "<<97,0,107,108,...>>" = trp(<<"a",0:8,"kllkjlksdjfsj">>, D, 20, Opts),
+
+ A = lists:seq(1, 1000),
+ "#c{...}" = trp({c, 1, 2}, D, 6),
+ "#c{...}" = trp({c, 1, 2}, D, 7),
+ "#c{f1 = [...],...}" = trp({c, A, A}, D, 18),
+ "#c{f1 = [1|...],f2 = [...]}" = trp({c, A, A}, D, 19),
+ "#c{f1 = [1,2|...],f2 = [1|...]}" = trp({c, A, A}, D, 31),
+ "#c{f1 = [1,2,3|...],f2 = [1,2|...]}" = trp({c, A, A}, D, 32).
+
+trp(Term, D, T) ->
+ trp(Term, D, T, [{record_print_fun, fun rfd/2}]).
+
+trp(Term, D, T, Opts) ->
+ R = io_lib_pretty:print(Term, [{depth, D},
+ {chars_limit, T}|Opts]),
+ lists:flatten(io_lib:format("~s", [R])).
+
+trw(Term, D, T) ->
+ lists:flatten(io_lib:format("~W", [Term, D], [{chars_limit, T}])).
+
+trf(Format, Args, T) ->
+ trf(Format, Args, T, [{record_print_fun, fun rfd/2}]).
+
+trf(Format, Args, T, Opts) ->
+ lists:flatten(io_lib:format(Format, Args, [{chars_limit, T}|Opts])).
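
The new otp_14983 test drives the chars_limit option through the trp/trw/trf helpers; a minimal sketch of the same option passed directly to io_lib:format/3 (the function name truncated/2 is illustrative):

%% Illustrative sketch of the chars_limit option exercised by otp_14983:
%% the option caps the number of characters produced and the term is cut
%% off with an ellipsis where the limit is reached.
truncated(Term, Limit) ->
    lists:flatten(io_lib:format("~p", [Term], [{chars_limit, Limit}])).

For example, truncated(lists:seq(1, 1000), 20) gives a short, ellipsis-terminated string rather than the full 1000-element list.
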
diff --git a/lib/stdlib/test/proc_lib_SUITE.erl b/lib/stdlib/test/proc_lib_SUITE.erl
index fbdcb518b2..81bf9020b8 100644
--- a/lib/stdlib/test/proc_lib_SUITE.erl
+++ b/lib/stdlib/test/proc_lib_SUITE.erl
@@ -542,16 +542,17 @@ system_terminate(Reason,_Parent,_Deb,_State) ->
t_format(_Config) ->
- error_logger:tty(false),
+ logger:add_handler_filter(logger_std_h,stop_all,{fun(_,_) -> stop end,ok}),
+ error_logger:add_report_handler(?MODULE, self()),
try
t_format()
after
- error_logger:tty(true)
+ error_logger:delete_report_handler(?MODULE),
+ logger:remove_handler_filter(logger_std_h,stop_all)
end,
ok.
t_format() ->
- error_logger:add_report_handler(?MODULE, self()),
Pid = proc_lib:spawn(fun '\x{aaa}t_format_looper'/0),
HugeData = gb_sets:from_list(lists:seq(1, 100)),
SomeData1 = list_to_atom([246]),
@@ -584,11 +585,11 @@ t_format() ->
ok.
t_format_arbitrary(_Config) ->
- error_logger:tty(false),
+ logger:add_handler_filter(logger_std_h,stop_all,{fun(_,_) -> stop end,ok}),
try
t_format_arbitrary()
after
- error_logger:tty(true)
+ logger:remove_handler_filter(logger_std_h,stop_all)
end,
ok.
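
The t_format cases above mute the default standard handler with a logger filter instead of error_logger:tty/1; a minimal sketch of that pattern as a reusable wrapper (the wrapper name with_quiet_logger/1 is illustrative; logger_std_h is the handler id used in the patch):

%% Illustrative sketch: run Fun with a filter that stops every event
%% reaching the logger_std_h handler, then remove the filter again.
with_quiet_logger(Fun) ->
    ok = logger:add_handler_filter(logger_std_h, stop_all,
                                   {fun(_, _) -> stop end, ok}),
    try
        Fun()
    after
        ok = logger:remove_handler_filter(logger_std_h, stop_all)
    end.
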
diff --git a/lib/stdlib/test/sys_SUITE.erl b/lib/stdlib/test/sys_SUITE.erl
index b44df0fbda..439a23d82d 100644
--- a/lib/stdlib/test/sys_SUITE.erl
+++ b/lib/stdlib/test/sys_SUITE.erl
@@ -84,7 +84,7 @@ stats(Config) when is_list(Config) ->
{ok,-44} = public_call(44),
{ok,Stats} = sys:statistics(?server,get),
true = lists:member({messages_in,1}, Stats),
- true = lists:member({messages_out,0}, Stats),
+ true = lists:member({messages_out,1}, Stats),
ok = sys:statistics(?server,false),
{status,_Pid,{module,_Mod},[_PDict,running,Self,_,_]} =
sys:get_status(?server),
@@ -133,7 +133,8 @@ install(Config) when is_list(Config) ->
Master ! {spy_got,{request,Arg},ProcState};
Other ->
io:format("Trigged other=~p\n",[Other])
- end
+ end,
+ func_state
end,
sys:install(?server,{SpyFun,func_state}),
{ok,-1} = (catch public_call(1)),
@@ -142,10 +143,27 @@ install(Config) when is_list(Config) ->
sys:install(?server,{SpyFun,func_state}),
sys:install(?server,{SpyFun,func_state}),
{ok,-3} = (catch public_call(3)),
- sys:remove(?server,SpyFun),
{ok,-4} = (catch public_call(4)),
+ sys:remove(?server,SpyFun),
+ {ok,-5} = (catch public_call(5)),
+ [{spy_got,{request,1},sys_SUITE_server},
+ {spy_got,{request,3},sys_SUITE_server},
+ {spy_got,{request,4},sys_SUITE_server}] = get_messages(),
+
+ sys:install(?server,{id1, SpyFun, func_state}),
+ sys:install(?server,{id1, SpyFun, func_state}), %% same id, should not be installed again
+ sys:install(?server,{id2, SpyFun, func_state}),
+ {ok,-1} = (catch public_call(1)),
+ %% We have two SpyFun installed:
[{spy_got,{request,1},sys_SUITE_server},
- {spy_got,{request,3},sys_SUITE_server}] = get_messages(),
+ {spy_got,{request,1},sys_SUITE_server}] = get_messages(),
+ sys:remove(?server, id1),
+ {ok,-1} = (catch public_call(1)),
+ %% We have one SpyFun installed:
+ [{spy_got,{request,1},sys_SUITE_server}] = get_messages(),
+ sys:no_debug(?server),
+ {ok,-1} = (catch public_call(1)),
+ [] = get_messages(),
stop(),
ok.
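
The extended install test covers debug functions carried under an explicit id; a minimal sketch of installing and later removing such a spy fun (the id my_spy and the helper names are illustrative; the {Id, Fun, FuncState} form is the one tested above):

%% Illustrative sketch: a spy fun installed under an id can later be
%% removed by that id instead of by the fun itself.
install_spy(Server) ->
    Spy = fun(FuncState, Event, ProcState) ->
                  io:format("sys event ~p in ~p~n", [Event, ProcState]),
                  FuncState
          end,
    sys:install(Server, {my_spy, Spy, func_state}).

remove_spy(Server) ->
    sys:remove(Server, my_spy).
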
diff --git a/system/doc/design_principles/spec_proc.xml b/system/doc/design_principles/spec_proc.xml
index 5f4e7ac685..f910c3dba3 100644
--- a/system/doc/design_principles/spec_proc.xml
+++ b/system/doc/design_principles/spec_proc.xml
@@ -312,7 +312,7 @@ sys:handle_debug(Deb, Func, Info, Event) => Deb1</code>
define what a system event is and how it is to be
represented. Typically at least incoming and outgoing
messages are considered system events and represented by
- the tuples <c>{in,Msg[,From]}</c> and <c>{out,Msg,To}</c>,
+ the tuples <c>{in,Msg[,From]}</c> and <c>{out,Msg,To[,State]}</c>,
respectively.</item>
</list>
<p><c>handle_debug</c> returns an updated debug structure
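
With the optional State element now documented for outgoing messages, a special process can report its state along with each reply it sends; a minimal sketch using sys:handle_debug/4 (the reply/4 and write_debug/3 helpers are illustrative):

%% Illustrative sketch: record an outgoing reply as a system event,
%% including the current state as the optional fourth tuple element.
reply(Deb, To, Reply, State) ->
    To ! Reply,
    sys:handle_debug(Deb, fun write_debug/3, ?MODULE, {out, Reply, To, State}).

write_debug(Dev, Event, Name) ->
    io:format(Dev, "~p event = ~p~n", [Name, Event]).
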